Example #1
def verifyAppsAndJobsSucceeded(appLogSearchPrefix,
                               stdout,
                               localDir,
                               testMap=None,
                               user=None):
    '''
    :param appLogSearchPrefix: The prefix used to search for the app logs.
    :param stdout: stdout from the app.
    :param localDir: Path to the current dir.
    :param testMap: Map containing the service name(s) and the kwargs of the services restarted in the test.
    :param user: User the jobs were run as; passed to the job success check.
    :return: Success status and a dict with the relevant info.
    '''
    d = {}
    status = True

    # Fail fast if the required arguments are missing.
    if appLogSearchPrefix is None or localDir is None:
        return False, d

    # Check if all the Jobs and the apps succeeded.
    if Hadoop.isHadoop2():
        dLog, jobLog, wprdLog = {}, {}, {}
        appStatus = jobStatus = True
        appIds, jobIds = YARN.getAppAndJobIdsFromConsole(stdout)
        appStatus, dLog = YARN.checkAppsSucceeded(appIds,
                                                  logPrefix=appLogSearchPrefix,
                                                  localDir=localDir)
        jobStatus, jobLog = YARN.checkJobsSucceeded(jobIds, user)
        d.update(dLog)
        d.update(jobLog)

        jobIds = cleanseJobIds(jobIds)

        # Performing WPR validations.
        wprStatus, wprdLog = verifyWorkPreservingRMRestart(jobIds, testMap)
        d.update(wprdLog)

        # Check if all the validations succeeded.
        if appStatus is False or jobStatus is False or wprStatus is False:
            d[appLogSearchPrefix] = "appStatus: %s jobStatus: %s wprStatus: %s" % (
                appStatus, jobStatus, wprStatus)
            status = False

    # Prepend the method names to all the keys in the dict.
    tempd = {}
    for k, v in d.items():
        tempd["%s: %s" % ("verifyAppsAndJobsSucceeded", k)] = v

    return status, tempd
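
A minimal sketch of how this helper could be called. The console output capture, the log prefix, the local directory, the testMap contents and the user below are illustrative placeholders, not values from the original suite.

# Hypothetical call site: consoleOutput is console output captured from a YARN app run;
# the prefix, path, testMap and user are placeholder values.
with open("app_console_output.log") as f:
    consoleOutput = f.read()
status, info = verifyAppsAndJobsSucceeded(appLogSearchPrefix="distributedshell",
                                          stdout=consoleOutput,
                                          localDir="/tmp/test_artifacts",
                                          testMap={"RESOURCEMANAGER": {}},
                                          user="hrt_qa")
assert status, "Validation failed: %s" % info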
Example #2
    def launchMultipleSleepJobs(cls,
                                numJobs,
                                mapSleepTime=1000,
                                reduceSleepTime=1000,
                                config=None):
        '''
        Launch multiple sleep jobs and verify that they succeed.
        :param numJobs: Number of sleep jobs to run.
        :param mapSleepTime: Map sleep time in milliseconds.
        :param reduceSleepTime: Reduce sleep time in milliseconds.
        :param config: Location of the configuration to use.
        :return: List of the launched job IDs.
        '''
        jobIds = []
        # Create jobs
        for _ in range(numJobs):
            jobclientFile = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                         "JobClient_output.log")
            HadoopJobHelper.runSleepJob(numOfMaps=1,
                                        numOfReduce=1,
                                        mapSleepTime=mapSleepTime,
                                        reduceSleepTime=reduceSleepTime,
                                        extraJobArg=cls._jobArgs,
                                        runInBackground=False,
                                        config=config,
                                        directoutput=True,
                                        outputFile=jobclientFile)
            with open(jobclientFile) as f:
                text = f.read()
            # The job id is the first entry of the job-id list parsed from the console output.
            currJobId = YARN.getAppAndJobIdsFromConsole(text)[1][0]
            jobIds.append(currJobId)
        # Verify that every launched job succeeded.
        for job in jobIds:
            ruAssert("YARN", MAPRED.isJobSucceed(job))
        return jobIds
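
A sketch of a possible caller for the helper above; the owning class name RuSetup and the argument values are assumptions for illustration only.

# Hypothetical caller: launch three short sleep jobs and collect their job IDs.
# RuSetup is an assumed owning class name; the real class is not shown in this snippet.
jobIds = RuSetup.launchMultipleSleepJobs(numJobs=3,
                                         mapSleepTime=500,
                                         reduceSleepTime=500)
print("Launched and verified jobs: %s" % jobIds)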