def validate_apps(self, local_dir_name="small_rw_jobs"):  # pylint: disable=unused-argument
    '''
    Validate that the small apps launched earlier completed successfully.

    Walks the log files under ARTIFACTS_DIR/<self.local_dir_name>, extracts
    the YARN application id from each log's stdout, then checks all apps via
    the YARN web services. Any app that did not SUCCEED must at least be in
    the ACCEPTED state, otherwise an AssertionError is raised.

    :param local_dir_name: unused; kept for backward compatibility
                           (self.local_dir_name is used instead).
    :return: None. Raises AssertionError when an app is neither
             ACCEPTED nor SUCCEEDED.
    '''
    local_dir = os.path.join(Config.getEnv('ARTIFACTS_DIR'), self.local_dir_name)
    appIds = []
    for root, _dirs, filenames in os.walk(local_dir):
        for f in filenames:
            # Context manager guarantees the log file is closed
            # (the original leaked the file handle).
            with open(os.path.join(root, f), 'r') as logfile:
                stdout = logfile.read()
            appId = YARN.getApplicationIDFromStdout(stdout, logoutput=False)
            appIds.append(appId)
    # Sleep for 30 seconds before checking App status
    time.sleep(30)
    # Overall status is unused here; we inspect the per-app dict instead.
    _status, d = YARN.checkAppsSucceeded(appIds, logPrefix=None, useWS=True, localDir=None)
    for app, appState in d.items():
        if appState != "SUCCEEDED":
            appInfo = YARN.getApplicationInfo(app)
            logger.info(appInfo)
            if appInfo:
                assert appInfo['state'] == 'ACCEPTED', \
                    "app is neither in ACCEPTED nor SUCCEEDED state"
def verifyOozieAppsAndJobsSucceeded(workflowIds, logPrefix, localDir, testMap, action_name='wc', checkJob=True):
    '''
    Verify that all YARN apps and MR jobs spawned via Oozie workflows
    succeeded, and that work-preserving RM restart validations pass.

    :param workflowIds: List of Oozie workflow ids to verify.
    :param logPrefix: log prefix for YARN app logs.
    :param localDir: Path to local log dir.
    :param testMap: map containing the service(s) names and the kwargs of the
                    services restarted in the test; forwarded to
                    verifyWorkPreservingRMRestart.
    :param action_name: Oozie action name appended to each workflow id when
                        querying job info; the literal string 'None' means
                        query the workflow id alone.
    :param checkJob: when True, also verify that the MR jobs succeeded.
    :return: Bool status indicating if all validations succeeded.
    '''
    appIds = []
    jobIds = []
    dLog = {}
    appStatus = True
    jobStatus = True
    wprStatus = True
    # Check the job and app status for each workflow we launched.
    if Hadoop.isHadoop2():
        # Collect all the app and job ids for every workflow.
        for workflowId in workflowIds:
            if action_name != 'None':
                stdout = Oozie.getJobInfo('%s@%s' % (workflowId, action_name), verbose=True, retry=True)
            else:
                stdout = Oozie.getJobInfo('%s' % (workflowId), verbose=True, retry=True)
            ids = Oozie.getJobAndAppIds(stdout)
            # 'entry' avoids shadowing the builtin 'id'.
            for entry in ids:
                appIds.append(entry['application'])
                jobIds.append(entry['job'])
        # Get the app and job status for all the jobs we found.
        appStatus, appLog = YARN.checkAppsSucceeded(appIds, logPrefix=logPrefix, localDir=localDir)
        dLog.update(appLog)
        if checkJob:
            jobStatus, jobLog = YARN.checkJobsSucceeded(jobIds)
            dLog.update(jobLog)
        for key, value in dLog.items():
            logger.info("%s -> %s" % (key, value))
        wprStatus, d = verifyWorkPreservingRMRestart(jobIds, testMap)
        for k, v in d.items():
            logger.info("%s -> %s" % (k, v))
    logger.info("appStatus: %s jobStatus: %s wprStatus: %s" % (appStatus, jobStatus, wprStatus))
    return appStatus and jobStatus and wprStatus
def verifyAppsAndJobsSucceeded(appLogSearchPrefix, stdout, localDir, testMap=None, user=None):
    '''
    Verify that the apps and jobs found in the given console output
    succeeded, including work-preserving RM restart validations.

    :param appLogSearchPrefix: The prefix using which the app logs are
                               going to be searched.
    :param stdout: stdout from the app; app and job ids are parsed from it.
    :param localDir: Path to current dir.
    :param testMap: map containing the service(s) names and the kwargs of the
                    services being restarted in the test.
    :param user: user passed through to YARN.checkJobsSucceeded.
    :return: success status and a dict with the relevant info, with each key
             prefixed by this function's name.
    '''
    d = {}
    status = True
    # NOTE(review): a missing prefix/dir marks the result failed but the
    # checks below still run — preserved from the original behavior.
    if appLogSearchPrefix is None or localDir is None:
        status = False
    # Check if all the Jobs and the apps succeeded.
    if Hadoop.isHadoop2():
        # Use distinct dicts: the original's chained assignment aliased one
        # shared dict object across all three names.
        dLog = {}
        jobLog = {}
        wprdLog = {}
        appStatus = jobStatus = True
        appIds, jobIds = YARN.getAppAndJobIdsFromConsole(stdout)
        appStatus, dLog = YARN.checkAppsSucceeded(appIds, logPrefix=appLogSearchPrefix, localDir=localDir)
        jobStatus, jobLog = YARN.checkJobsSucceeded(jobIds, user)
        d.update(dLog)
        d.update(jobLog)
        jobIds = cleanseJobIds(jobIds)
        # Performing WPR validations.
        wprStatus, wprdLog = verifyWorkPreservingRMRestart(jobIds, testMap)
        d.update(wprdLog)
        # Check if all the validations succeeded.
        if appStatus is False or jobStatus is False or wprStatus is False:
            d[appLogSearchPrefix] = "appStatus: %s jobStatus: %s wprStatus: %s" % (
                appStatus, jobStatus, wprStatus)
            status = False
    # Prepend the method name to all the keys in the dict.
    tempd = {}
    for k, v in d.items():
        tempd["%s: %s" % ("verifyAppsAndJobsSucceeded", k)] = v
    return status, tempd