def findRelevantSubdirectories(self, repository, app, extraVersions, versionTitleMethod=None):
    """Scan 'repository' for subdirectories whose dot-separated name components
    cover all of the app's versions.

    Returns an OrderedDict mapping version title -> list of
    (extraVersion, fullPath) pairs, in sorted directory order.

    repository         -- directory to scan with os.listdir
    app                -- application whose 'versions' must all appear in the dir name
    extraVersions      -- candidates passed to self.findMatchingExtraVersion
    versionTitleMethod -- optional callable (app, version) -> display title
    """
    subdirs = OrderedDict()
    dirlist = os.listdir(repository)
    dirlist.sort()  # deterministic, alphabetical processing order
    appVersions = set(app.versions)
    for dir in dirlist:
        dirVersions = dir.split(".")
        # Only directories containing every one of the app's versions are relevant
        if set(dirVersions).issuperset(appVersions):
            currExtraVersion = self.findMatchingExtraVersion(dirVersions, extraVersions)
            if currExtraVersion:
                # Strip the extra-version suffix to recover the base version name
                version = dir.replace("." + currExtraVersion, "")
            else:
                version = dir
            if versionTitleMethod:
                versionTitle = versionTitleMethod(app, version)
            else:
                versionTitle = version
            fullPath = os.path.join(repository, dir)
            # NOTE(review): assumes findMatchingExtraVersion returns a string
            # (possibly empty) rather than None -- the concatenation below
            # would raise TypeError on None. TODO confirm.
            self.diag.info(
                "Found subdirectory " + dir + " with version " + versionTitle
                + " and extra version '" + currExtraVersion + "'"
            )
            subdirs.setdefault(versionTitle, []).append((currExtraVersion, fullPath))
    return subdirs
def addContents(self):
    """Group all properties by their directory and pack the resulting box into the dialog."""
    grouped = OrderedDict()
    for prop in self.getAllProperties():
        bucket = grouped.setdefault(prop.dir, [])
        bucket.append(prop)
    self.dialog.vbox.pack_start(self.createVBox(grouped), expand=True, fill=True)
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for JUnit
    report writer. Only does anything if the app has batch_junit_format:true in its config file """

    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        # use the command-line name if given, else the date
        self.runId = optionMap.get("name", calculateBatchDate())
        self.allApps = OrderedDict()   # {appName: [app, ...]}
        self.appData = OrderedDict()   # {app: JUnitApplicationData}

    def useJUnitFormat(self, app):
        # JUnit output is opt-in via the app's batch configuration
        return app.getBatchConfigValue("batch_junit_format") == "true"

    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        # 'in' instead of dict.has_key: has_key was removed in Python 3,
        # and 'in' is equivalent (and faster) on Python 2 as well
        if test.app not in self.appData:
            self._addApplication(test)
        self.appData[test.app].storeResult(test)

    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)

    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for JUnit
    report writer. Only does anything if the app has batch_junit_format:true in its config file """

    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = getBatchRunName(optionMap)
        self.allApps = OrderedDict()   # {appName: [app, ...]}
        self.appData = OrderedDict()   # {app: JUnitApplicationData}

    def useJUnitFormat(self, app):
        # JUnit output is opt-in via the app's batch configuration
        return app.getBatchConfigValue("batch_junit_format") == "true"

    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        # 'in' instead of dict.has_key: has_key was removed in Python 3,
        # and 'in' is equivalent on Python 2 as well
        if test.app not in self.appData:
            self._addApplication(test)
        self.appData[test.app].storeResult(test)

    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)

    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
class EmailResponder(plugins.Responder):
    """Collect batch results per application and mail out a summary when all
    tests have completed. Only acts on apps with email enabled."""

    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        # use the command-line name if given, else the date
        self.runId = optionMap.get("name", calculateBatchDate())
        self.batchAppData = OrderedDict()  # {app: BatchApplicationData}
        self.allApps = OrderedDict()       # {appName: [app, ...]}

    def notifyComplete(self, test):
        if test.app.emailEnabled():
            # 'in' instead of dict.has_key: has_key was removed in Python 3,
            # and 'in' is equivalent on Python 2 as well
            if test.app not in self.batchAppData:
                self.addApplication(test)
            self.batchAppData[test.app].storeCategory(test)

    def getRootSuite(self, test):
        # Walk up the parent chain to the root suite
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test

    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)

    def notifyAllComplete(self):
        mailSender = MailSender(self.runId)
        for appList in self.allApps.values():
            # list(...) keeps this a list whether map returns a list (Py2)
            # or an iterator (Py3)
            batchDataList = list(map(self.batchAppData.get, appList))
            mailSender.send(batchDataList)
def split_sets(split, response_sets, survey_cache, split_entities=False):
    """Split a ResponseSet queryset into (data_series, flattened-responses) pairs.

    split          -- iterable of DataSeries to split by, or falsy for no split
    response_sets  -- ResponseSet queryset to flatten
    survey_cache   -- survey lookup passed through to flatten_response_queryset
    split_entities -- when True, regroup the result per owning entity and return
                      {entity: [(data_series, [ResponseSet, ...]), ...]}

    Returns a list of (DataSeries-or-None, flattened responses) pairs, or the
    per-entity mapping described above when split_entities is set.
    """
    result = []
    if split:
        for ds in split:
            result.append((ds, flatten_response_queryset(
                response_sets.filter(data_series=ds), survey_cache)))
    else:
        # No series to split on: a single bucket keyed by None.
        # (Removed the dead "split = [None]" assignment that was here;
        # 'split' was never read again after this point.)
        result.append((None, flatten_response_queryset(response_sets, survey_cache)))
    if split_entities:
        result_dict = OrderedDict()
        series = [ds for ds, qs in result]
        for ds, qs in result:
            filter_dict = OrderedDict()
            for rs in qs:
                # Assign all responsesets to the entity that owns them
                filter_dict.setdefault(rs.entity, []).append(rs)
            for entity, data in filter_dict.items():
                # Membership test on the dict itself instead of scanning .keys()
                if entity not in result_dict:
                    for s in series:
                        # Ensure all dataseries are present in entity output
                        result_dict.setdefault(entity, OrderedDict())[s] = []
                result_dict[entity][ds] = data
        for entity, data in result_dict.items():
            result_dict[entity] = data.items()
        result = result_dict
    return result
def edges_to_dict(edges, dct=None):
    """Collect an iterable of (source, destination) edges into an ordered
    mapping from each source to the list of its destinations.

    An existing mapping may be supplied via *dct* to accumulate into;
    otherwise a fresh OrderedDict is created and returned.
    """
    mapping = OrderedDict() if dct is None else dct
    for source, dest in edges:
        mapping.setdefault(source, []).append(dest)
    return mapping
def test_setdefault(self):
    """OrderedDict.setdefault: existing keys keep value and position; new keys go at the end."""
    # Build the dict from shuffled pairs so insertion order is arbitrary
    pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
    shuffle(pairs)
    od = OrderedDict(pairs)
    pair_order = list(od.items())
    # setdefault on an existing key returns the stored value, not the default
    self.assertEqual(od.setdefault('a', 10), 3)
    # make sure order didn't change
    self.assertEqual(list(od.items()), pair_order)
    # setdefault on a missing key inserts and returns the default
    self.assertEqual(od.setdefault('x', 10), 10)
    # make sure 'x' is added to the end
    self.assertEqual(list(od.items())[-1], ('x', 10))
def findAllClasses(self, className):
    """Map the given interactive-action class name to the applications that
    provide it, honouring per-config name replacements.

    Returns a list of (class name, [app, ...]) pairs; with no applications
    loaded, the name itself paired with an empty list.
    """
    if len(self.allApps) == 0:
        return [(className, [])]
    classNames = OrderedDict()
    for app in self.allApps:
        configs = self.getAllIntvConfigs([app])
        getters = [conf.getReplacements for conf in configs]
        replacements = plugins.ResponseAggregator(getters)()
        for conf in configs:
            if className not in conf.getInteractiveActionClasses(self.dynamic):
                continue
            realName = replacements.get(className, className)
            classNames.setdefault(realName, []).append(app)
    return classNames.items()
def writeErrors(self, rejectionInfo):
    """Write rejection messages to stderr, but only for app groups where
    every application in the group was rejected."""
    rejected = set(rejectionInfo.keys())
    groups = OrderedDict()
    for suite in self.suites:
        groups.setdefault(suite.app.name, []).append(suite.app)
    for group in groups.values():
        # Don't write errors if only some of a group are rejected
        if not set(group).issubset(rejected):
            continue
        for app in group:
            if app in rejectionInfo:
                sys.stderr.write(app.rejectionMessage(rejectionInfo.get(app)))
class GenerateTestSummary(guiplugins.ActionDialogGUI): def __init__(self, *args): guiplugins.ActionDialogGUI.__init__(self, *args) self.addOption("generate", "", possibleDirs=[os.getenv("TEXTTEST_TMP", "")], saveFile=True) self.batchAppData = OrderedDict() self.allApps = OrderedDict() def performOnCurrent(self): fileName = self.getFileName() for test in self.currTestSelection: if test.state.isComplete(): if not self.batchAppData.has_key(test.app): self.addApplication(test) self.batchAppData[test.app].storeCategory(test) self.writeTextSummary(fileName) def writeTextSummary(self, fileName): mailSender = MailSender() with open(fileName, "w") as f: for appList in self.allApps.values(): batchDataList = map(self.batchAppData.get, appList) f.write(mailSender.makeContents(batchDataList, False)) def getFileName(self): fileName = self.optionGroup.getOptionValue("generate") if not fileName: raise plugins.TextTestError, "Cannot save selection - no file name specified" elif os.path.isdir(fileName): raise plugins.TextTestError, "Cannot save selection - existing directory specified" else: return fileName def _getTitle(self): return "Generate test summary" def getRootSuite(self, test): if test.parent: return self.getRootSuite(test.parent) else: return test def addApplication(self, test): rootSuite = self.getRootSuite(test) app = test.app self.batchAppData[app] = BatchApplicationData(rootSuite) self.allApps.setdefault(app.name, []).append(app)
def findAllClasses(self, className):
    """Return [(class name, apps)] pairs for the applications whose interactive
    configs provide the given action class, applying any name replacements."""
    if len(self.allApps) == 0:
        return [(className, [])]
    found = OrderedDict()
    for application in self.allApps:
        appConfigs = self.getAllIntvConfigs([application])
        aggregator = plugins.ResponseAggregator(
            [c.getReplacements for c in appConfigs])
        replacements = aggregator()
        for appConfig in appConfigs:
            actionClasses = appConfig.getInteractiveActionClasses(self.dynamic)
            if className in actionClasses:
                realName = replacements.get(className, className)
                found.setdefault(realName, []).append(application)
    return found.items()
def organiseByProject(self, differences):
    """Bucket artefact differences by owning project.

    Returns (changes, differencesByProject): 'changes' lists
    (artefact, project) pairs for marked projects/artefacts, and
    'differencesByProject' maps project -> [(artefact, oldHash, newHash,
    scopeProvided), ...].
    """
    byProject = OrderedDict()
    changes = []
    for artefact, oldHash, hash in differences:
        actualArtefact, projects = self.projectData.getProjects(artefact)
        if not projects:
            # No known project: derive a name from the artefact string itself
            derivedName = artefact.split(":")[-1].split("[")[0][:-1]
            if derivedName in self.markedArtefacts:
                changes.append((actualArtefact, derivedName))
            continue
        for project, scopeProvided in projects:
            entry = (actualArtefact, oldHash, hash, scopeProvided)
            byProject.setdefault(project, []).append(entry)
            if project in self.markedArtefacts:
                changes.append((actualArtefact, project))
    return changes, byProject
class GenerateTestSummary(guiplugins.ActionDialogGUI): def __init__(self, *args): guiplugins.ActionDialogGUI.__init__(self, *args) self.addOption("generate", "",possibleDirs=[os.getenv("TEXTTEST_TMP", "")], saveFile=True) self.batchAppData = OrderedDict() self.allApps = OrderedDict() def performOnCurrent(self): fileName = self.getFileName() for test in self.currTestSelection: if test.state.isComplete(): if not self.batchAppData.has_key(test.app): self.addApplication(test) self.batchAppData[test.app].storeCategory(test) self.writeTextSummary(fileName) def writeTextSummary(self, fileName): mailSender = MailSender() with open(fileName, "w") as f: for appList in self.allApps.values(): batchDataList = map(self.batchAppData.get, appList) f.write(mailSender.makeContents(batchDataList, False)) def getFileName(self): fileName = self.optionGroup.getOptionValue("generate") if not fileName: raise plugins.TextTestError, "Cannot save selection - no file name specified" elif os.path.isdir(fileName): raise plugins.TextTestError, "Cannot save selection - existing directory specified" else: return fileName def _getTitle(self): return "Generate test summary" def getRootSuite(self, test): if test.parent: return self.getRootSuite(test.parent) else: return test def addApplication(self, test): rootSuite = self.getRootSuite(test) app = test.app self.batchAppData[app] = BatchApplicationData(rootSuite) self.allApps.setdefault(app.name, []).append(app)
def organiseByProject(self, differences):
    """Group artefact differences per project; also collect (artefact, project)
    pairs for projects/artefacts that are marked."""
    grouped = OrderedDict()
    marked = []
    for artefact, oldHash, hash in differences:
        actualArtefact, projects = self.projectData.getProjects(artefact)
        if projects:
            for project, scopeProvided in projects:
                grouped.setdefault(project, []).append(
                    (actualArtefact, oldHash, hash, scopeProvided))
                if project in self.markedArtefacts:
                    marked.append((actualArtefact, project))
        else:
            # Fall back to parsing a project name out of the artefact string
            fallbackName = artefact.split(":")[-1].split("[")[0][:-1]
            if fallbackName in self.markedArtefacts:
                marked.append((actualArtefact, fallbackName))
    return marked, grouped
def getAppRepositoryInfo(self):
    """Group applications by their historical-report page title.

    Returns an OrderedDict mapping page title -> list of
    (app, repositories, extraApps) tuples; extra applications with a
    distinct, non-default page title get their own entry.
    """
    appInfo = OrderedDict()
    for suite in self.suitesToGenerate:
        app = suite.app
        repositories = self.getRepositories(suite)
        if len(repositories) == 0:
            continue  # nothing to report for this suite
        pageTitle = app.getBatchConfigValue("historical_report_page_name")
        sharedExtras = []
        for extraApp in app.extras:
            extraTitle = extraApp.getBatchConfigValue("historical_report_page_name")
            ownPage = extraTitle != pageTitle and extraTitle != extraApp.getDefaultPageName()
            if ownPage:
                appInfo.setdefault(extraTitle, []).append((extraApp, repositories, []))
            else:
                sharedExtras.append(extraApp)
        appInfo.setdefault(pageTitle, []).append((app, repositories, sharedExtras))
    return appInfo
def groupByFile(declarations):
    """Group declarations by the file they were declared in.

    Declarations without a location are skipped. Returns an OrderedDict
    mapping file name -> list of declarations, in first-seen file order.
    """
    grouped = OrderedDict()
    for declaration in declarations:
        if declaration.location:
            grouped.setdefault(declaration.location.file_name, []).append(declaration)
    return grouped
def expandCollations(self, test):
    """Expand wildcarded collate_file target patterns into concrete targets.

    Non-wildcard targets are passed through unchanged; wildcard targets are
    expanded by matching each source pattern on disk and transferring the
    matched wildcard parts into a new target stem via makeTargetStem.

    Returns (targetStem, sourcePaths) items suitable for collation.
    """
    newColl = OrderedDict()
    coll = test.getConfigValue("collate_file")
    self.diag.info("coll initial:" + str(coll))
    # sorted() gives a deterministic target processing order
    for targetPattern in sorted(coll.keys()):
        sourcePatterns = coll.get(targetPattern)
        if not glob.has_magic(targetPattern):
            newColl[targetPattern] = sourcePatterns
            continue
        # add each file to newColl by transferring wildcards across
        for sourcePattern in sourcePatterns:
            for sourcePath in self.findPaths(test, sourcePattern):
                # Use relative paths: easier to debug and SequenceMatcher breaks down if strings are longer than 200 chars
                relativeSourcePath = plugins.relpath(sourcePath, test.getDirectory(temporary=1))
                newTargetStem = self.makeTargetStem(targetPattern, sourcePattern, relativeSourcePath)
                self.diag.info("New collation to " + newTargetStem + " : from " + relativeSourcePath)
                newColl.setdefault(newTargetStem, []).append(sourcePath)
    return newColl.items()
def get_planned_deployments(deploy_info):
    """Yield deployment environments in the form 'cluster.instance' in the
    order they appear in the deploy.yaml file for service.

    :return : a series of strings of the form: 'cluster.instance', exits on
    error if deploy.yaml is not found"""
    # Clusters are recorded in first-seen order, each mapping to its
    # ordered list of instances
    by_cluster = OrderedDict()
    for step in deploy_info['pipeline']:
        namespace = step['instancename']
        # Skip pipeline steps that are not actual deployments
        if namespace in DEPLOY_PIPELINE_NON_DEPLOY_STEPS:
            continue
        cluster, instance = namespace.split('.')
        by_cluster.setdefault(cluster, []).append(instance)
    # Yield deployment environments in the form of 'cluster.instance'
    for cluster, instances in by_cluster.items():
        for instance in instances:
            yield "%s.%s" % (cluster, instance)
def getAppRepositoryInfo(self):
    """Group applications by historical-report page title, resolving and
    validating each suite's batch result repository on disk."""
    appInfo = OrderedDict()
    for suite in self.suitesToGenerate:
        repositoryRoot = getBatchRepository(suite)
        if not repositoryRoot:
            continue
        app = suite.app
        repository = os.path.join(repositoryRoot, app.name)
        if not os.path.isdir(repository):
            plugins.printWarning(
                "Batch result repository " + repository
                + " does not exist - not creating pages for " + repr(app)
            )
            continue
        pageTitle = app.getBatchConfigValue("historical_report_page_name")
        sharedExtras = []
        for extraApp in app.extras:
            extraTitle = extraApp.getBatchConfigValue("historical_report_page_name")
            if extraTitle != pageTitle and extraTitle != extraApp.getDefaultPageName():
                appInfo.setdefault(extraTitle, []).append((extraApp, repository, []))
            else:
                sharedExtras.append(extraApp)
        appInfo.setdefault(pageTitle, []).append((app, repository, sharedExtras))
    return appInfo
def get_responsesets(scorecard, compare_series=None, limit_to_dataseries=[],
                     limit_to_entity=[], limit_to_entitytype=[],
                     aggregate_on=None, aggregate_by_entity=None):
    """Fetch and aggregate the ResponseSets for a scorecard's project.

    NOTE(review): the []-defaults are mutable default arguments; they are not
    mutated here, but converting them to None-sentinels would be safer --
    confirm callers before changing, since len()/.filter() are applied to them.
    """
    if not aggregate_by_entity and not aggregate_on:
        raise ReportRunError("No aggregation mode selected")
    surveys = dict([(s.pk, s) for s in scorecard.project.survey_set.all()])
    #TODO: Optimisation for django 1.4, add prefetch_related('data_series') for better performance
    qs = ResponseSet.objects.filter(survey__project__pk=scorecard.project_id).select_related('entity','survey')
    result_sets = None
    if limit_to_dataseries and len(limit_to_dataseries):
        qs = qs.filter(data_series__in=limit_to_dataseries)
        if compare_series:
            # presumably limit_to_dataseries is a queryset here (it has
            # .filter) rather than a plain list -- verify against callers
            limit = limit_to_dataseries.filter(group=compare_series)
            result_sets = [ds for ds in compare_series.get_dataseries() if ds in limit] or None
    if len(limit_to_entity):
        qs = qs.filter(entity__in=limit_to_entity)
    if len(limit_to_entitytype):
        qs = qs.filter(entity__entity_type__in=limit_to_entitytype)
    if not result_sets and compare_series:
        try:
            result_sets = [ds for ds in compare_series.get_dataseries()]
        except Exception:
            # compare_series may also be a list of DataSeries rather than a
            # group-like object -- fall back to the first item's group
            result_sets = [ds for ds in compare_series[0].group.get_dataseries() if ds in compare_series]
    if aggregate_on:
        rs_dict = OrderedDict()
        for dataseries in aggregate_on.get_dataseries():
            ds_qs = qs.filter(data_series=dataseries)
            if ds_qs.count():
                if aggregate_by_entity:
                    for entity, data in split_sets(result_sets, ds_qs, surveys, split_entities=True).items():
                        rs_dict.setdefault(entity, OrderedDict())[dataseries] = data
                else:
                    rs_dict[dataseries] = split_sets(result_sets, ds_qs, surveys)
    else:
        rs_dict = split_sets(result_sets, qs, surveys, split_entities=True)
    """ returns (aggregate_on):
    { <dataseries>:<response_set>, ... }
    or (aggregate_on_entity):
    { <entity>:<response_set>, ... }
    or (aggregate_on_entity, aggregate_on):
    { <dataseries>:{ <entity>:<response_set>, ... } ...
    }
    Where <response_set> is: [(None, [ResponseSet, ...]]
    or (compare_series): [(DataSeries, [ResponseSet, ...]), ...] """
    return rs_dict
def generate(self, repositoryDirs, subPageNames):
    """Generate the historical-report overview and per-tag detail pages for
    every version found in the given repository directories, then write them.
    """
    foundMinorVersions = {}
    allMonthSelectors = set()
    latestMonth = None
    pageToGraphs = {}
    for version, repositoryDirInfo in repositoryDirs.items():
        self.diag.info("Generating " + version)
        allFiles, tags = self.findTestStateFilesAndTags(repositoryDirInfo)
        if len(allFiles) > 0:
            selectors = self.makeSelectors(subPageNames, tags)
            monthSelectors = SelectorByMonth.makeInstances(tags)
            allMonthSelectors.update(monthSelectors)
            allSelectors = selectors + list(reversed(monthSelectors))
            # If we already have month pages, we only regenerate the current one
            if len(self.getExistingMonthPages()) == 0:
                selectors = allSelectors
            else:
                currLatestMonthSel = monthSelectors[-1]
                if latestMonth is None or currLatestMonthSel.linkName == latestMonth:
                    selectors.append(monthSelectors[-1])
                    latestMonth = currLatestMonthSel.linkName
            # Union of all tags any selector will show (Python 2 cmp-style sort)
            tags = list(reduce(set.union, (set(selector.selectedTags) for selector in selectors), set()))
            tags.sort(self.compareTags)
            loggedTests = OrderedDict()
            categoryHandlers = {}
            for stateFile, repository in allFiles:
                tag = self.getTagFromFile(stateFile)
                if len(tags) == 0 or tag in tags:
                    testId, state, extraVersion = self.processTestStateFile(stateFile, repository)
                    loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state
                    # NOTE(review): called here as (testId, state, extraVersion);
                    # the CategoryHandler defined later in this file expects
                    # (testId, category, extraVersion, state) -- confirm which
                    # handler revision this pairs with.
                    categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, state, extraVersion)
            versionToShow = self.removePageVersion(version)
            for resourceName in self.resourceNames:
                hasData = False
                for sel in selectors:
                    filePath = self.getPageFilePath(sel, resourceName)
                    if self.pagesOverview.has_key(filePath):
                        _, page, pageColours = self.pagesOverview[filePath]
                    else:
                        page = self.createPage(resourceName)
                        pageColours = set()
                        self.pagesOverview[filePath] = resourceName, page, pageColours
                    for cellInfo in self.getCellInfoForResource(resourceName):
                        tableHeader = self.getTableHeader(resourceName, cellInfo, version, repositoryDirs)
                        heading = self.getHeading(resourceName, versionToShow)
                        hasNewData, graphLink, tableColours = self.addTable(page, cellInfo, categoryHandlers, version, loggedTests, sel, tableHeader, filePath, heading)
                        hasData |= hasNewData
                        pageColours.update(tableColours)
                        if graphLink:
                            pageToGraphs.setdefault(page, []).append(graphLink)
                if hasData and versionToShow:
                    link = HTMLgen.Href("#" + version, versionToShow)
                    foundMinorVersions.setdefault(resourceName, HTMLgen.Container()).append(link)
            # put them in reverse order, most relevant first
            linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ]
            for tag in tags:
                details = self.pagesDetails.setdefault(tag, TestDetails(tag, self.pageTitle, self.pageSubTitle))
                details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview)
    # Navigation containers shared by all overview pages
    selContainer = HTMLgen.Container()
    selectors = self.makeSelectors(subPageNames)
    for sel in selectors:
        target, linkName = sel.getLinkInfo(self.pageVersion)
        selContainer.append(HTMLgen.Href(target, linkName))
    monthContainer = HTMLgen.Container()
    for sel in sorted(allMonthSelectors):
        target, linkName = sel.getLinkInfo(self.pageVersion)
        monthContainer.append(HTMLgen.Href(target, linkName))
    # Prepend headers in reverse display order (each prepend pushes to the top)
    for resourceName, page, pageColours in self.pagesOverview.values():
        if len(monthContainer.contents) > 0:
            page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center'))
        graphs = pageToGraphs.get(page)
        page.prepend(HTMLgen.Heading(2, selContainer, align = 'center'))
        minorVersionHeader = foundMinorVersions.get(resourceName)
        if minorVersionHeader:
            if not graphs is None and len(graphs) > 1:
                page.prepend(HTMLgen.Heading(1, *graphs, align = 'center'))
            page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center'))
        page.prepend(HTMLgen.Heading(1, self.getHeading(resourceName), align = 'center'))
        if len(pageColours) > 0:
            page.prepend(HTMLgen.BR()); page.prepend(HTMLgen.BR());
            page.script = self.getFilterScripts(pageColours)
    self.writePages()
class GenerateWebPages(object):
    """Builds the historical HTML report from batch repository directories:
    one overview page per selector/subpage plus per-run-tag detail pages."""

    def __init__(self, getConfigValue, pageDir, resourceNames, pageTitle, pageSubTitles, pageVersion,
                 extraVersions, descriptionInfo):
        self.pageTitle = pageTitle
        self.pageSubTitles = pageSubTitles
        self.pageVersion = pageVersion
        self.extraVersions = extraVersions
        self.pageDir = pageDir
        # {filePath: (page, pageColours)} -- overview pages, written at the end
        self.pagesOverview = OrderedDict()
        # {tag: TestDetails} -- per-run detail pages
        self.pagesDetails = OrderedDict()
        self.getConfigValue = getConfigValue
        self.resourceNames = resourceNames
        self.descriptionInfo = descriptionInfo
        self.diag = logging.getLogger("GenerateWebPages")

    def makeSelectors(self, subPageNames, tags=[]):
        # One Selector per subpage; the first configured subpage gets no file suffix
        allSelectors = []
        firstSubPageName = self.getConfigValue("historical_report_subpages", "default")[0]
        for subPageName in subPageNames:
            if subPageName == firstSubPageName:
                suffix = ""
            else:
                suffix = "_" + subPageName.lower()
            allSelectors.append(Selector(subPageName, suffix, self.getConfigValue, tags))
        return allSelectors

    def removeUnused(self, unused, tagData):
        # Delete whole teststate files for unused tags; success files are
        # shared between tags, so they are rewritten keeping only used lines
        successTags = {}
        for tag in unused:
            for fn in tagData.get(tag):
                if os.path.basename(fn).startswith("teststate_"):
                    os.remove(fn)
                else:
                    successTags.setdefault(fn, []).append(tag)
        for fn, tagsToRemove in successTags.items():
            linesToKeep = []
            with open(fn) as readFile:
                for line in readFile:
                    tag = line.strip().split()[0]
                    if tag not in tagsToRemove:
                        linesToKeep.append(line)
            with open(fn, "w") as writeFile:
                for line in linesToKeep:
                    writeFile.write(line)

    def generate(self, repositoryDirs, subPageNames, archiveUnused):
        """Build all overview/detail pages for every version, optionally
        archiving (deleting) repository data for runs no selector shows."""
        minorVersionHeader = HTMLgen.Container()
        allMonthSelectors = set()
        latestMonth = None
        pageToGraphs = {}
        for version, repositoryDirInfo in repositoryDirs.items():
            self.diag.info("Generating " + version)
            tagData, stateFiles, successFiles = self.findTestStateFilesAndTags(repositoryDirInfo)
            if len(stateFiles) > 0 or len(successFiles) > 0:
                tags = tagData.keys()
                tags.sort(self.compareTags)  # Python 2 cmp-style sort
                selectors = self.makeSelectors(subPageNames, tags)
                monthSelectors = SelectorByMonth.makeInstances(tags)
                allMonthSelectors.update(monthSelectors)
                allSelectors = selectors + list(reversed(monthSelectors))
                # If we already have month pages, we only regenerate the current one
                if len(self.getExistingMonthPages()) == 0:
                    selectors = allSelectors
                else:
                    currLatestMonthSel = monthSelectors[-1]
                    if latestMonth is None or currLatestMonthSel.linkName == latestMonth:
                        selectors.append(monthSelectors[-1])
                        latestMonth = currLatestMonthSel.linkName
                # Work out which tags are shown by some selector; the rest can be archived
                selectedTags = set()
                unusedTags = set(tags)
                for selector in selectors:
                    currTags = set(selector.selectedTags)
                    selectedTags.update(currTags)
                    if archiveUnused:
                        unusedTags.difference_update(currTags)
                tags = filter(lambda t: t in selectedTags, tags)
                if archiveUnused and unusedTags:
                    plugins.log.info("Automatic repository cleaning will now remove old data for the following runs:")
                    for tag in sorted(unusedTags, self.compareTags):
                        plugins.log.info("- " + tag)
                    plugins.log.info("(To disable automatic repository cleaning in future, please run with the --manualarchive flag when collating the HTML report.)")
                    self.removeUnused(unusedTags, tagData)
                loggedTests = OrderedDict()
                categoryHandlers = {}
                self.diag.info("Processing " + str(len(stateFiles)) + " teststate files")
                relevantFiles = 0
                for stateFile, repository in stateFiles:
                    tag = self.getTagFromFile(stateFile)
                    if len(tags) == 0 or tag in tags:
                        relevantFiles += 1
                        testId, state, extraVersion = self.processTestStateFile(stateFile, repository)
                        loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state
                        categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, state.category, extraVersion, state)
                        if relevantFiles % 100 == 0:
                            self.diag.info("- Processed " + str(relevantFiles) + " files with matching tags so far")
                self.diag.info("Processed " + str(relevantFiles) + " relevant teststate files")
                self.diag.info("Processing " + str(len(successFiles)) + " success files")
                for successFile, repository in successFiles:
                    testId = self.getTestIdentifier(successFile, repository)
                    extraVersion = self.findExtraVersion(repository)
                    with open(successFile) as f:
                        fileTags = set()
                        for line in f:
                            parts = line.strip().split(" ", 1)
                            if len(parts) != 2:
                                continue
                            tag, text = parts
                            # Duplicate tags in one file: keep the first, warn on the rest
                            if tag in fileTags:
                                sys.stderr.write("WARNING: more than one result present for tag '" + tag + "' in file " + successFile + "!\n")
                                sys.stderr.write("Ignoring later ones\n")
                                continue
                            fileTags.add(tag)
                            if len(tags) == 0 or tag in tags:
                                loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = text
                                categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, "success", extraVersion, text)
                self.diag.info("Processed " + str(len(successFiles)) + " success files")
                versionToShow = self.removePageVersion(version)
                hasData = False
                for sel in selectors:
                    filePath = self.getPageFilePath(sel)
                    if self.pagesOverview.has_key(filePath):
                        page, pageColours = self.pagesOverview[filePath]
                    else:
                        page = self.createPage()
                        pageColours = set()
                        self.pagesOverview[filePath] = page, pageColours
                    tableHeader = self.getTableHeader(version, repositoryDirs)
                    heading = self.getHeading(versionToShow)
                    hasNewData, graphLink, tableColours = self.addTable(page, self.resourceNames, categoryHandlers, version,
                                                                       loggedTests, sel, tableHeader, filePath, heading, repositoryDirInfo)
                    hasData |= hasNewData
                    pageColours.update(tableColours)
                    if graphLink:
                        pageToGraphs.setdefault(page, []).append(graphLink)
                if hasData and versionToShow:
                    link = HTMLgen.Href("#" + version, versionToShow)
                    minorVersionHeader.append(link)
                # put them in reverse order, most relevant first
                linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ]
                for tag in tags:
                    details = self.pagesDetails.setdefault(tag, TestDetails(tag, self.pageTitle, self.pageSubTitles))
                    details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview)
        # Navigation containers shared by all overview pages
        selContainer = HTMLgen.Container()
        selectors = self.makeSelectors(subPageNames)
        for sel in selectors:
            target, linkName = sel.getLinkInfo(self.pageVersion)
            selContainer.append(HTMLgen.Href(target, linkName))
        monthContainer = HTMLgen.Container()
        if len(allMonthSelectors) == 1:
            # Don't want just one month, no navigation possible
            prevMonth = list(allMonthSelectors)[0].getPreviousMonthSelector()
            allMonthSelectors.add(prevMonth)
        for sel in sorted(allMonthSelectors):
            target, linkName = sel.getLinkInfo(self.pageVersion)
            monthContainer.append(HTMLgen.Href(target, linkName))
        # Prepend headers in reverse display order (each prepend pushes to the top)
        for page, pageColours in self.pagesOverview.values():
            if len(monthContainer.contents) > 0:
                page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center'))
            graphs = pageToGraphs.get(page)
            page.prepend(HTMLgen.Heading(2, selContainer, align = 'center'))
            if minorVersionHeader.contents:
                if not graphs is None and len(graphs) > 1:
                    page.prepend(HTMLgen.Heading(1, *graphs, align = 'center'))
                page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center'))
            creationDate = TitleWithDateStamp("").__str__().strip()
            page.prepend(HTMLgen.Paragraph(creationDate, align="center"))
            page.prepend(HTMLgen.Heading(1, self.getHeading(), align = 'center'))
            if len(pageColours) > 0:
                page.prepend(HTMLgen.BR()); page.prepend(HTMLgen.BR());
                page.script = self.getFilterScripts(pageColours)
        self.writePages()

    def getFilterScripts(self, pageColours):
        # Scripts and colour tables used by the client-side row filtering
        finder = ColourFinder(self.getConfigValue)
        rowHeaderColour = finder.find("row_header_bg")
        successColour = finder.find("success_bg")
        # Always put green at the start, we often want to filter that
        sortedColours = sorted(pageColours, key=lambda c: (c != successColour, c))
        scriptCode = "var TEST_ROW_HEADER_COLOR = " + repr(rowHeaderColour) + ";\n" + \
                     "var Colors = " + repr(sortedColours) + ";"
        return [ HTMLgen.Script(code=scriptCode),
                 HTMLgen.Script(src="../javascript/jquery.js"),
                 HTMLgen.Script(src="../javascript/filter.js"),
                 HTMLgen.Script(src="../javascript/comment.js") ]

    def getHeading(self, versionToShow=""):
        heading = "Test results for " + self.pageTitle
        if versionToShow:
            heading += "." + versionToShow
        return heading

    def getTableHeader(self, version, repositoryDirs):
        # Only label tables with the version when several repositories are shown
        return version if len(repositoryDirs) > 1 else ""

    def getExistingMonthPages(self):
        return glob(os.path.join(self.pageDir, "test_" + self.pageVersion + "_all_???[0-9][0-9][0-9][0-9].html"))

    def compareTags(self, x, y):
        # cmp-style comparator: order by run time, then numerically-aware text
        timeCmp = cmp(self.getTagTimeInSeconds(x), self.getTagTimeInSeconds(y))
        if timeCmp:
            return timeCmp
        elif len(x) != len(y):
            # If the timing is the same, sort alphabetically
            # Any number should be sorted numerically, do this by padding them with leading zeroes
            return cmp(plugins.padNumbersWithZeroes(x), plugins.padNumbersWithZeroes(y))
        else:
            return cmp(x, y)

    def getTagFromFile(self, fileName):
        return os.path.basename(fileName).replace("teststate_", "")

    def findTestStateFilesAndTags(self, repositoryDirs):
        """Walk the repositories and return (tagData, stateFiles, successFiles);
        tagData maps tag -> [file, ...] across both file kinds."""
        tagData, stateFiles, successFiles = {}, [], []
        for _, dir in repositoryDirs:
            self.diag.info("Looking for teststate files in " + dir)
            for root, _, files in sorted(os.walk(dir)):
                for file in files:
                    path = os.path.join(root, file)
                    if file.startswith("teststate_"):
                        tag = self.getTagFromFile(file)
                        stateFiles.append((path, dir))
                        tagData.setdefault(tag, []).append(path)
                    elif file.startswith("succeeded_"):
                        # Success files hold one line per tag; index each tag
                        successFiles.append((path, dir))
                        with open(path) as f:
                            for line in f:
                                parts = line.split()
                                if parts:
                                    tag = parts[0]
                                    tagData.setdefault(tag, []).append(path)
            self.diag.info("Found " + str(len(stateFiles)) + " teststate files and " + str(len(successFiles)) + " success files in " + dir)
        return tagData, stateFiles, successFiles

    def processTestStateFile(self, stateFile, repository):
        state = self.readState(stateFile)
        testId = self.getTestIdentifier(stateFile, repository)
        extraVersion = self.findExtraVersion(repository)
        return testId, state, extraVersion

    def findExtraVersion(self, repository):
        # The repository directory name encodes versions; return the longest
        # trailing dotted suffix that is a known extra version, else ""
        versions = os.path.basename(repository).split(".")
        for i in xrange(len(versions)):
            version = ".".join(versions[i:])
            if version in self.extraVersions:
                return version
        return ""

    @staticmethod
    def findGlobal(modName, className):
        # Import the class from the module, falling back to the texttestlib package
        try:
            exec "from " + modName + " import " + className + " as _class"
        except ImportError:
            exec "from texttestlib." + modName + " import " + className + " as _class"
        return _class #@UndefinedVariable

    @classmethod
    def getNewState(cls, file):
        # Would like to do load(file) here... but it doesn't work with universal line endings, see Python bug 1724366
        from cStringIO import StringIO
        unpickler = Unpickler(StringIO(file.read()))
        # Magic to keep us backward compatible in the face of packages changing...
        unpickler.find_global = cls.findGlobal
        return unpickler.load()

    @classmethod
    def readState(cls, stateFile):
        """Unpickle a TestState from stateFile, converting any failure into
        an error-state object rather than propagating the exception."""
        file = open(stateFile, "rU")
        try:
            state = cls.getNewState(file)
            if isinstance(state, plugins.TestState):
                return state
            else:
                return cls.readErrorState("Incorrect type for state object.")
        except Exception, e:
            if os.path.getsize(stateFile) > 0:
                return cls.readErrorState("Stack info follows:\n" + str(e))
            else:
                return plugins.Unrunnable("Results file was empty, probably the disk it resides on is full.", "Disk full?")
class CategoryHandler:
    """Accumulates (testId, state, extraVersion) records per result category and
    renders text/HTML summaries of the counts."""

    def __init__(self):
        # Category name -> list of (testId, state, extraVersion) tuples,
        # in registration order.
        self.testsInCategory = OrderedDict()

    def update(self, categoryHandler):
        """Merge another handler's registered tests into this one, in place."""
        for cat, entries in categoryHandler.testsInCategory.items():
            self.testsInCategory.setdefault(cat, []).extend(entries)

    def registerInCategory(self, testId, category, extraVersion, state):
        """Record one test result under the given category."""
        bucket = self.testsInCategory.setdefault(category, [])
        bucket.append((testId, state, extraVersion))

    def getDescription(self, cat, count):
        # e.g. "3 failed" — short description comes from the category table.
        shortDescr = getCategoryDescription(cat)[0]
        return str(count) + " " + shortDescr

    def getTestCountDescription(self, count):
        return str(count) + " tests: "

    def generateTextSummary(self):
        """Return a one-line plain-text summary of all categories."""
        total, perCategory = self.getSummaryData()
        pieces = []
        for cat, count in perCategory:
            pieces.append(self.getDescription(cat, count))
        return self.getTestCountDescription(total) + " ".join(pieces)

    def generateHTMLSummary(self, detailPageRef, extraVersion=None):
        """Return an HTMLgen fragment of the summary, linking every non-success
        category to its section on the detail page."""
        total, perCategory = self.getSummaryData(extraVersion)
        summaryContainer = HTMLgen.Container()
        for cat, count in perCategory:
            item = HTMLgen.Text(self.getDescription(cat, count))
            if cat != "success":
                linkTarget = detailPageRef + getCategoryDescription(cat)[-1]
                item = HTMLgen.Href(linkTarget, item)
            summaryContainer.append(item)
        countText = HTMLgen.Text(self.getTestCountDescription(total))
        return HTMLgen.Container(countText, summaryContainer)

    def countTests(self, testInfo, extraVersion):
        """Count the records, restricted to one extra version if given."""
        if extraVersion is None:
            return len(testInfo)
        return len([entry for entry in testInfo if entry[2] == extraVersion])

    def getSummaryData(self, extraVersion=None):
        """Return (totalTests, [(category, count), ...]) sorted for display."""
        summaryData = []
        total = 0
        for cat, testInfo in self.testsInCategory.items():
            count = self.countTests(testInfo, extraVersion)
            if count > 0:
                total += count
                summaryData.append((cat, count))
        summaryData.sort(key=self.getSummarySortKey)
        return total, summaryData

    def getTestsWithDescriptions(self):
        described = [(getCategoryDescription(cat)[1], testInfo)
                     for cat, testInfo in self.testsInCategory.items()]
        described.sort()
        return described

    def getSummarySortKey(self, data):
        # Put success at the start, it's neater like that
        cat, count = data
        return cat != "success", -count, cat
def GET(self):
    """web.py handler: render a paginated browse view of listings, optionally
    filtered by site list (?sl=a,b) and by presence of images (?with_img=1).

    Uses Sphinx for the total count, MySQL for the page of rows, and redis
    (via cPickle) as a per-page cache for both the raw rows and the grouped view.
    """
    u = web.input()
    # Query-string params; all optional.
    pg, sl, with_img = u['pg'] if 'pg' in u else None, u['sl'] if 'sl' in u else None, u['with_img'] if 'with_img' in u else None
    # NOTE(review): int(pg) raises TypeError when 'pg' is absent (pg is None) —
    # confirm the route always supplies it.
    current_page = int(pg)
    sites_for_now = parts_model.Sites().show_sites()
    if sl:
        # Requote the user-supplied site names for use in an SQL IN (...) list.
        # NOTE(review): SECURITY — 'sl' comes straight from the request and is
        # interpolated into SQL below with only added quotes, no escaping.
        # This is injectable; should use parameterized queries.
        new_sites = []
        for site in sl.split(","):
            new_sites.append("'%s'" % site)
        sites_for_now = new_sites
    site_select = ",".join(sites_for_now)
    img_post_ids = ""
    if with_img:
        # Ask Sphinx for posts whose content mentions '.jpg' and restrict the
        # SQL to those post ids.
        sk = SkuInfo()
        sk.cl.SetLimits(0, 1000)
        res = sk.cl.Query('.jpg')
        img_post = [("'%s'" % i['id']) for i in res['matches']]
        img_post_ids = "AND SUBSTRING_INDEX( SUBSTRING_INDEX(listings_posts.idlistings_posts, ':', 2), ':', -1) IN (" + ",".join(img_post) + ")"
    # NOTE(review): 'values' is never used below — dead code.
    values = {'site_select': site_select, 'img': img_post_ids}
    sc = sphinxapi.SphinxClient()
    sc.SetServer("127.0.0.1", 3312)
    #sc.SetSortMode(sphinxapi.SPH_SORT_ATTR_DESC, "post_date")
    # NOTE(review): SECURITY — string-built SQL, see above.
    sql_id = """SELECT site_id FROM site WHERE site_nm IN (%(site_select)s)""" % ({'site_select': site_select})
    id_rows = db.bind.execute(sql_id)
    site_ids = [int(i[0]) for i in id_rows]
    limit = 50
    sc.SetFilter('site_id', site_ids)
    #sc.SetLimits(int(pg) * 10, limit)
    # Empty query: we only want the matching-document count for pagination.
    res = sc.Query("")
    num_rows = res['total_found']
    pg = Pageset(num_rows, limit)  # NOTE(review): rebinds 'pg' from str to Pageset
    pg.current_page(current_page)
    # Cache keys incorporate the site filter and the pagination window.
    option_select_key = "%s:%s:%s" % (":".join(sites_for_now), pg.skipped(), pg.entries_per_page())
    option_select_key_browse = "%s:%s:%s:browse" % (":".join(sites_for_now), pg.skipped(), pg.entries_per_page())
    #sk = SkuInfo()
    #ids_list = sk.sku_info(','.join([str(i['id']) for i in res['matches']]), None, sc)
    # d = OrderedDict()
    # for i in ids_list:
    #     d.setdefault(i['date'], [])
    #     d[i['date']].append((i['title'], i['sku']))
    if r_server.get(option_select_key):
        print "cache_hit:browsedata-date:retrieve"
        date_result = cPickle.loads(str(r_server.get(option_select_key)))
    else:
        print "cache_hit:browsedata-date:set"
        # One page of (date, title, sku, post_id) rows, newest first.
        date_sql = """ SELECT dp.list_date AS list_date
 , dp.list_title AS title
 , dp.list_sku AS sku
 , SUBSTRING_INDEX( SUBSTRING_INDEX(lp.idlistings_posts, ':', 2), ':', -1) AS post_id
 FROM ( SELECT list_date, list_title, list_sku FROM data_prep WHERE 1=1 AND SUBSTRING_INDEX(list_sku, ":", 1) IN (%(site_select)s) ) AS dp
 INNER JOIN ( SELECT list_sku, idlistings_posts FROM listings_posts WHERE 1=1 AND list_starter = 1 ) As lp
 ON lp.list_sku = dp.list_sku
 ORDER BY list_date DESC
 LIMIT %(offset)i, %(limit)i """ % ({'site_select': site_select, 'offset': pg.skipped(), 'limit': pg.entries_per_page()})
        date_result = db.bind.execute(date_sql).fetchall()
        r_server.set(option_select_key, cPickle.dumps(date_result))
    pages = pg.pages_in_set()
    first = pg.first_page()
    last = pg.last_page()
    # Unquoted site names for building the filter widgets.
    sites_alpha = parts_model.Sites().show_sites(with_quotes=False)
    chosen = []
    if sl:
        chosen = sl.split(',')
    # NOTE(review): 'chosen' is initialised to [] above, so it is never None —
    # the else branch is dead.
    if chosen != None:
        remaining = filter(lambda x : x not in chosen, sites_alpha)
    else:
        remaining = sites_alpha
    selected = filter(lambda x : x in chosen, sites_alpha)
    # Query-string fragments for the pagination links in the template.
    connect_str = "" if len(selected) == 1 or len(selected) == 0 else "&sl="
    img_str = "&with_img=1" if with_img else ""
    img_str_sl = "&sl=" if len(selected) > 0 else ""
    if r_server.get(option_select_key_browse):
        print "cache_hit:browsedata-browse:retrieve"
        d = cPickle.loads(str(r_server.get(option_select_key_browse)))
    else:
        print "cache_hit:browsedata-browse:set"
        # Group rows by date: date -> [(title, sku), ...]
        d = OrderedDict()
        for i in date_result:
            d.setdefault(i[0], [])
            d[i[0]].append((i[1], i[2]))
        r_server.set(option_select_key_browse, cPickle.dumps(d))
    # Refresh TTLs on both cache entries every request.
    r_server.expire(option_select_key, cache_timeout)
    r_server.expire(option_select_key_browse, cache_timeout)
    return render('browse_view.mako', pages=pages, date_result=d, first=first, last=last, current_page=current_page, sl=sl, with_img=with_img, chosen=chosen, remaining=remaining, selected=selected, connect_str=connect_str, img_str_sl=img_str_sl, img_str=img_str)
def generate(self, repositoryDirs, subPageNames, archiveUnused):
    """Build the historical-report HTML pages for every version in repositoryDirs.

    repositoryDirs maps version -> list of (version, directory) pairs; for each
    version the teststate/success files are collated into overview tables (one
    per selector/sub-page) and per-tag detail pages, which are written at the end.
    If archiveUnused is true, data for tags no selector shows is removed from
    the repository.
    """
    minorVersionHeader = HTMLgen.Container()
    allMonthSelectors = set()
    latestMonth = None
    pageToGraphs = {}
    for version, repositoryDirInfo in repositoryDirs.items():
        self.diag.info("Generating " + version)
        tagData, stateFiles, successFiles = self.findTestStateFilesAndTags(repositoryDirInfo)
        if len(stateFiles) > 0 or len(successFiles) > 0:
            # Python 2: keys() is a list; sort chronologically with the cmp function.
            tags = tagData.keys()
            tags.sort(self.compareTags)
            selectors = self.makeSelectors(subPageNames, tags)
            monthSelectors = SelectorByMonth.makeInstances(tags)
            allMonthSelectors.update(monthSelectors)
            allSelectors = selectors + list(reversed(monthSelectors))
            # If we already have month pages, we only regenerate the current one
            if len(self.getExistingMonthPages()) == 0:
                selectors = allSelectors
            else:
                currLatestMonthSel = monthSelectors[-1]
                if latestMonth is None or currLatestMonthSel.linkName == latestMonth:
                    selectors.append(monthSelectors[-1])
                    latestMonth = currLatestMonthSel.linkName
            # Work out which tags any selector will actually show; the rest are
            # candidates for archiving.
            selectedTags = set()
            unusedTags = set(tags)
            for selector in selectors:
                currTags = set(selector.selectedTags)
                selectedTags.update(currTags)
                if archiveUnused:
                    unusedTags.difference_update(currTags)
            # Keep original (chronological) order while dropping unselected tags.
            tags = filter(lambda t: t in selectedTags, tags)
            if archiveUnused and unusedTags:
                plugins.log.info("Automatic repository cleaning will now remove old data for the following runs:")
                for tag in sorted(unusedTags, self.compareTags):
                    plugins.log.info("- " + tag)
                plugins.log.info("(To disable automatic repository cleaning in future, please run with the --manualarchive flag when collating the HTML report.)")
                self.removeUnused(unusedTags, tagData)
            # loggedTests: extraVersion -> testId -> tag -> state (or success text).
            loggedTests = OrderedDict()
            categoryHandlers = {}
            self.diag.info("Processing " + str(len(stateFiles)) + " teststate files")
            relevantFiles = 0
            for stateFile, repository in stateFiles:
                tag = self.getTagFromFile(stateFile)
                if len(tags) == 0 or tag in tags:
                    relevantFiles += 1
                    testId, state, extraVersion = self.processTestStateFile(stateFile, repository)
                    loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state
                    categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, state.category, extraVersion, state)
                    if relevantFiles % 100 == 0:
                        self.diag.info("- Processed " + str(relevantFiles) + " files with matching tags so far")
            self.diag.info("Processed " + str(relevantFiles) + " relevant teststate files")
            self.diag.info("Processing " + str(len(successFiles)) + " success files")
            for successFile, repository in successFiles:
                testId = self.getTestIdentifier(successFile, repository)
                extraVersion = self.findExtraVersion(repository)
                with open(successFile) as f:
                    # Each line is "<tag> <text>"; duplicate tags keep the first entry.
                    fileTags = set()
                    for line in f:
                        parts = line.strip().split(" ", 1)
                        if len(parts) != 2:
                            continue
                        tag, text = parts
                        if tag in fileTags:
                            sys.stderr.write("WARNING: more than one result present for tag '" + tag + "' in file " + successFile + "!\n")
                            sys.stderr.write("Ignoring later ones\n")
                            continue
                        fileTags.add(tag)
                        if len(tags) == 0 or tag in tags:
                            loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = text
                            categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, "success", extraVersion, text)
            self.diag.info("Processed " + str(len(successFiles)) + " success files")
            versionToShow = self.removePageVersion(version)
            hasData = False
            for sel in selectors:
                filePath = self.getPageFilePath(sel)
                # Pages are shared across versions: reuse if already created.
                if self.pagesOverview.has_key(filePath):
                    page, pageColours = self.pagesOverview[filePath]
                else:
                    page = self.createPage()
                    pageColours = set()
                    self.pagesOverview[filePath] = page, pageColours
                tableHeader = self.getTableHeader(version, repositoryDirs)
                heading = self.getHeading(versionToShow)
                hasNewData, graphLink, tableColours = self.addTable(page, self.resourceNames, categoryHandlers, version, loggedTests, sel, tableHeader, filePath, heading, repositoryDirInfo)
                hasData |= hasNewData
                pageColours.update(tableColours)
                if graphLink:
                    pageToGraphs.setdefault(page, []).append(graphLink)
            if hasData and versionToShow:
                link = HTMLgen.Href("#" + version, versionToShow)
                minorVersionHeader.append(link)
            # put them in reverse order, most relevant first
            linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ]
            for tag in tags:
                details = self.pagesDetails.setdefault(tag, TestDetails(tag, self.pageTitle, self.pageSubTitles))
                details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview)
    # All versions processed: build the shared navigation and decorate each page.
    selContainer = HTMLgen.Container()
    selectors = self.makeSelectors(subPageNames)
    for sel in selectors:
        target, linkName = sel.getLinkInfo(self.pageVersion)
        selContainer.append(HTMLgen.Href(target, linkName))
    monthContainer = HTMLgen.Container()
    if len(allMonthSelectors) == 1:
        # Don't want just one month, no navigation possible
        prevMonth = list(allMonthSelectors)[0].getPreviousMonthSelector()
        allMonthSelectors.add(prevMonth)
    for sel in sorted(allMonthSelectors):
        target, linkName = sel.getLinkInfo(self.pageVersion)
        monthContainer.append(HTMLgen.Href(target, linkName))
    for page, pageColours in self.pagesOverview.values():
        # prepend() calls build the header bottom-up: later prepends end up higher.
        if len(monthContainer.contents) > 0:
            page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center'))
        graphs = pageToGraphs.get(page)
        page.prepend(HTMLgen.Heading(2, selContainer, align = 'center'))
        if minorVersionHeader.contents:
            if not graphs is None and len(graphs) > 1:
                page.prepend(HTMLgen.Heading(1, *graphs, align = 'center'))
            page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center'))
        creationDate = TitleWithDateStamp("").__str__().strip()
        page.prepend(HTMLgen.Paragraph(creationDate, align="center"))
        page.prepend(HTMLgen.Heading(1, self.getHeading(), align = 'center'))
        if len(pageColours) > 0:
            page.prepend(HTMLgen.BR());
            page.prepend(HTMLgen.BR());
            page.script = self.getFilterScripts(pageColours)
    self.writePages()
def organiseApps(self):
    """Group application version suffixes by full application name.

    Returns an OrderedDict mapping app.fullName() to the list of
    app.versionSuffix() values, in the order the apps appear in self.allApps.
    """
    byName = OrderedDict()
    for application in self.allApps:
        name = application.fullName()
        if name not in byName:
            byName[name] = []
        byName[name].append(application.versionSuffix())
    return byName
class GenerateWebPages(object):
    """Builds the historical HTML report: one overview page per selector and
    resource, plus per-tag detail pages, from pickled teststate files found in
    the batch repository directories."""

    def __init__(self, getConfigValue, pageDir, resourceNames, pageTitle, pageSubTitle, pageVersion, extraVersions, descriptionInfo):
        self.pageTitle = pageTitle
        self.pageSubTitle = pageSubTitle
        self.pageVersion = pageVersion
        self.extraVersions = extraVersions
        self.pageDir = pageDir
        # filePath -> (resourceName, page, pageColours); pages are shared across versions.
        self.pagesOverview = OrderedDict()
        # tag -> TestDetails
        self.pagesDetails = OrderedDict()
        self.getConfigValue = getConfigValue
        self.resourceNames = resourceNames
        self.descriptionInfo = descriptionInfo
        self.diag = logging.getLogger("GenerateWebPages")

    def makeSelectors(self, subPageNames, tags=[]):
        """Create a Selector per sub-page; the first configured sub-page gets no
        file-name suffix, the others get "_<name>"."""
        # NOTE(review): mutable default argument 'tags=[]' — safe only if callees
        # never mutate it; confirm Selector does not.
        allSelectors = []
        firstSubPageName = self.getConfigValue("historical_report_subpages", "default")[0]
        for subPageName in subPageNames:
            if subPageName == firstSubPageName:
                suffix = ""
            else:
                suffix = "_" + subPageName.lower()
            allSelectors.append(Selector(subPageName, suffix, self.getConfigValue, tags))
        return allSelectors

    def generate(self, repositoryDirs, subPageNames):
        """Main entry point: collate teststate files for every version and write
        all overview and detail pages."""
        # resourceName -> HTMLgen.Container of minor-version anchor links.
        foundMinorVersions = {}
        allMonthSelectors = set()
        latestMonth = None
        pageToGraphs = {}
        for version, repositoryDirInfo in repositoryDirs.items():
            self.diag.info("Generating " + version)
            allFiles, tags = self.findTestStateFilesAndTags(repositoryDirInfo)
            if len(allFiles) > 0:
                selectors = self.makeSelectors(subPageNames, tags)
                monthSelectors = SelectorByMonth.makeInstances(tags)
                allMonthSelectors.update(monthSelectors)
                allSelectors = selectors + list(reversed(monthSelectors))
                # If we already have month pages, we only regenerate the current one
                if len(self.getExistingMonthPages()) == 0:
                    selectors = allSelectors
                else:
                    currLatestMonthSel = monthSelectors[-1]
                    if latestMonth is None or currLatestMonthSel.linkName == latestMonth:
                        selectors.append(monthSelectors[-1])
                        latestMonth = currLatestMonthSel.linkName
                # Restrict to tags some selector will actually display, then
                # re-sort chronologically (Python 2 cmp-style sort).
                tags = list(reduce(set.union, (set(selector.selectedTags) for selector in selectors), set()))
                tags.sort(self.compareTags)
                # loggedTests: extraVersion -> testId -> tag -> state
                loggedTests = OrderedDict()
                categoryHandlers = {}
                for stateFile, repository in allFiles:
                    tag = self.getTagFromFile(stateFile)
                    if len(tags) == 0 or tag in tags:
                        testId, state, extraVersion = self.processTestStateFile(stateFile, repository)
                        loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state
                        categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, state, extraVersion)
                versionToShow = self.removePageVersion(version)
                for resourceName in self.resourceNames:
                    hasData = False
                    for sel in selectors:
                        filePath = self.getPageFilePath(sel, resourceName)
                        if self.pagesOverview.has_key(filePath):
                            _, page, pageColours = self.pagesOverview[filePath]
                        else:
                            page = self.createPage(resourceName)
                            pageColours = set()
                            self.pagesOverview[filePath] = resourceName, page, pageColours
                        for cellInfo in self.getCellInfoForResource(resourceName):
                            tableHeader = self.getTableHeader(resourceName, cellInfo, version, repositoryDirs)
                            heading = self.getHeading(resourceName, versionToShow)
                            hasNewData, graphLink, tableColours = self.addTable(page, cellInfo, categoryHandlers, version, loggedTests, sel, tableHeader, filePath, heading)
                            hasData |= hasNewData
                            pageColours.update(tableColours)
                            if graphLink:
                                pageToGraphs.setdefault(page, []).append(graphLink)
                    if hasData and versionToShow:
                        link = HTMLgen.Href("#" + version, versionToShow)
                        foundMinorVersions.setdefault(resourceName, HTMLgen.Container()).append(link)
                # put them in reverse order, most relevant first
                linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ]
                for tag in tags:
                    details = self.pagesDetails.setdefault(tag, TestDetails(tag, self.pageTitle, self.pageSubTitle))
                    details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview)
        # All versions processed: build shared navigation, decorate each page, write out.
        selContainer = HTMLgen.Container()
        selectors = self.makeSelectors(subPageNames)
        for sel in selectors:
            target, linkName = sel.getLinkInfo(self.pageVersion)
            selContainer.append(HTMLgen.Href(target, linkName))
        monthContainer = HTMLgen.Container()
        for sel in sorted(allMonthSelectors):
            target, linkName = sel.getLinkInfo(self.pageVersion)
            monthContainer.append(HTMLgen.Href(target, linkName))
        for resourceName, page, pageColours in self.pagesOverview.values():
            # prepend() builds the header bottom-up: later prepends end up higher.
            if len(monthContainer.contents) > 0:
                page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center'))
            graphs = pageToGraphs.get(page)
            page.prepend(HTMLgen.Heading(2, selContainer, align = 'center'))
            minorVersionHeader = foundMinorVersions.get(resourceName)
            if minorVersionHeader:
                if not graphs is None and len(graphs) > 1:
                    page.prepend(HTMLgen.Heading(1, *graphs, align = 'center'))
                page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center'))
            page.prepend(HTMLgen.Heading(1, self.getHeading(resourceName), align = 'center'))
            if len(pageColours) > 0:
                page.prepend(HTMLgen.BR());
                page.prepend(HTMLgen.BR());
                page.script = self.getFilterScripts(pageColours)
        self.writePages()

    def getFilterScripts(self, pageColours):
        """Return the <script> elements driving client-side row filtering,
        embedding the colours present on the page."""
        finder = ColourFinder(self.getConfigValue)
        rowHeaderColour = finder.find("row_header_bg")
        successColour = finder.find("success_bg")
        # Always put green at the start, we often want to filter that
        sortedColours = sorted(pageColours, key=lambda c: (c != successColour, c))
        scriptCode = "var TEST_ROW_HEADER_COLOR = " + repr(rowHeaderColour) + ";\n" + \
                     "var Colors = " + repr(sortedColours) + ";"
        return [ HTMLgen.Script(code=scriptCode),
                 HTMLgen.Script(src="../javascript/jquery.js"),
                 HTMLgen.Script(src="../javascript/filter.js"),
                 HTMLgen.Script(src="../javascript/plugin.js") ]

    def getHeading(self, resourceName, versionToShow=""):
        # e.g. "Test results for MyApp.1.2"
        heading = self.getResultType(resourceName) + " results for " + self.pageTitle
        if versionToShow:
            heading += "." + versionToShow
        return heading

    def getTableHeader(self, resourceName, cellInfo, version, repositoryDirs):
        # e.g. "Memory Results for 1.2"; empty when neither part applies.
        parts = []
        if resourceName != cellInfo:
            parts.append(cellInfo.capitalize() + " Results")
        if len(repositoryDirs) > 1:
            parts.append(version)
        return " for ".join(parts)

    def getCellInfoForResource(self, resourceName):
        # Configurable extra tables per resource page; default to one table.
        fromConfig = self.getConfigValue("historical_report_resource_page_tables", resourceName)
        if fromConfig:
            return fromConfig
        else:
            return [ resourceName ]

    def getResultType(self, resourceName):
        return resourceName.capitalize() if resourceName else "Test"

    def getExistingMonthPages(self):
        # Month pages are named test_<pageVersion>_all_<Mmm><yyyy>.html
        return glob(os.path.join(self.pageDir, "test_" + self.pageVersion + "_all_???[0-9][0-9][0-9][0-9].html"))

    def compareTags(self, x, y):
        # Python 2 cmp-style comparator: order tags chronologically first.
        timeCmp = cmp(self.getTagTimeInSeconds(x), self.getTagTimeInSeconds(y))
        if timeCmp:
            return timeCmp
        elif len(x) != len(y):
            # If the timing is the same, sort alphabetically
            # Any number should be sorted numerically, do this by padding them with leading zeroes
            return cmp(plugins.padNumbersWithZeroes(x), plugins.padNumbersWithZeroes(y))
        else:
            return cmp(x, y)

    def getTagFromFile(self, fileName):
        # Tag is the file name with the "teststate_" prefix stripped.
        return os.path.basename(fileName).replace("teststate_", "")

    def findTestStateFilesAndTags(self, repositoryDirs):
        """Collect all teststate files under the given (version, dir) pairs.

        Returns ([(path, repositoryDir), ...], chronologically sorted tag list)."""
        allFiles = []
        allTags = set()
        for _, dir in repositoryDirs:
            for root, _, files in os.walk(dir):
                for file in files:
                    if file.startswith("teststate_"):
                        allFiles.append((os.path.join(root, file), dir))
                        allTags.add(self.getTagFromFile(file))
        # Python 2 sorted() with a cmp function.
        return allFiles, sorted(allTags, self.compareTags)

    def processTestStateFile(self, stateFile, repository):
        # Unpickle one teststate file and derive the test's identity and extra version.
        state = self.readState(stateFile)
        testId = self.getTestIdentifier(stateFile, repository)
        extraVersion = self.findExtraVersion(repository)
        return testId, state, extraVersion

    def findExtraVersion(self, repository):
        # Longest-suffix match of the repository's dotted name against the known
        # extra versions, e.g. "app.x86.nightly" tries "x86.nightly" then "nightly".
        versions = os.path.basename(repository).split(".")
        for i in xrange(len(versions)):
            version = ".".join(versions[i:])
            if version in self.extraVersions:
                return version
        return ""

    def findGlobal(self, modName, className):
        # Resolve a class by name for unpickling (Python 2 exec-import).
        exec "from " + modName + " import " + className + " as _class"
        return _class #@UndefinedVariable

    def getNewState(self, file):
        # Would like to do load(file) here... but it doesn't work with universal line endings, see Python bug 1724366
        from cStringIO import StringIO
        unpickler = Unpickler(StringIO(file.read()))
        # Magic to keep us backward compatible in the face of packages changing...
        unpickler.find_global = self.findGlobal
        return unpickler.load()

    def readState(self, stateFile):
        """Read a pickled TestState from stateFile, mapping known unpickling
        failures to an error state rather than raising."""
        # NOTE(review): the file handle is never closed on any path — consider
        # try/finally or a with-block.
        file = open(stateFile, "rU")
        try:
            state = self.getNewState(file)
            if isinstance(state, plugins.TestState):
                return state
            else:
                return self.readErrorState("Incorrect type for state object.")
        except (UnpicklingError, ImportError, EOFError, AttributeError), e:
            # An empty file most likely means the disk filled up mid-write.
            if os.path.getsize(stateFile) > 0:
                return self.readErrorState("Stack info follows:\n" + str(e))
            else:
                return plugins.Unrunnable("Results file was empty, probably the disk it resides on is full.", "Disk full?")
class CategoryHandler:
    """Accumulates (testId, state, extraVersion) records keyed by the state's
    own category, and renders text/HTML summaries of the counts."""

    def __init__(self):
        # category -> [(testId, state, extraVersion), ...] in registration order.
        self.testsInCategory = OrderedDict()

    def update(self, categoryHandler):
        """Fold another handler's registered tests into this one, in place."""
        for category, newEntries in categoryHandler.testsInCategory.items():
            existing = self.testsInCategory.setdefault(category, [])
            existing.extend(newEntries)

    def registerInCategory(self, testId, state, extraVersion):
        """Record one result under the category carried by the state object."""
        bucket = self.testsInCategory.setdefault(state.category, [])
        bucket.append((testId, state, extraVersion))

    def getDescription(self, cat, count):
        # e.g. "3 failed" — short description comes from the category table.
        return str(count) + " " + getCategoryDescription(cat)[0]

    def getTestCountDescription(self, count):
        return str(count) + " tests: "

    def generateTextSummary(self):
        """Return a one-line plain-text summary of all categories."""
        total, perCategory = self.getSummaryData()
        described = [self.getDescription(cat, count) for cat, count in perCategory]
        return self.getTestCountDescription(total) + " ".join(described)

    def generateHTMLSummary(self, detailPageRef, extraVersion=None):
        """Return an HTMLgen fragment of the summary, linking every non-success
        category to its section on the detail page."""
        total, perCategory = self.getSummaryData(extraVersion)
        body = HTMLgen.Container()
        for cat, count in perCategory:
            item = HTMLgen.Text(self.getDescription(cat, count))
            if cat == "success":
                body.append(item)
            else:
                body.append(HTMLgen.Href(detailPageRef + getCategoryDescription(cat)[-1], item))
        prefix = HTMLgen.Text(self.getTestCountDescription(total))
        return HTMLgen.Container(prefix, body)

    def countTests(self, testInfo, extraVersion):
        """Count the records, restricted to one extra version if given."""
        if extraVersion is None:
            return len(testInfo)
        matches = 0
        for _, _, currExtra in testInfo:
            if currExtra == extraVersion:
                matches += 1
        return matches

    def getSummaryData(self, extraVersion=None):
        """Return (totalTests, [(category, count), ...]) sorted for display."""
        total = 0
        summaryData = []
        for cat, testInfo in self.testsInCategory.items():
            count = self.countTests(testInfo, extraVersion)
            if count > 0:
                total += count
                summaryData.append((cat, count))
        summaryData.sort(key=self.getSummarySortKey)
        return total, summaryData

    def getTestsWithDescriptions(self):
        described = [(getCategoryDescription(cat)[1], testInfo)
                     for cat, testInfo in self.testsInCategory.items()]
        described.sort()
        return described

    def getSummarySortKey(self, data):
        # Put success at the start, it's neater like that
        cat, count = data
        return cat != "success", -count, cat
class QueueSystemServer(BaseActionRunner): instance = None def __init__(self, optionMap, allApps): BaseActionRunner.__init__(self, optionMap, logging.getLogger("Queue System Submit")) # queue for putting tests when we couldn't reuse the originals self.reuseFailureQueue = Queue() self.testCount = 0 self.testsSubmitted = 0 self.maxCapacity = 100000 # infinity, sort of self.allApps = allApps for app in allApps: currCap = app.getConfigValue("queue_system_max_capacity") if currCap is not None and currCap < self.maxCapacity: self.maxCapacity = currCap self.jobs = OrderedDict() self.submissionRules = {} self.killedJobs = {} self.queueSystems = {} self.reuseOnly = False self.submitAddress = None self.slaveLogDirs = set() self.delayedTestsForAdd = [] self.remainingForApp = OrderedDict() capacityPerSuite = self.maxCapacity / len(allApps) for app in allApps: self.remainingForApp[app.name] = capacityPerSuite self.getQueueSystem(app) # populate cache QueueSystemServer.instance = self def addSuites(self, suites): for suite in suites: self.slaveLogDirs.add(suite.app.makeWriteDirectory("slavelogs")) plugins.log.info("Using " + queueSystemName(suite.app) + " queues for " + suite.app.description(includeCheckout=True)) def setSlaveServerAddress(self, address): self.submitAddress = os.getenv("CAPTUREMOCK_SERVER", address) self.testQueue.put("TextTest slave server started on " + address) def addTest(self, test): capacityForApp = self.remainingForApp[test.app.name] if capacityForApp > 0: self.addTestToQueues(test) self.remainingForApp[test.app.name] = capacityForApp - 1 else: if test.app.name == self.remainingForApp.keys()[-1]: self.addTestToQueues(test) # For the last app (which may be the only one) there is no point in delaying else: self.delayedTestsForAdd.append(test) def addTestToQueues(self, test): self.testCount += 1 queue = self.findQueueForTest(test) if queue: queue.put(test) def addDelayedTests(self): for test in self.delayedTestsForAdd: self.addTestToQueues(test) 
self.delayedTestsForAdd = [] def notifyAllRead(self, suites): self.addDelayedTests() BaseActionRunner.notifyAllRead(self, suites) def run(self): # picked up by core to indicate running in a thread self.runAllTests() if len(self.jobs): self.diag.info("All jobs submitted, polling the queue system now.") if self.canPoll(): self.pollQueueSystem() def pollQueueSystem(self): # Start by polling after 5 seconds, ever after try every 15 attempts = int(os.getenv("TEXTTEST_QS_POLL_WAIT", "5")) * 2 # Amount of time to wait before initiating polling of SGE if attempts >= 0: while True: for i in range(attempts): time.sleep(0.5) if self.allComplete or self.exited: return self.updateJobStatus() attempts = 30 # In case any tests have had reruns triggered since we stopped submitting self.runQueue(self.getTestForRun, self.runTest, "rerunning", block=False) def canPoll(self): queueSystem = self.getQueueSystem(self.jobs.keys()[0]) return queueSystem.supportsPolling() def updateJobStatus(self): queueSystem = self.getQueueSystem(self.jobs.keys()[0]) statusInfo = queueSystem.getStatusForAllJobs() self.diag.info("Got status for all jobs : " + repr(statusInfo)) if statusInfo is not None: # queue system not available for some reason for test, jobs in self.jobs.items(): if not test.state.isComplete(): for jobId, _ in jobs: status = statusInfo.get(jobId) if status and test.state.hasStarted() and test.state.briefText: # Only do this to test jobs (might make a difference for derived configurations) # Ignore filtering states for now, which have empty 'briefText'. 
self.updateRunStatus(test, status) elif not status and not self.jobStarted(test): # Do this to any jobs self.setSlaveFailed(test, False, True) def updateRunStatus(self, test, status): newRunStatus, newExplanation = status newState = test.state.makeModifiedState(newRunStatus, newExplanation, "grid status update") if newState: test.changeState(newState) def findQueueForTest(self, test): # If we've gone into reuse mode and there are no active tests for reuse, use the "reuse failure queue" if self.reuseOnly and self.testsSubmitted == 0: self.diag.info("Putting " + test.uniqueName + " in reuse failure queue " + self.remainStr()) return self.reuseFailureQueue else: self.diag.info("Putting " + test.uniqueName + " in normal queue " + self.remainStr()) return self.testQueue def handleLocalError(self, test, previouslySubmitted): self.handleErrorState(test, previouslySubmitted) if self.testCount == 0 or (self.reuseOnly and self.testsSubmitted == 0): self.diag.info("Submitting terminators after local error") self.submitTerminators() def submitTerminators(self): # snap out of our loop if this was the last one. 
Rely on others to manage the test queue self.reuseFailureQueue.put(None) def getTestForReuse(self, test, state, tryReuse): # Pick up any test that matches the current one's resource requirements if not self.exited: # Don't allow this to use up the terminator newTest = self.getTest(block=False, replaceTerminators=True) if newTest: if tryReuse and self.allowReuse(test, state, newTest): self.jobs[newTest] = self.getJobInfo(test) if self.testCount > 1: self.testCount -= 1 postText = self.remainStr() else: # Don't allow test count to drop to 0 here, can cause race conditions self.submitTerminators() postText = ": submitting terminators as final test" self.diag.info("Reusing slave from " + test.uniqueName + " for " + newTest.uniqueName + postText) return newTest else: self.diag.info("Adding to reuse failure queue : " + newTest.uniqueName) self.reuseFailureQueue.put(newTest) else: self.diag.info("No tests available for reuse : " + test.uniqueName) # Allowed a submitted job to terminate self.testsSubmitted -= 1 self.diag.info("No reuse for " + test.uniqueName + " : " + repr(self.testsSubmitted) + " tests still submitted") if self.exited and self.testsSubmitted == 0: self.diag.info("Forcing termination") self.submitTerminators() def allowReuse(self, oldTest, oldState, newTest): # Don't reuse jobs that have been killed if newTest.state.isComplete() or oldState.category == "killed": return False if oldTest.getConfigValue("queue_system_proxy_executable") or \ newTest.getConfigValue("queue_system_proxy_executable"): return False # Jobs maintain the same virtual display instance where possible, if they require different settings they can't be reused if oldTest.getConfigValue("virtual_display_extra_args") != newTest.getConfigValue("virtual_display_extra_args"): return False oldRules = self.getSubmissionRules(oldTest) newRules = self.getSubmissionRules(newTest) return oldRules.allowsReuse(newRules) def getJobSubmissionRules(self, test): proxyRules = 
self.getProxySubmissionRules(test) if proxyRules: return proxyRules else: return self.getSubmissionRules(test) def getProxySubmissionRules(self, test): proxyResources = test.getConfigValue("queue_system_proxy_resource") if proxyResources: return test.app.getProxySubmissionRulesClass()(test, proxyResources) def getSubmissionRules(self, test): if self.submissionRules.has_key(test): return self.submissionRules[test] else: submissionRules = test.app.getSubmissionRules(test) self.submissionRules[test] = submissionRules return submissionRules def getTest(self, block, replaceTerminators=False): testOrStatus = self.getItemFromQueue(self.testQueue, block, replaceTerminators) if not testOrStatus: return if type(testOrStatus) == StringType: self.sendServerState(testOrStatus) return self.getTest(block) else: return testOrStatus def sendServerState(self, state): self.diag.info("Sending server state '" + state + "'") mimServAddr = os.getenv("CAPTUREMOCK_SERVER") if mimServAddr: host, port = mimServAddr.split(":") serverAddress = (host, int(port)) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect(serverAddress) sock.sendall("SUT_SERVER:" + state + "\n") sock.close() def getTestForRunNormalMode(self, block): self.reuseOnly = False reuseFailure = self.getItemFromQueue(self.reuseFailureQueue, block=False) if reuseFailure: self.diag.info("Found a reuse failure...") return reuseFailure else: self.diag.info("Waiting for new tests...") newTest = self.getTest(block=block) if newTest: return newTest else: # Make sure we pick up anything that failed in reuse while we were submitting the final test... 
self.diag.info("No normal test found, checking reuse failures...") return self.getItemFromQueue(self.reuseFailureQueue, block=False) def getTestForRunReuseOnlyMode(self, block): self.reuseOnly = True self.diag.info("Waiting for reuse failures...") reuseFailure = self.getItemFromQueue(self.reuseFailureQueue, block=block) if reuseFailure: return reuseFailure elif self.testCount > 0 and self.testsSubmitted < self.maxCapacity: # Try again, the capacity situation has changed... return self.getTestForRunNormalMode(block) def getTestForRun(self, block=True): if self.testCount == 0 or (self.testsSubmitted < self.maxCapacity): return self.getTestForRunNormalMode(block) else: return self.getTestForRunReuseOnlyMode(block) def notifyAllComplete(self): BaseActionRunner.notifyAllComplete(self) errors = {} errorFiles = [] for logDir in self.slaveLogDirs: errorFiles += filter(os.path.getsize, glob(os.path.join(logDir, "*.errors"))) if len(errorFiles) == 0: return for fileName in errorFiles: contents = None # Take the shortest (i.e. most filtered) one for app in self.allApps: currContent = app.filterErrorText(fileName) if contents is None or len(currContent) < len(contents): contents = currContent if contents: errors[contents] = os.path.basename(fileName)[:-7] for msg, jobName in errors.items(): sys.stderr.write("WARNING: error produced by slave job '" + jobName + "'\n" + msg) def cleanup(self): self.sendServerState("Completed submission of all tests") def remainStr(self): return " : " + str(self.testCount) + " tests remain, " + str(self.testsSubmitted) + " are submitted." 
    def runTest(self, test):
        # Submit 'test' to the queue system as a slave job. On success, update
        # the remaining/submitted counters and move the test to a pending state;
        # on failure submitJob has already put the test in an error state.
        submissionRules = self.getSubmissionRules(test)
        command = self.getSlaveCommand(test, submissionRules)
        plugins.log.info("Q: Submitting " + repr(test) + submissionRules.getSubmitSuffix())
        sys.stdout.flush()
        self.jobs[test] = [] # Preliminary jobs aren't interesting any more
        if not self.submitJob(test, submissionRules, command, self.getSlaveEnvironment()):
            return

        self.testCount -= 1
        self.testsSubmitted += 1
        self.diag.info("Submission successful" + self.remainStr())
        if not test.state.hasStarted():
            test.changeState(self.getPendingState(test))
        if self.testsSubmitted == self.maxCapacity:
            self.sendServerState("Completed submission of tests up to capacity")

    def getSlaveVarsToBlock(self):
        """Make sure we clear out the master scripts so the slave doesn't use them too,
        otherwise just use the environment as is.

        If we're being run via SSH, don't pass this on to the slave jobs
        This has been known to trip up shell starter scripts, e.g. on SuSE 10
        making them believe that the SGE job is an SSH login and setting things wrongly
        as a result."""
        return [ "USECASE_REPLAY_SCRIPT", "USECASE_RECORD_SCRIPT", "SSH_TTY" ]

    def getSlaveEnvironment(self):
        # Copy of our environment minus the variables listed above.
        return plugins.copyEnvironment(ignoreVars=self.getSlaveVarsToBlock())

    def fixDisplay(self, env):
        # Must make sure SGE jobs don't get a locally referencing DISPLAY
        display = env.get("DISPLAY")
        if display and display.startswith(":"):
            env["DISPLAY"] = plugins.gethostname() + display

    def getPendingState(self, test):
        # State shown while the job waits in the queue system.
        freeText = "Job pending in " + queueSystemName(test.app)
        return plugins.TestState("pending", freeText=freeText, briefText="PEND", lifecycleChange="become pending")

    def shellWrap(self, command):
        # Must use exec so as not to create extra processes: SGE's qdel isn't very clever when
        # it comes to noticing extra shells
        return "exec $SHELL -c \"exec " + command + "\""

    def getSlaveCommand(self, test, submissionRules):
        # Build the full command line that runs TextTest in slave mode for
        # this one test, as a single string.
        cmdArgs = [ plugins.getTextTestProgram(), "-d", ":".join(self.optionMap.rootDirectories),
                    "-a", test.app.name + test.app.versionSuffix(), "-l",
                    "-tp", plugins.quote(test.getRelPath()) ] + \
                    self.getSlaveArgs(test) + self.getRunOptions(test.app, submissionRules)
        return " ".join(cmdArgs)

    def getSlaveArgs(self, test):
        # Arguments telling the slave where to write and where the master listens.
        return [ "-slave", test.app.writeDirectory, "-servaddr", self.submitAddress ]

    def getRunOptions(self, app, submissionRules):
        # Forward the master's relevant command-line switches to the slave,
        # plus debug-logging redirection when "-x" was given.
        runOptions = []
        for slaveSwitch in app.getSlaveSwitches():
            if self.optionMap.has_key(slaveSwitch):
                option = "-" + slaveSwitch
                runOptions.append(option)
                value = self.optionMap.get(slaveSwitch)
                if value:
                    runOptions.append(value)

        if self.optionMap.has_key("x"):
            runOptions.append("-xr")
            runOptions.append(self.optionMap.get("xr", os.path.expandvars("$TEXTTEST_PERSONAL_LOG/logging.debug")))
            runOptions.append("-xw")
            runOptions.append(os.path.expandvars("$TEXTTEST_PERSONAL_LOG/" + submissionRules.getJobName()))
        return runOptions

    def getSlaveLogDir(self, test):
        # Directory where slave job stdout/stderr files end up.
        return os.path.join(test.app.writeDirectory, "slavelogs")

    def getSubmitCmdArgs(self, test, submissionRules):
        # Queue-system submission command (e.g. qsub arguments) plus any
        # user-configured extra arguments.
        queueSystem = self.getQueueSystem(test)
        extraArgs = submissionRules.getExtraSubmitArgs()
        cmdArgs = queueSystem.getSubmitCmdArgs(submissionRules)
        if extraArgs:
            cmdArgs += plugins.splitcmd(extraArgs)
        return cmdArgs

    def getQueueSystemCommand(self, test):
        # Human-readable description of what would be submitted, for display.
        submissionRules = self.getSubmissionRules(test)
        cmdArgs = self.getSubmitCmdArgs(test, submissionRules)
        text = queueSystemName(test) + " Command : " + plugins.commandLineString(cmdArgs) + " ...\n" + \
               "Slave Command : " + self.getSlaveCommand(test, submissionRules) + "\n"
        proxyArgs = self.getProxyCmdArgs(test)
        if proxyArgs:
            return queueSystemName(test) + " Proxy Command : " + plugins.commandLineString(proxyArgs) + "\n" + text
        else:
            return text

    def getProxyCmdArgs(self, test):
        # When "queue_system_proxy_executable" is configured, build the
        # submission arguments for the proxy job that wraps the real one.
        # Returns [] when no proxy is configured.
        proxyCmd = test.getConfigValue("queue_system_proxy_executable")
        if proxyCmd:
            proxyOptions = test.getCommandLineOptions("proxy_options")
            fullProxyCmd = proxyCmd + " " + " ".join(proxyOptions)
            proxyRules = self.getJobSubmissionRules(test)
            proxyArgs = self.getSubmitCmdArgs(test, proxyRules)
            proxyArgs.append(self.shellWrap(fullProxyCmd))
            return proxyArgs
        else:
            return []

    def createSubmitProcess(self, test, cmdArgs, slaveEnv):
        # Launch the submission command. If a proxy is configured, submit the
        # proxy instead and hand it the real command via the environment.
        logDir = self.getSlaveLogDir(test)
        proxyArgs = self.getProxyCmdArgs(test)
        if proxyArgs:
            cmdArgs[1:1] = [ "-sync", "y" ] # must synchronise in the proxy
            slaveEnv["TEXTTEST_SUBMIT_COMMAND_ARGS"] = repr(cmdArgs) # Exact command arguments to run TextTest slave, for use by proxy
            cmdArgs = proxyArgs
        return subprocess.Popen(cmdArgs, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                cwd=logDir, env=slaveEnv)

    def submitJob(self, test, submissionRules, command, slaveEnv):
        # Submit one job under self.lock. Returns True on success (job id
        # recorded in self.jobs), False when cancelled or submission failed
        # (test moved to an Unrunnable state). Note the lock is released on
        # every branch; the release ordering differs deliberately per branch.
        self.diag.info("Submitting job at " + plugins.localtime() + ":" + command)
        self.diag.info("Creating job at " + plugins.localtime())
        cmdArgs = self.getSubmitCmdArgs(test, submissionRules)
        cmdArgs.append(self.shellWrap(command))
        jobName = submissionRules.getJobName()
        self.fixDisplay(slaveEnv)
        self.diag.info("Creating job " + jobName + " with command arguments : " + repr(cmdArgs))
        self.lock.acquire()
        if self.exited:
            # Shutdown already underway: don't submit, just cancel the test.
            self.cancel(test)
            self.lock.release()
            plugins.log.info("Q: Submission cancelled for " + repr(test) + " - exit underway")
            return False

        self.lockDiag.info("Got lock for submission")
        queueSystem = self.getQueueSystem(test)
        try:
            process = self.createSubmitProcess(test, cmdArgs, slaveEnv)
            stdout, stderr = process.communicate()
            errorMessage = self.findErrorMessage(stderr, queueSystem)
        except OSError:
            # The submission binary itself couldn't be started.
            errorMessage = "local machine is not a submit host: running '" + cmdArgs[0] + "' failed."
        if not errorMessage:
            jobId = queueSystem.findJobId(stdout)
            self.diag.info("Job created with id " + jobId)
            self.jobs.setdefault(test, []).append((jobId, jobName))
            self.lockDiag.info("Releasing lock for submission...")
            self.lock.release()
            return True
        else:
            self.lock.release()
            self.diag.info("Job not created : " + errorMessage)
            fullError = self.getFullSubmitError(test, errorMessage, cmdArgs)
            test.changeState(plugins.Unrunnable(fullError, "NOT SUBMITTED"))
            self.handleErrorState(test)
            return False

    def findErrorMessage(self, stderr, queueSystem):
        # Let the queue system interpret any stderr output; None when empty.
        if len(stderr) > 0:
            return queueSystem.findSubmitError(stderr)

    def getFullSubmitError(self, test, errorMessage, cmdArgs):
        # Full failure text for the Unrunnable state; the shell-wrapped slave
        # command (last arg) is elided from the displayed submission command.
        qname = queueSystemName(test.app)
        return "Failed to submit to " + qname + " (" + errorMessage.strip() + ")\n" + \
               "Submission command was '" + " ".join(cmdArgs[:-1]) + " ... '\n"

    def handleErrorState(self, test, previouslySubmitted=False):
        # Adjust counters for a test that went into an error state, then run
        # the known-bug checker over it before marking its actions complete.
        if previouslySubmitted:
            self.testsSubmitted -= 1
        else:
            self.testCount -= 1
        self.diag.info(repr(test) + " in error state" + self.remainStr())
        bugchecker = CheckForBugs()
        self.setUpSuites(bugchecker, test)
        bugchecker(test)
        test.actionsCompleted()

    def setUpSuites(self, bugchecker, test):
        # Recursively set up the bug checker for all enclosing suites.
        if test.parent:
            bugchecker.setUpSuite(test.parent)
            self.setUpSuites(bugchecker, test.parent)

    def _getJobFailureInfo(self, test):
        jobInfo = self.getJobInfo(test)
        if len(jobInfo) == 0:
            return "No job has been submitted to " + queueSystemName(test)
        queueSystem = self.getQueueSystem(test)
        # Take the most recent job, it's hopefully the most interesting
        jobId = jobInfo[-1][0]
        return queueSystem.getJobFailureInfo(jobId)

    def getSlaveErrors(self, test, name):
        # Return a decorated copy of the slave job's error-file contents, or
        # implicitly None when there is no error file / it is empty.
        slaveErrFile = self.getSlaveErrFile(test)
        if slaveErrFile:
            errors = open(slaveErrFile).read()
            if errors:
                return "-" * 10 + " Error messages written by " + name + " job " + "-" * 10 + \
                       "\n" + errors

    def getSlaveErrFile(self, test):
        # First existing "<jobName>.errors" file among this test's jobs.
        for _, jobName in self.getJobInfo(test):
            errFile = os.path.join(self.getSlaveLogDir(test), jobName + ".errors")
            if os.path.isfile(errFile):
                return errFile

    def getJobInfo(self, test):
        # List of (jobId, jobName) pairs recorded for this test.
        return self.jobs.get(test, [])

    def killJob(self, test, jobId, jobName):
        prevTest, prevJobExisted = self.killedJobs.get(jobId, (None, False))
        # Killing the same job for other tests should result in the cached result being returned
        if prevTest and test is not prevTest:
            return prevJobExisted

        self.describeJob(test, jobId, jobName)
        queueSystem = self.getQueueSystem(test)
        jobExisted = queueSystem.killJob(jobId)
        self.killedJobs[jobId] = test, jobExisted
        return jobExisted

    def getQueueSystem(self, test):
        # Lazily import and cache the QueueSystem implementation for the
        # test's configured queue system (module named after it, lowercased).
        # Uses the Python 2 'exec' statement for the dynamic import.
        queueModuleText = queueSystemName(test)
        if queueModuleText is None:
            return None
        queueModule = queueModuleText.lower()
        if self.queueSystems.has_key(queueModule):
            return self.queueSystems[queueModule]

        command = "from " + queueModule + " import QueueSystem as _QueueSystem"
        exec command
        system = _QueueSystem()
        self.queueSystems[queueModule] = system
        return system

    def changeState(self, test, newState, previouslySubmitted=True):
        # Change state and propagate the error bookkeeping.
        test.changeState(newState)
        self.handleLocalError(test, previouslySubmitted)

    def killTests(self):
        # If we've been killed with some sort of limit signal, wait here until we know
        # all tests terminate. Otherwise we rely on them terminating naturally, and if they don't
        wantStatus = self.killSignal and self.killSignal != signal.SIGINT
        killedTests = []
        for test, jobList in self.jobs.items():
            if not test.state.isComplete():
                for jobId, jobName in jobList:
                    if self.killTest(test, jobId, jobName, wantStatus):
                        killedTests.append((test, jobId))
        if wantStatus:
            self.waitForKill(killedTests)

    def waitForKill(self, killedTests):
        # Wait for a minute for the kill to take effect, otherwise give up
        stillRunning = killedTests
        for attempt in range(1, 61):
            # Python 2 tuple-parameter lambda; filter returns a list here.
            stillRunning = filter(lambda (test, jobId): not test.state.isComplete(), stillRunning)
            if len(stillRunning) == 0:
                return
            time.sleep(1)
            for test, jobId in stillRunning:
                plugins.log.info("T: Cancellation in progress for " + repr(test) + ", waited " + str(attempt) + " seconds so far.")
        for test, jobId in stillRunning:
            name = queueSystemName(test.app)
            freeText = "Could not delete test in " + name + " (job " + jobId + "): have abandoned it"
            self.changeState(test, Abandoned(freeText))

    def killOrCancel(self, test):
        # Explicitly chose test to kill (from the GUI)
        jobInfo = self.getJobInfo(test)
        if len(jobInfo) > 0:
            for jobId, jobName in jobInfo:
                self.killTest(test, jobId, jobName, wantStatus=True)
        else:
            self.diag.info("No job info found from queue system server, changing state to cancelled")
            return self.cancel(test)

    def killTest(self, test, jobId, jobName, wantStatus):
        # Kill one job of one test. Returns True only when the job existed
        # and was already running (its slave will then report completion).
        self.diag.info("Killing test " + repr(test) + " in state " + test.state.category)
        jobExisted = self.killJob(test, jobId, jobName)
        startNotified = self.jobStarted(test)
        if jobExisted:
            if startNotified:
                self.diag.info("Job " + jobId + " was running.")
                return True
            else:
                self.diag.info("Job " + jobId + " was pending.")
                self.setKilledPending(test, jobId)
                return False
        else:
            self.diag.info("Job " + jobId + " did not exist.")
            # might get here when the test completed since we checked...
            if not test.state.isComplete():
                self.setSlaveFailed(test, startNotified, wantStatus)
            return False

    def setSuspendStateForTests(self, tests, newState):
        # Suspend or resume every job belonging to the given tests.
        for test in tests:
            queueSystem = self.getQueueSystem(test)
            for jobId, jobName in self.getJobInfo(test):
                queueSystem.setSuspendState(jobId, newState)

    def jobStarted(self, test):
        return test.state.hasStarted()

    def setKilledPending(self, test, jobId):
        # Test is in a pending state, not yet started - cancel it rather
        # than wait for a report that will never come.
        timeStr = plugins.localtime("%H:%M")
        briefText = "cancelled pending job at " + timeStr
        freeText = "Test job " + jobId + " was cancelled (while still pending in " + queueSystemName(test.app) +\
                   ") at " + timeStr
        self.cancel(test, briefText, freeText)

    def getJobFailureInfo(self, test, name, wantStatus):
        if wantStatus:
            return "-" * 10 + " Full accounting info from " + name + " " + "-" * 10 + "\n" + \
                   self._getJobFailureInfo(test)
        else:
            # Job accounting info can take ages to find, don't do it from GUI quit
            return "No accounting info found as quitting..."

    def setSlaveFailed(self, test, startNotified, wantStatus):
        # The slave never reported back: assemble a failure state from the
        # slave error files and the queue system's accounting info.
        failReason, fullText = self.getSlaveFailure(test, startNotified, wantStatus)
        fullText = failReason + "\n" + fullText
        self.changeState(test, self.getSlaveFailureState(startNotified, failReason, fullText))

    def getSlaveFailure(self, test, startNotified, wantStatus):
        # Returns (briefReason, fullText) describing why the slave job failed.
        fullText = ""
        name = queueSystemName(test.app)
        slaveErrors = self.getSlaveErrors(test, name)
        if slaveErrors:
            fullText += slaveErrors

        fullText += self.getJobFailureInfo(test, name, wantStatus)
        return self.getSlaveFailureBriefText(name, startNotified), fullText

    def getSlaveFailureBriefText(self, name, startNotified):
        if startNotified:
            return "no report, possibly killed with SIGKILL"
        else:
            return name + " job exited"

    def getSlaveFailureState(self, startNotified, failReason, fullText):
        # "killed" if the test had started (counts as completed), otherwise
        # it never ran at all and becomes Unrunnable.
        if startNotified:
            return plugins.TestState("killed", briefText=failReason,
                                     freeText=fullText, completed=1, lifecycleChange="complete")
        else:
            return plugins.Unrunnable(briefText=failReason, freeText=fullText, lifecycleChange="complete")

    def getPostText(self, test, jobId):
        # Suffix for log messages identifying the queue system and job.
        name = queueSystemName(test.app)
        return "in " + name + " (job " + jobId + ")"

    def describeJob(self, test, jobId, *args):
        postText = self.getPostText(test, jobId)
        plugins.log.info("T: Cancelling " + repr(test) + " " + postText)