async def process(self,
                  req: Request['TaskRunnerHistory_GET.Arguments'],
                  user: User
                  ) -> None:
    runnerId = req.args.runnerId

    jobDB = self.jobDB
    jobs = list(jobDB.values())
    jobs.sort(key=jobDB.retrieverFor('recent'))
    reachedJobsLimit = len(jobs) > _jobsLimit
    if reachedJobsLimit:
        jobs[_jobsLimit:] = []

    # TODO: This is actually not a filter, since it changes the
    #       record type.
    def recordFilter(jobs: Iterable[Job]) -> List[Task]:
        return [
            task
            for job in jobs
            for task in job.getTaskSequence()
            if task['runner'] == runnerId
            ]
    tasks = cast(List[Task], runQuery((recordFilter, ), jobs))

    # pylint: disable=attribute-defined-outside-init
    self.reachedJobsLimit = reachedJobsLimit
    self.tasks = tasks

def __filterRecords(
        self, tagKey: Optional[str], tagValue: Optional[str]
        ) -> Collection[SelectableRecord]:
    if tagKey:
        assert tagValue is not None
        if tagValue:
            # The casts are necessary because mypy seems to ignore
            # the narrowed type in the parameter default value.
            #   https://github.com/python/mypy/issues/2608
            def valueFilter(
                    record: SelectableRecord,
                    tagKey: str = cast(str, tagKey),
                    tagValue: str = cast(str, tagValue)
                    ) -> bool:
                return record.tags.hasTagValue(tagKey, tagValue)
            recordFilter = CustomFilter(valueFilter)
        else:
            def keyFilter(
                    record: SelectableRecord,
                    tagKey: str = cast(str, tagKey)
                    ) -> bool:
                return not record.tags.hasTagKey(tagKey)
            recordFilter = CustomFilter(keyFilter)
        return runQuery((recordFilter, ), self.db)
    else:
        return self.db

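# A minimal, standalone sketch of the closure workaround used above: binding
# the narrowed value through a parameter default gives the nested function its
# own local name, but mypy does not carry the narrowed type into that default,
# hence the cast() (see python/mypy#2608). The names Item and selectByLabel
# are hypothetical and only illustrate the pattern.
from typing import List, Optional, cast

class Item:
    def __init__(self, label: Optional[str]):
        self.label = label

def selectByLabel(items: List[Item], label: Optional[str]) -> List[Item]:
    if label is not None:
        # 'label' is narrowed to 'str' here, but without the cast mypy still
        # sees 'Optional[str]' in the parameter default.
        def matches(item: Item, label: str = cast(str, label)) -> bool:
            return item.label == label
        return [item for item in items if matches(item)]
    else:
        return items

items = [Item('a'), Item('b'), Item(None)]
assert [item.label for item in selectByLabel(items, 'a')] == ['a']
assert selectByLabel(items, None) == items
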
async def process(self,
                  req: Request['ExtractedData_GET.Arguments'],
                  user: User
                  ) -> None:
    await super().process(req, user)

    taskNames = req.args.task
    taskRunDB = self.taskRunDB

    # Determine keys that exist for all the given task names.
    # And clean up the list of active keys.
    keys = intersection(
        taskRunDB.getKeys(taskName) for taskName in taskNames)
    # The empty task set is rejected at argument parsing,
    # so the intersection is always defined.
    assert keys is not None
    activeKeys = req.args.key & keys

    # Query DB.
    query: List[RecordProcessor[Task]] = list(self.iterFilters())
    query.append(KeySorter[Task].forCustom(['starttime']))
    tasks = runQuery(query, self.taskToJobs.iterDoneTasks(taskNames))
    dataByRunId = gatherData(taskRunDB, taskNames, tasks, activeKeys)

    # pylint: disable=attribute-defined-outside-init
    self.keys = keys
    self.activeKeys = activeKeys
    self.tasks = tasks
    self.dataByRunId = dataByRunId

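# A standalone sketch of the kind of intersection() helper assumed above:
# it folds set intersection over its input and yields None for an empty
# input, which is why the assert is only safe because an empty task set is
# already rejected at argument parsing. The name intersectAll is hypothetical;
# this is an illustrative equivalent, not the actual helper.
from typing import AbstractSet, Iterable, Optional, TypeVar

T = TypeVar('T')

def intersectAll(
        collections: Iterable[AbstractSet[T]]
        ) -> Optional[AbstractSet[T]]:
    result: Optional[AbstractSet[T]] = None
    for collection in collections:
        result = collection if result is None else result & collection
    return result

assert intersectAll([]) is None
assert intersectAll([{1, 2, 3}, {2, 3, 4}]) == {2, 3}
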
async def process(self,
                  req: Request['GetJobHistory_GET.Arguments'],
                  user: User
                  ) -> None:
    await super().process(req, user)
    jobs = runQuery(self.iterFilters(), self.jobDB)
    # pylint: disable=attribute-defined-outside-init
    self.jobs = jobs

def __init__(self, db: JobDB, key: str, number: int):
    super().__init__()
    self.number = number
    query: List[RecordProcessor] = [
        CustomFilter(Job.hasFinalResult),
        KeySorter.forDB([key], db)
        ]
    self.records = runQuery(query, db)[:number]
    db.addObserver(self)

async def process(self,
                  req: Request['GetResourceInfo_GET.Arguments'],
                  user: User
                  ) -> None:
    resTypeNames = req.args.type
    resNames = req.args.name

    # Check validity of the optional type names.
    resTypeDB = self.resTypeDB
    invalidTypeNames = sorted(
        name for name in resTypeNames if name not in resTypeDB
        )
    if invalidTypeNames:
        raise InvalidRequest(
            'Non-existing resource types: ' + ', '.join(invalidTypeNames)
            )

    # Check validity of the optional resource names.
    resourceDB = self.resourceDB
    invalidNames = [
        name for name in resNames if name not in resourceDB
        ]
    if invalidNames:
        raise InvalidRequest(
            'Non-existing resource names: ' + ', '.join(sorted(invalidNames))
            )

    # Determine the set of resource types.
    query: List[RecordProcessor] = []
    if resTypeNames:
        # TODO: Use SetFilter.create().
        query.append(SetFilter('type', resTypeNames, resourceDB))
    resources = runQuery(query, resourceDB)

    # Filter out resources with IDs other than those in 'resNames',
    # if that filter is present.
    # TODO: This could also be done using querylib.
    if resNames:
        resources = [res for res in resources if res.getId() in resNames]

    # pylint: disable=attribute-defined-outside-init
    self.resources = resources

async def process(self,
                  req: Request['ReportTasksCSV_GET.Arguments'],
                  user: User
                  ) -> None:
    await super().process(req, user)

    # Note: iterDoneTasks() can efficiently handle an empty (nothing matches)
    #       filter, no need for a special case here.
    query: List[RecordProcessor] = list(self.iterFilters())
    query.append(KeySorter.forCustom(['recent']))
    taskName = self.args.task
    tasks = runQuery(query, self.taskToJobs.iterDoneTasks(taskName))

    # pylint: disable=attribute-defined-outside-init
    self.tasks = tasks

def filterJobs(jobs: Iterable[Job],
               beginWeek: int,
               endWeek: int,
               configFilter: Optional[str]
               ) -> List[Job]:
    '''Returns a new list containing those jobs from the given list
    which match the configuration and time filter.
    '''
    query = []
    if configFilter:
        def configMatches(job: Job) -> bool:
            return job.configId == configFilter
        query.append(CustomFilter(configMatches))
    def timeMatches(job: Job) -> bool:
        return beginWeek <= job.getCreateTime() < endWeek
    query.append(CustomFilter(timeMatches))
    return runQuery(query, jobs)

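# A minimal sketch of the time filter above, with plain integers standing in
# for job creation times and week boundaries. The half-open test
# 'beginWeek <= t < endWeek' keeps consecutive weeks disjoint: a time equal to
# endWeek belongs to the next week, never to both. The helper name inWeek is
# hypothetical.
def inWeek(createTime: int, beginWeek: int, endWeek: int) -> bool:
    return beginWeek <= createTime < endWeek

assert inWeek(100, 100, 200)        # start of the week is included
assert not inWeek(200, 100, 200)    # end boundary belongs to the next week
assert inWeek(200, 200, 300)
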
async def process(self,
                  req: Request['UserDetails_GET.Arguments'],
                  user: User
                  ) -> None:
    infoUserName = req.args.user

    try:
        infoUser = self.userDB[infoUserName]
    except KeyError:
        raise PresentableError(xhtml[
            'User ', xhtml.b[infoUserName], ' does not exist.'
            ])

    jobDB = self.jobDB
    jobs = runQuery(
        [ ValueFilter('owner', infoUserName, jobDB),
          KeySorter.forDB(['recent'], jobDB) ],
        jobDB
        )[:self.visibleJobs]

    # pylint: disable=attribute-defined-outside-init
    self.infoUser = infoUser
    self.jobs = jobs

def __init__(self, table: 'DataTable[Record]', proc: PageProcessor):
    super().__init__()

    columns = tuple(table.iterColumns(proc=proc, data=None))

    dbName = table.dbName
    db: Optional[Database[Any]] = \
        None if dbName is None else getattr(proc, dbName)
    records = table.getRecordsToQuery(proc)
    if isinstance(records, SizedABC):
        unfilteredNrRecords: Optional[int] = len(records)
    elif db is not None:
        unfilteredNrRecords = len(db)
    else:
        # We could store all records in a list or wrap a counting iterator
        # around it, but so far that has not been necessary.
        unfilteredNrRecords = None

    sortField = table.sortField
    if sortField is None:
        # We don't know whether getRecordsToQuery() has filtered or not.
        filtered = None
        if isinstance(records, list):
            records = cast(List[Record], records)
        else:
            records = list(records)
    else:
        sortOrder = cast(Sequence[str], getattr(proc.args, sortField))
        cleanSortOrder = self.__cleanSortOrder(columns, sortOrder)
        if sortOrder != cleanSortOrder:
            if proc.args.isArgument(sortField):
                raise ArgsCorrected(
                    proc.args.override(**{sortField: cleanSortOrder}))
            else:
                setattr(proc.args, sortField, cleanSortOrder)
        query: List[RecordProcessor] = list(table.iterFilters(proc))
        filtered = bool(query)
        keyMap = _buildKeyMap(columns, proc)
        sortKeys = (keyMap.get(key, key) for key in cleanSortOrder)
        # TODO: Maybe we should have a class (RecordCollection?) for
        #       records that are not DBRecords, or to keep track of
        #       a subset of a full DB. Then 'uniqueKeys' could be moved
        #       from DataTable to RecordCollection.
        getRetriever: Callable[[str], Retriever[Record, Comparable]]
        if db is None:
            getRetriever = itemgetter
            uniqueKeys = table.uniqueKeys or ()
        else:
            getRetriever = db.retrieverFor
            assert table.uniqueKeys is None, "table's uniqueKeys is ignored"
            uniqueKeys = db.uniqueKeys
        retrievers: List[Retriever[Record, Comparable]] = []
        for key in sortKeys:
            if callable(key):
                retrievers.append(substMissingForNone(key))
            else:
                retrievers.append(substMissingForNone(getRetriever(key)))
            if key in uniqueKeys:
                break
        else:
            retrievers.append(
                cast(Callable[[Record], Comparable], lambda record: record))
        query.append(KeySorter(retrievers))
        records = runQuery(query, records)

    totalNrRecords = len(records)

    tabOffsetField = table.tabOffsetField
    if tabOffsetField is not None:
        tabOffset: int = getattr(proc.args, tabOffsetField)
        recordsPerPage = table.recordsPerPage
        if tabOffset < 0:
            # User tried to be funny and entered a negative offset in the URL.
            # Clip to the first tab.
            newOffset = 0
        elif tabOffset >= totalNrRecords:
            # The URL could have been manipulated, or we are looking at a
            # database from which records were recently deleted.
            # Clip to the last tab.
            newOffset = (totalNrRecords // recordsPerPage) * recordsPerPage
        else:
            # Make sure the first record on a tab matches the tab label.
            # Round down to the current tab label.
            newOffset = (tabOffset // recordsPerPage) * recordsPerPage
        if newOffset != tabOffset:
            raise ArgsCorrected(
                proc.args.override(**{tabOffsetField: newOffset}))
        records = records[tabOffset:tabOffset + table.recordsPerPage]

    objectName = table.objectName
    if objectName is None:
        assert db is not None
        objectName = pluralize(db.description, 42)

    self.objectName = objectName
    self.columns = columns
    self.records = records
    self.unfilteredNrRecords = unfilteredNrRecords
    self.totalNrRecords = totalNrRecords
    self.filtered = filtered

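# A standalone sketch of the tab-offset clamping performed above, with plain
# integers; clampTabOffset is a hypothetical helper name. Negative offsets
# snap to the first tab, offsets past the end snap to the last tab, and any
# other offset is rounded down so that the first record shown matches the
# tab label.
def clampTabOffset(tabOffset: int,
                   totalNrRecords: int,
                   recordsPerPage: int
                   ) -> int:
    if tabOffset < 0:
        return 0
    elif tabOffset >= totalNrRecords:
        return (totalNrRecords // recordsPerPage) * recordsPerPage
    else:
        return (tabOffset // recordsPerPage) * recordsPerPage

assert clampTabOffset(-5, 95, 10) == 0     # clip to the first tab
assert clampTabOffset(120, 95, 10) == 90   # clip to the last tab
assert clampTabOffset(37, 95, 10) == 30    # round down to the tab label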