def pullWork(self, resources = None, draining_resources = None, continuousReplication = True):
    """
    Pull work from the parent WorkQueue to be processed locally.

    If *resources* is passed in, get work for those sites; otherwise the
    available resources are looked up from WMBS.

    :param resources: dict of site -> free slots for new work, or None to
        query WMBS for them
    :param draining_resources: dict of site -> free slots for draining sites,
        or None (defaults to an empty dict / WMBS lookup)
    :param continuousReplication: passed through to the backend's
        pullFromParent replication call
    :return: number of work units assigned to this child queue (0 when no
        work is pulled for any reason)
    """
    # Guard clauses: no parent configured, backend/parent unavailable, or draining.
    if not self.params['ParentQueueCouchUrl']:
        msg = 'Unable to pull work from parent, ParentQueueCouchUrl not provided'
        self.logger.warning(msg)
        return 0
    if not self.backend.isAvailable() or not self.parent_queue.isAvailable():
        self.logger.info('Backend busy or down: skipping work pull')
        return 0
    if self.params['DrainMode']:
        self.logger.info('Draining queue: skipping work pull')
        return 0

    if not draining_resources:
        draining_resources = {}
    if not resources:
        # find out available resources from wmbs
        from WMCore.WorkQueue.WMBSHelper import freeSlots
        sites = freeSlots(self.params['QueueDepth'], knownCmsSites = cmsSiteNames())
        draining_sites = freeSlots(self.params['QueueDepth'], onlyDrain = True)
        # resources for new work are free wmbs resources minus what we already have queued
        _, resources = self.backend.availableWork(sites)
        # don't minus available as large run-anywhere could decimate
        draining_resources = draining_sites

    if not resources and not draining_resources:
        self.logger.info('Not pulling more work. No free slots.')
        return 0

    # Elements still replicating to a child queue from a previous pull.
    left_over = self.parent_queue.getElements('Negotiating', returnIdOnly = True,
                                              ChildQueueUrl = self.params['QueueURL'])
    if left_over:
        self.logger.info('Not pulling more work. Still replicating %d previous units' % len(left_over))
        return 0

    # Inbox elements from a previous pull that are still being processed.
    still_processing = self.backend.getInboxElements('Negotiating', returnIdOnly = True)
    if still_processing:
        # FIX: this message was a string literal broken across two physical
        # lines (invalid Python); rejoined into a single-line message.
        self.logger.info('Not pulling more work. Still processing %d previous units' % len(still_processing))
        return 0

    self.logger.info("Pull work for sites %s: " % str(resources))

    work, _ = self.parent_queue.availableWork(resources, self.params['Teams'])
    # get work for draining sites (only get work for existing workflows)
    work.extend(self.parent_queue.availableWork(draining_resources,
                                                self.params['Teams'],
                                                self.backend.getWorkflows())[0])
    if not work:
        self.logger.info('No available work in parent queue.')
        return 0
    work = self._assignToChildQueue(self.params['QueueURL'], *work)

    # do this whether we have work or not - other events i.e. cancel may have happened
    self.backend.pullFromParent(continuous = continuousReplication)
    return len(work)
def algorithm(self, parameters):
    """
    Get work from local workqueue to be injected into WMBS/DBSBuffer.

    :param parameters: unused, required by the polling-component interface
    """
    log = self.queue.logger
    log.info("Getting work and feeding WMBS files...")
    try:
        # need to make sure jobs are created
        resources, jobCounts = freeSlots(minusRunning=True,
                                         allowedStates=['Normal', 'Draining'],
                                         knownCmsSites=cmsSiteNames())
        for siteName in resources:
            log.info("I need %d jobs on site %s" % (resources[siteName], siteName))

        # workflows aborted/force-completed upstream must not acquire new work
        excludedWorkflows = self.abortedAndForceCompleteWorkflowCache.getData()
        acquiredWork = self.queue.getWork(resources, jobCounts,
                                          excludeWorkflows=excludedWorkflows)
        log.info("Acquired %s units of work for WMBS file creation", len(acquiredWork))
    except Exception as ex:
        # polling loop boundary: log and carry on to the next cycle
        log.error("Error in wmbs inject loop: %s" % str(ex))
def getWorks(self):
    """
    Inject work into wmbs for idle sites.

    :return: None
    """
    log = self.queue.logger
    log.info("Getting work and feeding WMBS files")

    # need to make sure jobs are created
    siteResources, jobCounts = freeSlots(minusRunning=True,
                                         allowedStates=['Normal', 'Draining'],
                                         knownCmsSites=cmsSiteNames())
    for siteName in siteResources:
        log.info("I need %d jobs on site %s" % (siteResources[siteName], siteName))

    # skip workflows already aborted or force-completed upstream
    excludedWorkflows = self.abortedAndForceCompleteWorkflowCache.getData()
    acquiredWork = self.queue.getWork(siteResources, jobCounts,
                                      excludeWorkflows=excludedWorkflows)
    log.info("%s of units of work acquired for file creation" % len(acquiredWork))
    return
def getWorks(self):
    """
    Inject work into wmbs for idle sites.

    Stores the acquired work on ``self.previousWorkList``.

    :return: None
    """
    log = self.queue.logger
    log.info("Getting work and feeding WMBS files")

    # need to make sure jobs are created
    siteSlots = freeSlots(minusRunning=True)
    for siteName, slots in siteSlots.items():
        log.info("I need %d jobs on site %s" % (slots, siteName))

    self.previousWorkList = self.queue.getWork(siteSlots)
    log.info("%s of units of work acquired for file creation" % len(self.previousWorkList))
    return
def getAgentMonitoring(self):
    """
    Return a dict with all the monitoring information we can gather
    from the databases.

    :return: dict keyed by metric name
    """
    # WMBS job counts, overall and per-type for the states we care about
    monitoring = {
        'wmbsCreatedTypeCount': self.jobTypeCountByStatus.execute('created'),
        'wmbsExecutingTypeCount': self.jobTypeCountByStatus.execute('executing'),
        'wmbsCountByState': self.jobCountByStatus.execute(),
        'activeRunJobByStatus': self.runJobByStatus.execute(active=True),
        'completeRunJobByStatus': self.runJobByStatus.execute(active=False),
    }

    # get thresholds for job creation (GQ to LQ), only for sites in Normal state
    # also get the number of pending jobs and their priority per site
    thresholdsForCreate, pendingCountByPrio = freeSlots(minusRunning=True)
    monitoring['thresholdsGQ2LQ'] = thresholdsForCreate
    monitoring['sitePendCountByPrio'] = pendingCountByPrio

    return monitoring
def getWorks(self):
    """
    Inject work into wmbs for idle sites.

    Stores the acquired work on ``self.previousWorkList``.

    :return: None
    """
    log = self.queue.logger
    log.info("Getting work and feeding WMBS files")

    # need to make sure jobs are created
    siteSlots, jobCounts = freeSlots(minusRunning = True,
                                     allowedStates = ['Normal', 'Draining'],
                                     knownCmsSites = cmsSiteNames())
    for siteName, slots in siteSlots.items():
        log.info("I need %d jobs on site %s" % (slots, siteName))

    self.previousWorkList = self.queue.getWork(siteSlots, jobCounts)
    log.info("%s of units of work acquired for file creation" % len(self.previousWorkList))
    return
def getWorks(self):
    """
    Inject work into wmbs for idle sites.

    Stores the acquired work on ``self.previousWorkList``.

    :return: None
    """
    log = self.queue.logger
    log.info("Getting work and feeding WMBS files")

    # need to make sure jobs are created
    siteSlots = freeSlots(minusRunning=True,
                          allowedStates=['Normal', 'Draining'],
                          knownCmsSites=cmsSiteNames())
    for siteName in siteSlots:
        log.info("I need %d jobs on site %s" % (siteSlots[siteName], siteName))

    self.previousWorkList = self.queue.getWork(siteSlots)
    log.info("%s of units of work acquired for file creation" % len(self.previousWorkList))
    return
def algorithm(self, parameters):
    """
    Get work from local workqueue to be injected into WMBS/DBSBuffer.

    :param parameters: unused, required by the polling-component interface
    """
    self.queue.logger.info("Getting work and feeding WMBS files...")
    try:
        # need to make sure jobs are created
        slotsBySite, jobCounts = freeSlots(minusRunning=True,
                                           allowedStates=['Normal', 'Draining'],
                                           knownCmsSites=cmsSiteNames())
        for siteName, nSlots in slotsBySite.items():
            self.queue.logger.info("I need %d jobs on site %s" % (nSlots, siteName))

        # don't pull work for workflows aborted/force-completed upstream
        skipWorkflows = self.abortedAndForceCompleteWorkflowCache.getData()
        workUnits = self.queue.getWork(slotsBySite, jobCounts,
                                       excludeWorkflows=skipWorkflows)
        self.queue.logger.info("Acquired %s units of work for WMBS file creation",
                               len(workUnits))
    except Exception as ex:
        # polling loop boundary: log and carry on to the next cycle
        self.queue.logger.error("Error in wmbs inject loop: %s" % str(ex))
def pullWork(self, resources=None, draining_resources=None, continuousReplication=True):
    """
    Pull work from the parent WorkQueue to be processed locally.

    If *resources* is passed in, get work for those sites; otherwise the
    available resources are looked up from WMBS.

    :param resources: dict of site -> free slots for new work, or None to
        query WMBS for them
    :param draining_resources: dict of site -> free slots for draining sites,
        or None (defaults to an empty dict / WMBS lookup)
    :param continuousReplication: passed through to the backend's
        pullFromParent replication call
    :return: number of work units assigned to this child queue (0 when no
        work is pulled for any reason)
    """
    # Guard clauses: no parent configured, backend/parent unavailable, or draining.
    if not self.params['ParentQueueCouchUrl']:
        msg = 'Unable to pull work from parent, ParentQueueCouchUrl not provided'
        self.logger.warning(msg)
        return 0
    if not self.backend.isAvailable() or not self.parent_queue.isAvailable():
        self.logger.info('Backend busy or down: skipping work pull')
        return 0
    if self.params['DrainMode']:
        self.logger.info('Draining queue: skipping work pull')
        return 0

    if not draining_resources:
        draining_resources = {}
    if not resources:
        # find out available resources from wmbs
        from WMCore.WorkQueue.WMBSHelper import freeSlots
        sites = freeSlots(self.params['QueueDepth'], knownCmsSites=cmsSiteNames())
        draining_sites = freeSlots(self.params['QueueDepth'], allowedStates=['Draining'])
        # resources for new work are free wmbs resources minus what we already have queued
        _, resources = self.backend.availableWork(sites)
        # don't minus available as large run-anywhere could decimate
        draining_resources = draining_sites

    if not resources and not draining_resources:
        self.logger.info('Not pulling more work. No free slots.')
        return 0

    # Elements still replicating to a child queue from a previous pull.
    left_over = self.parent_queue.getElements('Negotiating', returnIdOnly=True,
                                              ChildQueueUrl=self.params['QueueURL'])
    if left_over:
        self.logger.info('Not pulling more work. Still replicating %d previous units'
                         % len(left_over))
        return 0

    # Inbox elements from a previous pull that are still being processed.
    still_processing = self.backend.getInboxElements('Negotiating', returnIdOnly=True)
    if still_processing:
        # FIX: this message was a string literal broken across two physical
        # lines (invalid Python); rejoined into a single-line message.
        self.logger.info('Not pulling more work. Still processing %d previous units'
                         % len(still_processing))
        return 0

    self.logger.info("Pull work for sites %s: " % str(resources))

    work, _ = self.parent_queue.availableWork(resources, self.params['Teams'])
    # get work for draining sites (only get work for existing workflows)
    work.extend(self.parent_queue.availableWork(draining_resources,
                                                self.params['Teams'],
                                                self.backend.getWorkflows())[0])
    if not work:
        self.logger.info('No available work in parent queue.')
        return 0
    work = self._assignToChildQueue(self.params['QueueURL'], *work)

    # do this whether we have work or not - other events i.e. cancel may have happened
    self.backend.pullFromParent(continuous=continuousReplication)
    return len(work)