def testCompletedWorkflow(self):
    """Queue a ReReco spec, cancel its elements and verify completion bookkeeping."""
    # test getWork
    specName = "RerecoSpec"
    specUrl = self.specGenerator.createReRecoSpec(specName, "file")
    globalQ = globalQueue(DbName='workqueue_t',
                          QueueURL=self.testInit.couchUrl,
                          UnittestFlag=True)
    self.assertTrue(globalQ.queueWork(specUrl, specName, "teamA") > 0)
    wqApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # overwrite default - can't test with stale view
    wqApi.defaultOptions = {'reduce': True, 'group': True}
    # This only checks minimum client call not exactly correctness of return
    # values.
    self.assertEqual(wqApi.getTopLevelJobsByRequest(),
                     [{'total_jobs': 339, 'request_name': specName}])
    results = wqApi.getJobsByStatusAndPriority()
    # list(...) so the dict-view comparison also works on Python 3
    self.assertEqual(list(results.keys()), ['Available'])
    self.assertEqual(list(results['Available'].keys()), [8000])
    # was assertTrue(x, 339): assertTrue's second argument is the failure
    # *message*, so the job count was never actually verified
    self.assertEqual(results['Available'][8000]['sum'], 339)
    result = wqApi.getElementsCountAndJobsByWorkflow()
    self.assertEqual(len(result), 1)
    self.assertEqual(result[specName]['Available']['Jobs'], 339)
    data = wqApi.db.loadView('WorkQueue', 'elementsDetailByWorkflowAndStatus',
                             {'startkey': [specName], 'endkey': [specName, {}],
                              'reduce': False})
    elements = [x['id'] for x in data.get('rows', [])]
    wqApi.updateElements(*elements, Status='Canceled')
    # load this view once again to make sure it will be updated in the next assert..
    data = wqApi.db.loadView('WorkQueue', 'elementsDetailByWorkflowAndStatus',
                             {'startkey': [specName], 'endkey': [specName, {}],
                              'reduce': False})
    self.assertEqual(len(wqApi.getCompletedWorkflow(stale=False)), 1)
    self.assertEqual(list(wqApi.getJobsByStatusAndPriority().keys()), ['Canceled'])
def testWorkQueueService(self):
    """Smoke-test the minimal WorkQueueDS client calls after queueing work."""
    # test getWork
    specName = "RerecoSpec"
    specUrl = self.specGenerator.createReRecoSpec(
        specName, "file", assignKwargs={'SiteWhitelist': ['T2_XX_SiteA']})
    globalQ = globalQueue(DbName='workqueue_t',
                          QueueURL=self.testInit.couchUrl,
                          UnittestFlag=True)
    self.assertTrue(globalQ.queueWork(specUrl, specName, "teamA") > 0)
    wqApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # overwrite default - can't test with stale view
    wqApi.defaultOptions = {'reduce': True, 'group': True}
    # This only checks minimum client call not exactly correctness of return
    # values.
    self.assertEqual(wqApi.getTopLevelJobsByRequest(),
                     [{'total_jobs': 339, 'request_name': specName}])
    # work still available, so no childQueue
    # list(...) so the dict-view comparison also works on Python 3
    self.assertEqual(list(wqApi.getChildQueuesAndStatus().keys()), [None])
    result = wqApi.getElementsCountAndJobsByWorkflow()
    self.assertEqual(len(result), 1)
    self.assertEqual(result[specName]['Available']['Jobs'], 339)
    self.assertEqual(list(wqApi.getChildQueuesAndPriority()[None].keys()), [8000])
    self.assertEqual(wqApi.getWMBSUrl(), [])
    self.assertEqual(wqApi.getWMBSUrlByRequest(), [])
def testWorkQueueService(self):
    """Exercise the basic WorkQueueDS read APIs against freshly queued work."""
    workflow = "RerecoSpec"
    wfUrl = self.specGenerator.createReRecoSpec(workflow, "file",
                                                assignKwargs={'SiteWhitelist': ['T2_XX_SiteA']})
    queue = globalQueue(DbName='workqueue_t',
                        QueueURL=self.testInit.couchUrl,
                        UnittestFlag=True)
    self.assertTrue(queue.queueWork(wfUrl, workflow, "teamA") > 0)
    dataService = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # stale views cannot be used in unit tests; force fresh reduce/group reads
    dataService.defaultOptions = {'reduce': True, 'group': True}
    # Only the minimal client calls are checked, not the full payloads.
    self.assertEqual(dataService.getTopLevelJobsByRequest(),
                     [{'total_jobs': 339, 'request_name': workflow}])
    # work still available, so no childQueue
    statusRows = dataService.getChildQueuesAndStatus()
    self.assertItemsEqual({row['agent_name'] for row in statusRows},
                          ["AgentNotDefined"])
    countsByWorkflow = dataService.getElementsCountAndJobsByWorkflow()
    self.assertEqual(len(countsByWorkflow), 1)
    self.assertEqual(countsByWorkflow[workflow]['Available']['Jobs'], 339)
    prioRows = dataService.getChildQueuesAndPriority()
    prios = {row['priority'] for row in prioRows
             if row['agent_name'] == "AgentNotDefined"}
    self.assertItemsEqual(prios, [8000])
    self.assertEqual(dataService.getWMBSUrl(), [])
    self.assertEqual(dataService.getWMBSUrlByRequest(), [])
def testWorkQueueService(self):
    """Smoke-test the WorkQueueDS client calls for a freshly queued spec."""
    # test getWork
    wfName = "RerecoSpec"
    wfUrl = self.specGenerator.createReRecoSpec(wfName, "file")
    queue = globalQueue(DbName='workqueue_t', QueueURL=self.testInit.couchUrl)
    self.assertTrue(queue.queueWork(wfUrl, "RerecoSpec", "teamA") > 0)
    dataService = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # stale views cannot be used in unit tests
    dataService.defaultOptions = {'reduce': True, 'group': True}
    # Only the minimal client calls are checked, not the full payloads.
    self.assertEqual(dataService.getTopLevelJobsByRequest(),
                     [{'total_jobs': 10, 'request_name': wfName}])
    self.assertEqual(dataService.getChildQueues(), [])
    self.assertEqual(dataService.getJobStatusByRequest(),
                     [{'status': 'Available', 'jobs': 10, 'request_name': wfName}])
    self.assertEqual(dataService.getChildQueuesByRequest(), [])
    self.assertEqual(dataService.getWMBSUrl(), [])
    self.assertEqual(dataService.getWMBSUrlByRequest(), [])
def addAdditionalMonitorReport(self, config):
    """
    Gather Global Workqueue statistics to be uploaded to WMStats.

    Metrics collected:
     - by status: count of elements and total number of estimated jobs
     - by status: count of elements and sum of jobs by *priority*.
     - by agent: count of elements and sum of jobs by *status*
     - by agent: count of elements and sum of jobs by *priority*
     - by status: unique (distributed) and possible (total assigned) number
       of jobs and elements per *site*, taking into account data locality
     - by status: same as above but regardless of data locality (using AAA)

    TODO: these still need to be done
      * for Available workqueue elements:
        - WQE without a common site list (that does not pass the work restrictions)
        - WQE older than 7 days (or whatever number we decide)
        - WQE that create > 30k jobs (or whatever number we decide)
      * for Acquired workqueue elements
        - WQE older than 7 days (or whatever the number is)
    """
    self.logger.info("Collecting GlobalWorkqueue statistics...")
    # whole documents are needed for these statuses to build site metrics
    activeStatuses = ['Available', 'Negotiating', 'Acquired']
    queue = globalQueue(**config.queueParams)
    return queue.monitorWorkQueue(activeStatuses)
def setupGlobalWorkqueue(self, **kwargs):
    """Instantiate and return a global workqueue for the tests."""
    return globalQueue(DbName=self.globalQDB,
                       InboxDbName=self.globalQInboxDB,
                       QueueURL=self.globalQCouchUrl,
                       **kwargs)
def addAdditionalMonitorReport(self, config):
    """
    Collect Global Workqueue statistics for the WMStats upload.

    Metrics collected:
     - by status: count of elements and total number of estimated jobs
     - by status: count of elements and sum of jobs by *priority*.
     - by agent: count of elements and sum of jobs by *status*
     - by agent: count of elements and sum of jobs by *priority*
     - by status: unique (distributed) and possible (total assigned) number
       of jobs and elements per *site*, taking into account data locality
     - by status: same as above but regardless of data locality (using AAA)

    TODO: these still need to be done
      * for Available workqueue elements:
        - WQE without a common site list (that does not pass the work restrictions)
        - WQE older than 7 days (or whatever number we decide)
        - WQE that create > 30k jobs (or whatever number we decide)
      * for Acquired workqueue elements
        - WQE older than 7 days (or whatever the number is)
    """
    self.logger.info("Collecting GlobalWorkqueue statistics...")
    # whole documents are needed for these statuses to build site metrics
    activeStatuses = ['Available', 'Negotiating', 'Acquired']
    queue = globalQueue(**config.queueParams)
    return queue.monitorWorkQueue(activeStatuses)
def setupGlobalWorkqueue(self, **kwargs):
    """Construct and hand back the global workqueue used by these tests."""
    queue = globalQueue(DbName=self.globalQDB,
                        InboxDbName=self.globalQInboxDB,
                        QueueURL=self.globalQCouchUrl,
                        **kwargs)
    return queue
def setupGlobalWorkqueue(self, **kwargs):
    """Instantiate and return a global workqueue configured with two test teams."""
    return globalQueue(DbName=self.globalQDB,
                       InboxDbName=self.globalQInboxDB,
                       QueueURL=self.globalQCouchUrl,
                       Teams=["The A-Team", "some other bloke"],
                       **kwargs)
def setupGlobalWorkqueue(self):
    """Instantiate and return the global workqueue used by these tests."""
    return globalQueue(CacheDir=self.workDir,
                       QueueURL='global.example.com',
                       Teams=["The A-Team", "some other bloke"],
                       DbName='workqueue_t_global')
def setupGlobalWorkqueue(self):
    """Build and return the global workqueue test instance."""
    queue = globalQueue(CacheDir=self.workDir,
                        QueueURL='global.example.com',
                        Teams=["The A-Team", "some other bloke"],
                        DbName='workqueue_t_global')
    return queue
def updateDataLocation(self, config):
    """Refresh the data-location information for active workqueue elements."""
    queue = globalQueue(**config.queueParams)
    queue.updateLocationInfo()
def testUpdatePriorityService(self):
    """
    _testUpdatePriorityService_

    Verify that priority updates propagate from the global queue to the
    local queue, and that available workflows report the new priority.
    """
    wfName = "RerecoSpec"
    wfUrl = self.specGenerator.createReRecoSpec(wfName, "file",
                                                assignKwargs={'SiteWhitelist': ["T2_XX_SiteA"]})
    gqueue = globalQueue(DbName='workqueue_t',
                         QueueURL=self.testInit.couchUrl,
                         UnittestFlag=True, **self.queueParams)
    lqueue = localQueue(DbName='local_workqueue_t',
                        QueueURL=self.testInit.couchUrl,
                        CacheDir=self.testInit.testDir,
                        ParentQueueCouchUrl='%s/workqueue_t' % self.testInit.couchUrl,
                        ParentQueueInboxCouchDBName='workqueue_t_inbox',
                        **self.queueParams)
    # Try a full chain of priority update and propagation
    self.assertTrue(gqueue.queueWork(wfUrl, "RerecoSpec", "teamA") > 0)
    gqApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # stale views can't be relied on in unit tests
    gqApi.defaultOptions = {'reduce': True, 'group': True}
    gqApi.updatePriority(wfName, 100)
    self.assertEqual(gqueue.backend.getWMSpec(wfName).priority(), 100)
    for elem in gqueue.backend.getElementsForWorkflow(wfName):
        self.assertEqual(elem['Priority'], 100)
    pulled = lqueue.pullWork({'T2_XX_SiteA': 10})
    self.assertTrue(pulled > 0)
    # replicate from GQ to LQ manually
    lqueue.backend.pullFromParent(continuous=False)
    # wait until replication is done
    time.sleep(2)
    lqueue.processInboundWork(continuous=False)
    for elem in lqueue.backend.getElementsForWorkflow(wfName):
        self.assertEqual(elem['Priority'], 100)
    lqApi = WorkQueueDS(self.testInit.couchUrl, 'local_workqueue_t')
    # stale views can't be relied on in unit tests
    lqApi.defaultOptions = {'reduce': True, 'group': True}
    lqApi.updatePriority(wfName, 500)
    self.assertEqual(lqueue.backend.getWMSpec(wfName).priority(), 500)
    for elem in lqueue.backend.getElementsForWorkflow(wfName):
        self.assertEqual(elem['Priority'], 500)
    self.assertEqual(lqApi.getAvailableWorkflows(), set([(wfName, 500)]))
    # Attempt to update an inexistent workflow in the queue
    try:
        gqApi.updatePriority('NotExistent', 2)
    except Exception as ex:
        self.fail('No exception should be raised.: %s' % str(ex))
def testCompletedWorkflow(self):
    """Queue work, cancel all elements and verify completion bookkeeping."""
    # test getWork
    specName = "RerecoSpec"
    specUrl = self.specGenerator.createReRecoSpec(
        specName, "file", assignKwargs={'SiteWhitelist': ['T2_XX_SiteA']})
    globalQ = globalQueue(DbName='workqueue_t',
                          QueueURL=self.testInit.couchUrl,
                          UnittestFlag=True, **self.queueParams)
    self.assertTrue(globalQ.queueWork(specUrl, specName, "teamA") > 0)
    wqApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # overwrite default - can't test with stale view
    wqApi.defaultOptions = {'reduce': True, 'group': True}
    # This only checks minimum client call not exactly correctness of return
    # values.
    self.assertEqual(wqApi.getTopLevelJobsByRequest(),
                     [{'total_jobs': 339, 'request_name': specName}])
    results = wqApi.getJobsByStatus()
    self.assertEqual(results['Available']['sum_jobs'], 339)
    results = wqApi.getJobsByStatusAndPriority()
    resultsPrio = set([item['priority'] for item in results.get('Available')])
    self.assertItemsEqual(resultsPrio, [8000])
    resultsJobs = sum([item['sum_jobs'] for item in results.get('Available')
                       if item['priority'] == 8000])
    # was assertTrue(resultsJobs, 339): assertTrue's second argument is the
    # failure *message*, so the job count was never actually verified
    self.assertEqual(resultsJobs, 339)
    result = wqApi.getElementsCountAndJobsByWorkflow()
    self.assertEqual(len(result), 1)
    self.assertEqual(result[specName]['Available']['Jobs'], 339)
    data = wqApi.db.loadView('WorkQueue', 'elementsDetailByWorkflowAndStatus',
                             {'startkey': [specName], 'endkey': [specName, {}],
                              'reduce': False})
    elements = [x['id'] for x in data.get('rows', [])]
    wqApi.updateElements(*elements, Status='Canceled')
    # load this view once again to make sure it will be updated in the next assert..
    data = wqApi.db.loadView('WorkQueue', 'elementsDetailByWorkflowAndStatus',
                             {'startkey': [specName], 'endkey': [specName, {}],
                              'reduce': False})
    self.assertEqual(len(wqApi.getCompletedWorkflow(stale=False)), 1)
    results = wqApi.getJobsByStatusAndPriority()
    resultsPrio = set([item['priority'] for item in results.get('Canceled')])
    self.assertItemsEqual(resultsPrio, [8000])
def updateDataLocation(self, config):
    """Refresh data-location statistics for the active workqueue elements."""
    queue = globalQueue(**config.queueParams)
    queue.updateLocationInfo()
def setupGlobalWorkqueue(self, **kwargs):
    """Build a global workqueue, defaulting the Rucio integration settings."""
    rucioDefaults = {'rucioAccount': "wmcore_transferor",
                     'rucioAuthUrl': "https://cms-rucio-auth-int.cern.ch",
                     'rucioUrl': "http://cms-rucio-int.cern.ch"}
    # caller-supplied values win; defaults only fill the gaps
    for key, value in rucioDefaults.items():
        kwargs.setdefault(key, value)
    return globalQueue(DbName=self.globalQDB,
                       InboxDbName=self.globalQInboxDB,
                       QueueURL=self.globalQCouchUrl,
                       **kwargs)
def testUpdatePriorityService(self):
    """
    _testUpdatePriorityService_

    Verify that priority updates propagate from the global to the local
    queue and that available workflows report the new priority.
    """
    wfName = "RerecoSpec"
    wfUrl = self.specGenerator.createReRecoSpec(wfName, "file",
                                                assignKwargs={'SiteWhitelist': ["T2_XX_SiteA"]})
    gqueue = globalQueue(DbName='workqueue_t',
                         QueueURL=self.testInit.couchUrl,
                         UnittestFlag=True)
    lqueue = localQueue(DbName='local_workqueue_t',
                        QueueURL=self.testInit.couchUrl,
                        CacheDir=self.testInit.testDir,
                        ParentQueueCouchUrl='%s/workqueue_t' % self.testInit.couchUrl,
                        ParentQueueInboxCouchDBName='workqueue_t_inbox')
    # Try a full chain of priority update and propagation
    self.assertTrue(gqueue.queueWork(wfUrl, "RerecoSpec", "teamA") > 0)
    gqApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # stale views can't be relied on in unit tests
    gqApi.defaultOptions = {'reduce': True, 'group': True}
    gqApi.updatePriority(wfName, 100)
    self.assertEqual(gqueue.backend.getWMSpec(wfName).priority(), 100)
    for elem in gqueue.backend.getElementsForWorkflow(wfName):
        self.assertEqual(elem['Priority'], 100)
    pulled = lqueue.pullWork({'T2_XX_SiteA': 10})
    self.assertTrue(pulled > 0)
    # replicate from GQ to LQ manually
    lqueue.backend.pullFromParent(continuous=False)
    # wait until replication is done
    time.sleep(2)
    lqueue.processInboundWork(continuous=False)
    for elem in lqueue.backend.getElementsForWorkflow(wfName):
        self.assertEqual(elem['Priority'], 100)
    lqApi = WorkQueueDS(self.testInit.couchUrl, 'local_workqueue_t')
    # stale views can't be relied on in unit tests
    lqApi.defaultOptions = {'reduce': True, 'group': True}
    lqApi.updatePriority(wfName, 500)
    self.assertEqual(lqueue.backend.getWMSpec(wfName).priority(), 500)
    for elem in lqueue.backend.getElementsForWorkflow(wfName):
        self.assertEqual(elem['Priority'], 500)
    self.assertEqual(lqApi.getAvailableWorkflows(), set([(wfName, 500)]))
    # Attempt to update an inexistent workflow in the queue
    try:
        gqApi.updatePriority('NotExistent', 2)
    except Exception as ex:
        self.fail('No exception should be raised.: %s' % str(ex))
def cleanUpAndSyncCanceledElements(self, config):
    """
    Delete workqueue elements in end states and synchronize cancelled
    elements (could also run in a separate thread).
    """
    queue = globalQueue(**config.queueParams)
    queue.performQueueCleanupActions(skipWMBS=True)
def cleanUpAndSyncCanceledElements(self, config):
    """
    Remove workqueue elements that reached an end state and synchronize
    cancelled elements (could also run in a separate thread).
    """
    queue = globalQueue(**config.queueParams)
    queue.performQueueCleanupActions(skipWMBS=True)
def queueFromConfig(config):
    """Create a queue of the level requested by the config object."""
    config = queueConfigFromConfigObject(config)
    level = config.WorkQueueManager.level
    # pick the queue class matching the configured level; WorkQueue is the fallback
    if level == 'GlobalQueue':
        from WMCore.WorkQueue.WorkQueue import globalQueue as queueClass
    elif level == 'LocalQueue':
        from WMCore.WorkQueue.WorkQueue import localQueue as queueClass
    else:
        from WMCore.WorkQueue.WorkQueue import WorkQueue as queueClass
    return queueClass(**config.WorkQueueManager.queueParams)
def queueFromConfig(config):
    """Instantiate the queue level requested by the config object."""
    config = queueConfigFromConfigObject(config)
    level = config.WorkQueueManager.level
    # select the class for the configured level; plain WorkQueue is the fallback
    if level == 'GlobalQueue':
        from WMCore.WorkQueue.WorkQueue import globalQueue as queueClass
    elif level == 'LocalQueue':
        from WMCore.WorkQueue.WorkQueue import localQueue as queueClass
    else:
        from WMCore.WorkQueue.WorkQueue import WorkQueue as queueClass
    return queueClass(**config.WorkQueueManager.queueParams)
def cleanUpAndSyncCanceledElements(self, config):
    """
    Delete workqueue elements in end states and synchronize cancelled
    elements, logging how long the cleanup took.
    """
    startTime = int(time())
    queue = globalQueue(**config.queueParams)
    queue.performQueueCleanupActions(skipWMBS=True)
    endTime = int(time())
    self.logger.info("%s executed in %d secs.",
                     self.__class__.__name__, endTime - startTime)
def interactWithReqmgr(self, config):
    """
    Interact with ReqMgr:
    1. pull new work
    2. add new elements from running-open requests
    3. report element status to reqmgr (to be removed and made a reqmgr task)
    4. record this activity
    """
    queue = globalQueue(**config.queueParams)
    reqMgrInterface = WorkQueueReqMgrInterface(**config.reqMgrConfig)
    reqMgrInterface(queue)
def interactWithReqmgr(self, config):
    """
    Drive the ReqMgr interaction:
    1. pull new work
    2. add new elements from running-open requests
    3. report element status to reqmgr (to be removed and made a reqmgr task)
    4. record this activity
    """
    queue = globalQueue(**config.queueParams)
    reqMgrInterface = WorkQueueReqMgrInterface(**config.reqMgrConfig)
    reqMgrInterface(queue)
def testUpdatePriorityService(self):
    """
    _testUpdatePriorityService_

    Check that we can update the priority correctly also
    check the available workflows feature
    """
    specName = "RerecoSpec"
    specUrl = self.specGenerator.createReRecoSpec(specName, "file")
    globalQ = globalQueue(DbName='workqueue_t',
                          QueueURL=self.testInit.couchUrl)
    localQ = localQueue(DbName='local_workqueue_t',
                        QueueURL=self.testInit.couchUrl,
                        CacheDir=self.testInit.testDir,
                        ParentQueueCouchUrl='%s/workqueue_t' % self.testInit.couchUrl,
                        ParentQueueInboxCouchDBName='workqueue_t_inbox')
    # Try a full chain of priority update and propagation
    self.assertTrue(globalQ.queueWork(specUrl, "RerecoSpec", "teamA") > 0)
    globalApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # overwrite default - can't test with stale view
    globalApi.defaultOptions = {'reduce': True, 'group': True}
    globalApi.updatePriority(specName, 100)
    self.assertEqual(globalQ.backend.getWMSpec(specName).priority(), 100)
    storedElements = globalQ.backend.getElementsForWorkflow(specName)
    for element in storedElements:
        self.assertEqual(element['Priority'], 100)
    self.assertTrue(localQ.pullWork({'T2_XX_SiteA': 10},
                                    continuousReplication=False) > 0)
    localQ.processInboundWork(continuous=False)
    storedElements = localQ.backend.getElementsForWorkflow(specName)
    for element in storedElements:
        self.assertEqual(element['Priority'], 100)
    localApi = WorkQueueDS(self.testInit.couchUrl, 'local_workqueue_t')
    # overwrite default - can't test with stale view
    localApi.defaultOptions = {'reduce': True, 'group': True}
    localApi.updatePriority(specName, 500)
    self.assertEqual(localQ.backend.getWMSpec(specName).priority(), 500)
    storedElements = localQ.backend.getElementsForWorkflow(specName)
    for element in storedElements:
        self.assertEqual(element['Priority'], 500)
    self.assertEqual(localApi.getAvailableWorkflows(), set([(specName, 500)]))
    # Attempt to update an inexistent workflow in the queue
    try:
        globalApi.updatePriority('NotExistent', 2)
    except Exception:
        # was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should trigger the failure
        self.fail('No exception should be raised.')
def addAdditionalMonitorReport(self, config):
    """
    Gather Global Workqueue statistics and upload them to WMStats
    and MonIT.
    """
    self.logger.info("Collecting GlobalWorkqueue statistics...")
    # whole documents are needed for these statuses to build site metrics
    queue = globalQueue(**config.queueParams)
    results = queue.monitorWorkQueue(self.initialStatus)
    if self.postToAMQ:
        self.uploadToAMQ(self.buildMonITDocs(results))
    return results
def addAdditionalMonitorReport(self, config):
    """
    Collect Global Workqueue statistics and push them to WMStats
    and MonIT.
    """
    self.logger.info("Collecting GlobalWorkqueue statistics...")
    # whole documents are needed for these statuses to build site metrics
    queue = globalQueue(**config.queueParams)
    results = queue.monitorWorkQueue(self.initialStatus)
    if self.postToAMQ:
        self.uploadToAMQ(self.buildMonITDocs(results))
    return results
def testUpdatePriorityService(self):
    """
    _testUpdatePriorityService_

    Check that we can update the priority correctly also
    check the available workflows feature
    """
    specName = "RerecoSpec"
    specUrl = self.specGenerator.createReRecoSpec(specName, "file")
    globalQ = globalQueue(DbName='workqueue_t',
                          QueueURL=self.testInit.couchUrl)
    localQ = localQueue(DbName='local_workqueue_t',
                        QueueURL=self.testInit.couchUrl,
                        CacheDir=self.testInit.testDir,
                        ParentQueueCouchUrl='%s/workqueue_t' % self.testInit.couchUrl,
                        ParentQueueInboxCouchDBName='workqueue_t_inbox')
    # Try a full chain of priority update and propagation
    self.assertTrue(globalQ.queueWork(specUrl, "RerecoSpec", "teamA") > 0)
    globalApi = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # overwrite default - can't test with stale view
    globalApi.defaultOptions = {'reduce': True, 'group': True}
    globalApi.updatePriority(specName, 100)
    self.assertEqual(globalQ.backend.getWMSpec(specName).priority(), 100)
    storedElements = globalQ.backend.getElementsForWorkflow(specName)
    for element in storedElements:
        self.assertEqual(element['Priority'], 100)
    self.assertTrue(localQ.pullWork({'T2_XX_SiteA': 10}) > 0)
    localQ.processInboundWork(continuous=False)
    storedElements = localQ.backend.getElementsForWorkflow(specName)
    for element in storedElements:
        self.assertEqual(element['Priority'], 100)
    localApi = WorkQueueDS(self.testInit.couchUrl, 'local_workqueue_t')
    # overwrite default - can't test with stale view
    localApi.defaultOptions = {'reduce': True, 'group': True}
    localApi.updatePriority(specName, 500)
    self.assertEqual(localQ.backend.getWMSpec(specName).priority(), 500)
    storedElements = localQ.backend.getElementsForWorkflow(specName)
    for element in storedElements:
        self.assertEqual(element['Priority'], 500)
    self.assertEqual(localApi.getAvailableWorkflows(), set([(specName, 500)]))
    # Attempt to update an inexistent workflow in the queue
    try:
        globalApi.updatePriority('NotExistent', 2)
    except Exception:
        # was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should trigger the failure
        self.fail('No exception should be raised.')
def setUp(self):
    """
    Prepare the emulators, a scratch cache directory and the global queue.

    Warning: for real profiling (including spec generation) a real spec must
    be used instead of the emulator-generated one, which skips couchDB and
    cmssw access.
    """
    EmulatorHelper.setEmulators(phedex=True, dbs=True, siteDB=True,
                                requestMgr=True)
    WorkQueueTestCase.setUp(self)
    self.cacheDir = tempfile.mkdtemp()
    self.specGenerator = WMSpecGenerator(self.cacheDir)
    self.specNamePrefix = "TestReReco_"
    self.specs = self.createReRecoSpec(5, "file")
    # Create queues
    self.globalQueue = globalQueue(DbName=self.globalQDB,
                                   InboxDbName=self.globalQInboxDB,
                                   NegotiationTimeout=0)
def setUp(self):
    """
    Create a scratch cache directory, the spec generator and the global queue.

    Warning: for real profiling (including spec generation) a real spec must
    be used; the emulator-generated spec skips couchDB and cmssw access.
    """
    WorkQueueTestCase.setUp(self)
    self.cacheDir = tempfile.mkdtemp()
    self.specGenerator = WMSpecGenerator(self.cacheDir)
    self.specNamePrefix = "TestReReco_"
    self.specs = self.createReRecoSpec(5, "file")
    # Create queues
    self.globalQueue = globalQueue(DbName=self.globalQDB,
                                   InboxDbName=self.globalQInboxDB,
                                   NegotiationTimeout=0)
def testWorkQueueService(self):
    """Minimal smoke test of the WorkQueueDS client calls."""
    # test getWork
    wfName = "RerecoSpec"
    wfUrl = self.specGenerator.createReRecoSpec(wfName, "file")
    queue = globalQueue(DbName='workqueue_t', QueueURL=self.testInit.couchUrl)
    self.assertTrue(queue.queueWork(wfUrl, "RerecoSpec", "teamA") > 0)
    dataService = WorkQueueDS(self.testInit.couchUrl, 'workqueue_t')
    # Only the minimal client calls are checked, not the full payloads.
    self.assertEqual(dataService.getTopLevelJobsByRequest(),
                     [{'total_jobs': 2, 'request_name': wfName}])
    self.assertEqual(dataService.getChildQueues(), [])
    self.assertEqual(dataService.getJobStatusByRequest(),
                     [{'status': 'Available', 'jobs': 2, 'request_name': wfName}])
    self.assertEqual(dataService.getChildQueuesByRequest(), [])
    self.assertEqual(dataService.getWMBSUrl(), [])
    self.assertEqual(dataService.getWMBSUrlByRequest(), [])
def __init__(self, rest, config):
    """Set up the cleanup task with its global workqueue handle."""
    super(CleanUpTask, self).__init__(config)
    # the queue shares this task's logger so cleanup activity is traceable
    self.globalQ = globalQueue(logger=self.logger, **config.queueParams)
def __init__(self, rest, config):
    """Initialize the monitor: tracked statuses, AMQ metadata and the queue."""
    super(HeartbeatMonitor, self).__init__(rest, config)
    # statuses whose whole documents are fetched for site metrics
    self.initialStatus = ['Available', 'Negotiating', 'Acquired']
    self.producer = "global_workqueue"
    self.docTypeAMQ = "cms_%s_info" % self.producer
    self.globalQ = globalQueue(logger=self.logger, **config.queueParams)
def setUp(self):
    """
    If we dont have a wmspec file create one.

    Builds the full fixture for the WorkQueue tests: emulators, several
    saved workload specs (production, ReReco, parent processing, black/white
    list variants), a global queue, two local queues, a standalone queue and
    the WMBS site/location records.
    """
    # only requestMgr runs for real; phedex/dbs/siteDB are emulated
    EmulatorHelper.setEmulators(phedex = True, dbs = True, siteDB = True, requestMgr = False)
    #set up WMAgent config file for couchdb
    self.configFile = EmulatorSetup.setupWMAgentConfig()
    WorkQueueTestCase.setUp(self)
    # Basic production Spec
    self.spec = monteCarloWorkload('testProduction', mcArgs)
    getFirstTask(self.spec).setSiteWhitelist(['T2_XX_SiteA', 'T2_XX_SiteB'])
    getFirstTask(self.spec).addProduction(totalevents = 10000)
    self.spec.setSpecUrl(os.path.join(self.workDir, 'testworkflow.spec'))
    self.spec.save(self.spec.specUrl())
    # Sample Tier1 ReReco spec
    self.processingSpec = rerecoWorkload('testProcessing', rerecoArgs)
    self.processingSpec.setSpecUrl(os.path.join(self.workDir, 'testProcessing.spec'))
    self.processingSpec.save(self.processingSpec.specUrl())
    # Sample Tier1 ReReco spec (with a parent dataset)
    self.parentProcSpec = rerecoWorkload('testParentProcessing', parentProcArgs)
    self.parentProcSpec.setSpecUrl(os.path.join(self.workDir, 'testParentProcessing.spec'))
    self.parentProcSpec.save(self.parentProcSpec.specUrl())
    # ReReco spec with blacklist
    self.blacklistSpec = rerecoWorkload('blacklistSpec', rerecoArgs)
    self.blacklistSpec.setSpecUrl(os.path.join(self.workDir, 'testBlacklist.spec'))
    getFirstTask(self.blacklistSpec).data.constraints.sites.blacklist = ['T2_XX_SiteA']
    self.blacklistSpec.save(self.blacklistSpec.specUrl())
    # ReReco spec with whitelist
    self.whitelistSpec = rerecoWorkload('whitelistlistSpec', rerecoArgs)
    self.whitelistSpec.setSpecUrl(os.path.join(self.workDir, 'testWhitelist.spec'))
    getFirstTask(self.whitelistSpec).data.constraints.sites.whitelist = ['T2_XX_SiteB']
    self.whitelistSpec.save(self.whitelistSpec.specUrl())
    # setup Mock DBS and PhEDEx
    inputDataset = getFirstTask(self.processingSpec).inputDataset()
    self.dataset = "/%s/%s/%s" % (inputDataset.primary, inputDataset.processed, inputDataset.tier)
    # Create queues
    globalCouchUrl = "%s/%s" % (self.testInit.couchUrl, self.globalQDB)
    self.globalQueue = globalQueue(DbName = self.globalQDB,
                                   InboxDbName = self.globalQInboxDB,
                                   QueueURL = globalCouchUrl)
    # self.midQueue = WorkQueue(SplitByBlock = False, # mid-level queue
    #                           PopulateFilesets = False,
    #                           ParentQueue = self.globalQueue,
    #                           CacheDir = None)
    # ignore mid queue as it causes database duplications
    # copy jobStateMachine couchDB configuration here since we don't want/need
    # to pass the whole configuration
    jobCouchConfig = Configuration()
    jobCouchConfig.section_("JobStateMachine")
    jobCouchConfig.JobStateMachine.couchurl = os.environ["COUCHURL"]
    jobCouchConfig.JobStateMachine.couchDBName = "testcouchdb"
    # copy bossAir configuration here since we don't want/need to pass the
    # whole configuration
    bossAirConfig = Configuration()
    bossAirConfig.section_("BossAir")
    bossAirConfig.BossAir.pluginDir = "WMCore.BossAir.Plugins"
    bossAirConfig.BossAir.pluginNames = ["CondorPlugin"]
    bossAirConfig.section_("Agent")
    bossAirConfig.Agent.agentName = "TestAgent"
    self.localQueue = localQueue(DbName = self.localQDB,
                                 InboxDbName = self.localQInboxDB,
                                 ParentQueueCouchUrl = globalCouchUrl,
                                 JobDumpConfig = jobCouchConfig,
                                 BossAirConfig = bossAirConfig,
                                 CacheDir = self.workDir)
    self.localQueue2 = localQueue(DbName = self.localQDB2,
                                  InboxDbName = self.localQInboxDB2,
                                  ParentQueueCouchUrl = globalCouchUrl,
                                  JobDumpConfig = jobCouchConfig,
                                  BossAirConfig = bossAirConfig,
                                  CacheDir = self.workDir)
    # configuration for the Alerts messaging framework, work (alerts) and
    # control channel addresses to which alerts are sent;
    # these are destination addresses where AlertProcessor:Receiver listens
    config = Configuration()
    config.section_("Alert")
    config.Alert.address = "tcp://127.0.0.1:5557"
    config.Alert.controlAddr = "tcp://127.0.0.1:5559"
    # standalone queue for unit tests
    self.queue = WorkQueue(JobDumpConfig = jobCouchConfig,
                           BossAirConfig = bossAirConfig,
                           DbName = self.queueDB,
                           InboxDbName = self.queueInboxDB,
                           CacheDir = self.workDir,
                           config = config)
    # create relevant sites in wmbs
    rc = ResourceControl()
    for site, se in self.queue.SiteDB.mapping.items():
        rc.insertSite(site, 100, se, cmsName = site)
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = threading.currentThread().logger,
                                dbinterface = threading.currentThread().dbi)
        addLocation = daofactory(classname = "Locations.New")
        addLocation.execute(siteName = site, seName = se)
def __init__(self, rest, config):
    """Set up the location-update task with its global workqueue handle."""
    super(LocationUpdateTask, self).__init__(config)
    # the queue shares this task's logger so location updates are traceable
    self.globalQ = globalQueue(logger=self.logger, **config.queueParams)
def __init__(self, rest, config):
    """Set up the ReqMgr interaction task: queue handle and ReqMgr interface."""
    super(ReqMgrInteractionTask, self).__init__(config)
    # both collaborators share this task's logger for traceable activity
    self.globalQ = globalQueue(logger=self.logger, **config.queueParams)
    self.reqMgrInt = WorkQueueReqMgrInterface(logger=self.logger,
                                              **config.reqMgrConfig)
from __future__ import print_function

from WMCore.WorkQueue.WorkQueue import globalQueue

### Same configuration as defined in the deployment scripts
BASE_URL = "https://cmsweb.cern.ch"
COUCH_URL = "%s/couchdb" % BASE_URL
REQMGR2 = "%s/reqmgr2" % BASE_URL
WEBURL = "%s/%s" % (COUCH_URL, "workqueue")
LOG_DB_URL = "%s/wmstats_logdb" % COUCH_URL
LOG_REPORTER = "global_workqueue"

queueParams = {}
queueParams['CouchUrl'] = COUCH_URL
queueParams['DbName'] = "workqueue"
queueParams['InboxDbName'] = "workqueue_inbox"
queueParams['WMStatsCouchUrl'] = "%s/%s" % (COUCH_URL, "wmstats")
queueParams['QueueURL'] = WEBURL
queueParams['ReqMgrServiceURL'] = REQMGR2
queueParams['RequestDBURL'] = "%s/%s" % (COUCH_URL, "reqmgr_workload_cache")
queueParams['central_logdb_url'] = LOG_DB_URL
queueParams['log_reporter'] = LOG_REPORTER
queueParams['rucioAccount'] = ""


def main():
    """Refresh the data-location information of the production global workqueue."""
    globalQ = globalQueue(**queueParams)
    globalQ.updateLocationInfo()


if __name__ == "__main__":
    # guard the execution so importing this module no longer triggers a
    # side effect against the production CouchDB instance
    main()