class AlertProcessorTest(unittest.TestCase):
    """
    Start and stop the AlertProcessor harness component and fail the test
    if either transition raises.
    """

    def setUp(self):
        # Standard WMCore test harness: logging, DB connection and schema
        # for the agent + resource-control tables.
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS",
                                               "WMCore.Agent.Database",
                                               "WMCore.ResourceControl"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        # Minimal component configuration for AlertProcessor.
        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")
        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")
        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")
        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3
        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")

    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()

    def testAlertProcessorBasic(self):
        """
        Bring the component up via prepareToStart() and shut it down via
        stopAlertProcessor(); any exception on either path fails the test.
        """
        alertProcessor = AlertProcessor(self.config)
        try:
            # alertProcessor.startComponent() causes the flow to stop, Harness.py
            # the method just calls prepareToStart() and waits for ever
            # alertProcessor.startDaemon() no good for this either ... puts everything
            # on background
            alertProcessor.prepareToStart()
        except Exception as ex:  # was Python-2-only `except Exception, ex`
            print(ex)
            self.fail(str(ex))
        logging.debug("AlertProcessor and its sub-components should be running now ...")
        logging.debug("Going to stop the component ...")
        # stop via component method
        try:
            alertProcessor.stopAlertProcessor()
        except Exception as ex:
            print(ex)
            self.fail(str(ex))
class AlertProcessorTest(unittest.TestCase):
    """
    Start and stop the AlertProcessor harness component and fail the test
    if either transition raises.
    """

    def setUp(self):
        # Standard WMCore test harness: logging, DB connection and schema
        # for the agent + resource-control tables.
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", "WMCore.Agent.Database", "WMCore.ResourceControl"
        ], useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        # Minimal component configuration for AlertProcessor.
        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")
        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")
        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")
        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3
        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")

    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()

    def testAlertProcessorBasic(self):
        """
        Bring the component up via prepareToStart() and shut it down via
        stopAlertProcessor(); any exception on either path fails the test.
        """
        alertProcessor = AlertProcessor(self.config)
        try:
            # alertProcessor.startComponent() causes the flow to stop, Harness.py
            # the method just calls prepareToStart() and waits for ever
            # alertProcessor.startDaemon() no good for this either ... puts everything
            # on background
            alertProcessor.prepareToStart()
        except Exception as ex:
            print(ex)  # was Python-2-only `print ex`
            self.fail(str(ex))
        logging.debug(
            "AlertProcessor and its sub-components should be running now ...")
        logging.debug("Going to stop the component ...")
        # stop via component method
        try:
            alertProcessor.stopAlertProcessor()
        except Exception as ex:
            print(ex)
            self.fail(str(ex))
class DBSBufferDatasetTest(unittest.TestCase):
    """
    _DBSBufferDatasetTest_

    Collection of tests for the DBSBuffer object
    """

    def setUp(self):
        # Wire up logging, a database connection and the DBS3Buffer schema.
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)

    def tearDown(self):
        # Drop everything created in setUp().
        self.testInit.clearDatabase()

    def testBasic(self):
        """
        _testBasic_

        Exercise create/load/exists on DBSBufferDataset and verify that
        subscriptions can be attached to a dataset.
        """
        # Creating a dataset inserts a row whose id matches exists().
        datasetA = DBSBufferDataset(path='/bogus/bogus/go')
        datasetA.create()
        myThread = threading.currentThread()
        rows = myThread.dbi.processData(
            "SELECT id FROM dbsbuffer_dataset")[0].fetchall()
        self.assertEqual(datasetA.exists(), rows[0][0])

        # A second create() with an identical path must not add a new row.
        datasetACopy = DBSBufferDataset(path='/bogus/bogus/go')
        datasetACopy.create()
        self.assertEqual(datasetA.exists(), datasetACopy.exists())
        rows = myThread.dbi.processData(
            "SELECT COUNT(id) FROM dbsbuffer_dataset")[0].fetchall()
        self.assertEqual(rows[0][0], 1)

        # load() resolves to the same database entity.
        reloaded = DBSBufferDataset(path='/bogus/bogus/go')
        reloaded.load()
        self.assertEqual(reloaded.exists(), datasetA.exists())

        # Attach subscription info taken from a pickled workload spec;
        # adding the same information twice leaves 3 subscriptions in total.
        datasetB = DBSBufferDataset(
            path='/BogusPrimary/Run2012Z-PromptReco-v1/RECO')
        datasetB.create()
        workload = WMWorkloadHelper()
        workload.load(os.path.join(
            getTestBase(),
            'WMComponent_t/PhEDExInjector_t/specs/TestWorkload.pkl'))
        datasetB.addSubscription(
            workload.getSubscriptionInformation()
            ['/BogusPrimary/Run2012Z-PromptReco-v1/RECO'])
        datasetB.addSubscription(
            workload.getSubscriptionInformation()
            ['/BogusPrimary/Run2012Z-PromptReco-v1/RECO'])
        self.assertEqual(len(datasetB['subscriptions']), 3)
        rows = myThread.dbi.processData(
            "SELECT COUNT(id) FROM dbsbuffer_dataset_subscription"
        )[0].fetchall()
        self.assertEqual(rows[0][0], 3)
        return
class WorkQueueTestCase(unittest.TestCase):

    def setSchema(self):
        "this can be override if the schema setting is different"
        self.schema = ["WMCore.WMBS", "WMComponent.DBS3Buffer", "WMCore.BossAir"]
        self.couchApps = ["WorkQueue"]

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """
        # Names of the couch databases used by the various queue flavours.
        self.queueDB = 'workqueue_t'
        self.queueInboxDB = 'workqueue_t_inbox'
        self.globalQDB = 'workqueue_t_global'
        self.globalQInboxDB = 'workqueue_t_global_inbox'
        self.localQDB = 'workqueue_t_local'
        self.localQInboxDB = 'workqueue_t_local_inbox'
        self.localQDB2 = 'workqueue_t_local2'
        self.localQInboxDB2 = 'workqueue_t_local2_inbox'
        self.configCacheDB = 'workqueue_t_config_cache'
        self.setSchema()

        self.testInit = TestInit('WorkQueueTest')
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=self.schema, useDefault=False)

        # Every queue database gets the WorkQueue couch apps installed;
        # the config cache database gets ConfigCache instead.
        for dbName in (self.queueDB, self.queueInboxDB,
                       self.globalQDB, self.globalQInboxDB,
                       self.localQDB, self.localQInboxDB,
                       self.localQDB2, self.localQInboxDB2):
            self.testInit.setupCouch(dbName, *self.couchApps)
        self.testInit.setupCouch(self.configCacheDB, 'ConfigCache')

        couchServer = CouchServer(os.environ.get("COUCHURL"))
        self.configCacheDBInstance = couchServer.connectDatabase(self.configCacheDB)
        self.workDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        #self.testInit.tearDownCouch()
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
class DaemonTest(unittest.TestCase):
    """
    _Daemon_t_

    Unit tests for message services: subscription, priority subscription,
    buffers, etc..
    """

    # minimum number of messages that need to be in queue
    _minMsg = 20
    # number of publish and gets from queue
    _publishAndGet = 10

    def setUp(self):
        "make a logger instance and create tables"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.tempDir = tempfile.mkdtemp()

    def tearDown(self):
        """
        Deletion of the databases
        """
        self.testInit.clearDatabase()
        # ignore_errors=True: the daemon may have removed its own files already
        shutil.rmtree(self.tempDir, True)

    def testA(self):
        """
        __testSubscribe__

        Test daemon creation
        """
        # keep the parent alive
        self.pid = createDaemon(self.tempDir, True)
        try:
            if self.pid != 0:
                # Parent: give the daemon time to write Daemon.xml, then
                # request shutdown through the Details handle.
                time.sleep(2)
                details = Details(os.path.join(self.tempDir, "Daemon.xml"))
                time.sleep(10)
                details.killWithPrejudice()
            else:
                # Child (daemonized): idle until killed by the parent.
                while True:
                    time.sleep(1)
        except Exception:
            # Best effort only. Was a bare `except: pass`, which also
            # swallowed SystemExit/KeyboardInterrupt; narrowed to Exception.
            # Failures here must not bypass the cleanup in `finally`.
            pass
        finally:
            if self.pid == 0:
                # Child must never fall back into the test framework.
                os._exit(-1)
            else:
                os.system('kill -9 %s' % self.pid)
class DaemonTest(unittest.TestCase):
    """
    _Daemon_t_

    Unit tests for message services: subscription, priority subscription,
    buffers, etc..
    """

    # minimum number of messages that need to be in queue
    _minMsg = 20
    # number of publish and gets from queue
    _publishAndGet = 10

    def setUp(self):
        "make a logger instance and create tables"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.tempDir = tempfile.mkdtemp()

    def tearDown(self):
        """
        Deletion of the databases
        """
        self.testInit.clearDatabase()
        # ignore_errors=True: the daemon may have cleaned up its files itself
        shutil.rmtree(self.tempDir, True)

    def testA(self):
        """
        __testSubscribe__

        Test daemon creation
        """
        # keep the parent alive
        self.pid = createDaemon(self.tempDir, True)
        try:
            if self.pid != 0:
                # Parent process: wait for the daemon to come up, then
                # tell it to die via its Daemon.xml details file.
                time.sleep(2)
                details = Details(os.path.join(self.tempDir, "Daemon.xml"))
                time.sleep(10)
                details.killWithPrejudice()
            else:
                # Daemonized child: loop until the parent kills us.
                while True:
                    time.sleep(1)
        except Exception:
            # Best-effort shutdown. The original bare `except: pass` also
            # trapped SystemExit/KeyboardInterrupt; narrowed to Exception
            # so interrupts propagate while cleanup below still runs.
            pass
        finally:
            if self.pid == 0:
                # The child must never return into the test runner.
                os._exit(-1)
            else:
                os.system('kill -9 %s' % self.pid)
class AlertGeneratorTest(unittest.TestCase):
    """
    Tests for the AlertGenerator component; provides a helper to start the
    component and tracks spawned test processes for cleanup.
    """

    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        # NOTE(review): clearDatabase() runs before setDatabaseConnection()
        # here - confirm TestInit tolerates this ordering.
        self.testInit.clearDatabase()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS",
                                               "WMCore.Agent.Database",
                                               "WMCore.ResourceControl"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()
        # AlertGenerator instance
        self.generator = None
        self.config = getConfig(self.testDir)
        self.config.section_("CoreDatabase")
        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")
        # Processes spawned by tests; terminated in tearDown().
        self.testProcesses = []
        self.testComponentDaemonXml = "/tmp/TestComponent/Daemon.xml"

    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
        self.generator = None
        utils.terminateProcesses(self.testProcesses)
        # if the directory and file "/tmp/TestComponent/Daemon.xml" after
        # ComponentsPoller test exist, then delete it
        d = os.path.dirname(self.testComponentDaemonXml)
        if os.path.exists(d):
            shutil.rmtree(d)

    def _startComponent(self):
        """Instantiate AlertGenerator and bring it up; fail on any error."""
        self.generator = AlertGenerator(self.config)
        try:
            # self.proc.startComponent() causes the flow to stop, Harness.py
            # the method just calls prepareToStart() and waits for ever
            # self.proc.startDaemon() no good for this either ... puts everything
            # on background
            self.generator.prepareToStart()
        except Exception as ex:  # was Python-2-only `except Exception, ex`
            print(ex)
            self.fail(str(ex))
        print("AlertGenerator and its sub-components should be running now ...")
class JobTest(unittest.TestCase): def setUp(self): """ _setUp_ Setup the database and logging connection. Try to create all of the WMBS tables. """ self.testInit = TestInit(__file__) self.testInit.setLogging() self.testInit.setDatabaseConnection() self.testInit.setSchema(customModules=["WMCore.WMBS"], useDefault=False) myThread = threading.currentThread() self.daoFactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger, dbinterface=myThread.dbi) locationNew = self.daoFactory(classname="Locations.New") locationNew.execute(siteName="test.site.ch", pnn="T2_CH_CERN") locationNew.execute(siteName="test2.site.ch", pnn="T2_CH_CERN") return def tearDown(self): """ _tearDown_ Drop all the WMBS tables. """ self.testInit.clearDatabase() def createTestJob(self, subscriptionType="Merge"): """ _createTestJob_ Create a test job with two files as input. This will also create the appropriate workflow, jobgroup and subscription. """ testWorkflow = Workflow(spec=makeUUID(), owner="Simon", name=makeUUID(), task="Test") testWorkflow.create() testWMBSFileset = Fileset(name="TestFileset") testWMBSFileset.create() testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow, type=subscriptionType) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10) testFileA.addRun(Run(1, *[45])) testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10) testFileB.addRun(Run(1, *[46])) testFileA.create() testFileB.create() testJob = Job(name=makeUUID(), files=[testFileA, testFileB]) testJob["couch_record"] = "somecouchrecord" testJob["location"] = "test.site.ch" testJob.create(group=testJobGroup) testJob.associateFiles() return testJob def testCreateDeleteExists(self): """ _testCreateDeleteExists_ Create and then delete a job. 
Use the job class's exists() method to determine if the job has been written to the database before it is created, after it has been created and after it has been deleted. """ testWorkflow = Workflow(spec="spec.xml", owner="Simon", name="wf001", task="Test") testWorkflow.create() testWMBSFileset = Fileset(name="TestFileset") testWMBSFileset.create() testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10) testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10) testFileA.create() testFileB.create() testJob = Job(name="TestJob", files=[testFileA, testFileB]) assert testJob.exists() == False, \ "ERROR: Job exists before it was created" testJob.create(group=testJobGroup) assert testJob.exists() >= 0, \ "ERROR: Job does not exist after it was created" testJob.delete() assert testJob.exists() == False, \ "ERROR: Job exists after it was delete" return def testCreateTransaction(self): """ _testCreateTransaction_ Create a job and save it to the database. Roll back the database transaction and verify that the job is no longer in the database. 
""" testWorkflow = Workflow(spec="spec.xml", owner="Simon", name="wf001", task="Test") testWorkflow.create() testWMBSFileset = Fileset(name="TestFileset") testWMBSFileset.create() testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10) testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10) testFileA.create() testFileB.create() myThread = threading.currentThread() myThread.transaction.begin() testJob = Job(name="TestJob", files=[testFileA, testFileB]) assert testJob.exists() == False, \ "ERROR: Job exists before it was created" testJob.create(group=testJobGroup) assert testJob.exists() >= 0, \ "ERROR: Job does not exist after it was created" myThread.transaction.rollback() assert testJob.exists() == False, \ "ERROR: Job exists after transaction was rolled back." return def testDeleteTransaction(self): """ _testDeleteTransaction_ Create a new job and commit it to the database. Start a new transaction and delete the file from the database. Verify that the file has been deleted. After that, roll back the transaction and verify that the job is once again in the database. 
""" testWorkflow = Workflow(spec="spec.xml", owner="Simon", name="wf001", task="Test") testWorkflow.create() testWMBSFileset = Fileset(name="TestFileset") testWMBSFileset.create() testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10) testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10) testFileA.create() testFileB.create() testJob = Job(name="TestJob", files=[testFileA, testFileB]) assert testJob.exists() == False, \ "ERROR: Job exists before it was created" testJob.create(group=testJobGroup) assert testJob.exists() >= 0, \ "ERROR: Job does not exist after it was created" myThread = threading.currentThread() myThread.transaction.begin() testJob.delete() assert testJob.exists() == False, \ "ERROR: Job exists after it was delete" myThread.transaction.rollback() assert testJob.exists() >= 0, \ "ERROR: Job does not exist after transaction was rolled back." return def testCreateDeleteExistsNoFiles(self): """ _testCreateDeleteExistsNoFiles_ Create and then delete a job but don't add any input files to it. Use the job class's exists() method to determine if the job has been written to the database before it is created, after it has been created and after it has been deleted. 
""" testWorkflow = Workflow(spec="spec.xml", owner="Simon", name="wf001", task="Test") testWorkflow.create() testWMBSFileset = Fileset(name="TestFileset") testWMBSFileset.create() testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() testJob = Job(name="TestJob") assert testJob.exists() == False, \ "ERROR: Job exists before it was created" testJob.create(group=testJobGroup) assert testJob.exists() >= 0, \ "ERROR: Job does not exist after it was created" testJob.delete() assert testJob.exists() == False, \ "ERROR: Job exists after it was delete" return def testLoad(self): """ _testLoad_ Create a job and save it to the database. Load it back from the database using the name and the id and then verify that all information was loaded correctly. """ testJobA = self.createTestJob() testJobB = Job(id=testJobA["id"]) testJobC = Job(name=testJobA["name"]) testJobB.load() testJobC.load() assert (testJobA["id"] == testJobB["id"]) and \ (testJobA["name"] == testJobB["name"]) and \ (testJobA["jobgroup"] == testJobB["jobgroup"]) and \ (testJobA["couch_record"] == testJobB["couch_record"]) and \ (testJobA["location"] == testJobB["location"]), \ "ERROR: Load from ID didn't load everything correctly" assert (testJobA["id"] == testJobC["id"]) and \ (testJobA["name"] == testJobC["name"]) and \ (testJobA["jobgroup"] == testJobC["jobgroup"]) and \ (testJobA["couch_record"] == testJobC["couch_record"]) and \ (testJobA["location"] == testJobC["location"]), \ "ERROR: Load from name didn't load everything correctly" self.assertEqual(testJobB['outcome'], 'failure') self.assertEqual(testJobC['outcome'], 'failure') self.assertEqual(testJobB['fwjr'], None) self.assertEqual(testJobC['fwjr'], None) return def testLoadData(self): """ _testLoadData_ Create a job and save it to the database. Load it back from the database using the name and the id. 
Verify that all job information is correct including input files and the job mask. """ testJobA = self.createTestJob() testJobA["mask"]["FirstEvent"] = 1 testJobA["mask"]["LastEvent"] = 2 testJobA["mask"]["FirstLumi"] = 3 testJobA["mask"]["LastLumi"] = 4 testJobA["mask"]["FirstRun"] = 5 testJobA["mask"]["LastRun"] = 6 testJobA.save() testJobB = Job(id=testJobA["id"]) testJobC = Job(name=testJobA["name"]) testJobB.loadData() testJobC.loadData() assert (testJobA["id"] == testJobB["id"]) and \ (testJobA["name"] == testJobB["name"]) and \ (testJobA["jobgroup"] == testJobB["jobgroup"]) and \ (testJobA["couch_record"] == testJobB["couch_record"]) and \ (testJobA["location"] == testJobB["location"]), \ "ERROR: Load from ID didn't load everything correctly" assert (testJobA["id"] == testJobC["id"]) and \ (testJobA["name"] == testJobC["name"]) and \ (testJobA["jobgroup"] == testJobC["jobgroup"]) and \ (testJobA["couch_record"] == testJobC["couch_record"]) and \ (testJobA["location"] == testJobC["location"]), \ "ERROR: Load from name didn't load everything correctly" assert testJobA["mask"] == testJobB["mask"], \ "ERROR: Job mask did not load properly" assert testJobA["mask"] == testJobC["mask"], \ "ERROR: Job mask did not load properly" goldenFiles = testJobA.getFiles() for testFile in testJobB.getFiles(): assert testFile in goldenFiles, \ "ERROR: Job loaded an unknown file" goldenFiles.remove(testFile) assert len(goldenFiles) == 0, \ "ERROR: Job didn't load all files" goldenFiles = testJobA.getFiles() for testFile in testJobC.getFiles(): assert testFile in goldenFiles, \ "ERROR: Job loaded an unknown file" goldenFiles.remove(testFile) assert len(goldenFiles) == 0, \ "ERROR: Job didn't load all files" return def testGetFiles(self): """ _testGetFiles_ Test the Job's getFiles() method. This should load the files from the database if they haven't been loaded already. 
""" testJobA = self.createTestJob() testJobB = Job(id=testJobA["id"]) testJobB.loadData() goldenFiles = testJobA.getFiles() for testFile in testJobB.getFiles(): assert testFile in goldenFiles, \ "ERROR: Job loaded an unknown file: %s" % testFile goldenFiles.remove(testFile) assert len(goldenFiles) == 0, \ "ERROR: Job didn't load all files" return def testSaveTransaction(self): """ _testSaveTransaction_ Create a job and a job mask and save them both to the database. Load the job from the database and verify that everything was written correctly. Begin a new transaction and update the job mask again. Load the mask and verify that it's correct. Finally, rollback the transaction and reload the mask to verify that it is in the correct state. """ testJobA = self.createTestJob() testJobA["mask"]["FirstEvent"] = 1 testJobA["mask"]["LastEvent"] = 2 testJobA["mask"]["FirstLumi"] = 3 testJobA["mask"]["LastLumi"] = 4 testJobA["mask"]["FirstRun"] = 5 testJobA["mask"]["LastRun"] = 6 testJobA.save() testJobB = Job(id=testJobA["id"]) testJobB.loadData() assert testJobA["mask"] == testJobB["mask"], \ "ERROR: Job mask did not load properly" myThread = threading.currentThread() myThread.transaction.begin() testJobA["mask"]["FirstEvent"] = 7 testJobA["mask"]["LastEvent"] = 8 testJobA["mask"]["FirstLumi"] = 9 testJobA["mask"]["LastLumi"] = 10 testJobA["mask"]["FirstRun"] = 11 testJobA["mask"]["LastRun"] = 12 testJobA["name"] = "stevesJob" testJobA["couch_record"] = "someCouchRecord" testJobA["location"] = "test2.site.ch" testJobA.save() testJobC = Job(id=testJobA["id"]) testJobC.loadData() assert testJobA["mask"] == testJobC["mask"], \ "ERROR: Job mask did not load properly" assert testJobC["name"] == "stevesJob", \ "ERROR: Job name did not save" assert testJobC["couch_record"] == "someCouchRecord", \ "ERROR: Job couch record did not save" assert testJobC["location"] == "test2.site.ch", \ "ERROR: Job site did not save" myThread.transaction.rollback() testJobD = Job(id=testJobA["id"]) 
testJobD.loadData() assert testJobB["mask"] == testJobD["mask"], \ "ERROR: Job mask did not load properly" return def testJobState(self): """ _testJobState_ Unittest to see if we can figure out what the jobState actually is and set it """ testJobA = self.createTestJob() value = testJobA.getState() self.assertEqual(value, 'new') return def testJobCacheDir(self): """ _testJobCacheDir_ Check retrieval of the jobCache directory. """ testJobA = self.createTestJob() value = testJobA.getCache() self.assertEqual(value, None) testJobA.setCache('UnderTheDeepBlueSea') value = testJobA.getCache() self.assertEqual(value, 'UnderTheDeepBlueSea') return def testGetOutputParentLFNs(self): """ _testGetOutputParentLFNs_ Verify that the getOutputDBSParentLFNs() method returns the correct parent LFNs. """ testWorkflow = Workflow(spec="spec.xml", owner="Simon", name="wf001", task="Test") testWorkflow.create() testWMBSFileset = Fileset(name="TestFileset") testWMBSFileset.create() testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10, merged=True) testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10, merged=True) testFileC = File(lfn="/this/is/a/lfnC", size=1024, events=10, merged=False) testFileD = File(lfn="/this/is/a/lfnD", size=1024, events=10, merged=False) testFileE = File(lfn="/this/is/a/lfnE", size=1024, events=10, merged=True) testFileF = File(lfn="/this/is/a/lfnF", size=1024, events=10, merged=True) testFileA.create() testFileB.create() testFileC.create() testFileD.create() testFileE.create() testFileF.create() testFileE.addChild(testFileC["lfn"]) testFileF.addChild(testFileD["lfn"]) testJobA = Job(name="TestJob", files=[testFileA, testFileB]) testJobA["couch_record"] = "somecouchrecord" testJobA["location"] = "test.site.ch" testJobA.create(group=testJobGroup) 
testJobA.associateFiles() testJobB = Job(name="TestJobB", files=[testFileC, testFileD]) testJobB["couch_record"] = "somecouchrecord" testJobB["location"] = "test.site.ch" testJobB.create(group=testJobGroup) testJobB.associateFiles() goldenLFNs = ["/this/is/a/lfnA", "/this/is/a/lfnB"] parentLFNs = testJobA.getOutputDBSParentLFNs() for parentLFN in parentLFNs: assert parentLFN in goldenLFNs, \ "ERROR: Unknown lfn: %s" % parentLFN goldenLFNs.remove(parentLFN) assert len(goldenLFNs) == 0, \ "ERROR: LFNs are missing: %s" % goldenLFNs goldenLFNs = ["/this/is/a/lfnE", "/this/is/a/lfnF"] parentLFNs = testJobB.getOutputDBSParentLFNs() for parentLFN in parentLFNs: assert parentLFN in goldenLFNs, \ "ERROR: Unknown lfn: %s" % parentLFN goldenLFNs.remove(parentLFN) assert len(goldenLFNs) == 0, \ "ERROR: LFNs are missing..." return def testJobFWJRPath(self): """ _testJobFWJRPath_ Verify the correct operation of the Jobs.SetFWJRPath and Jobs.GetFWJRByState DAOs. """ testJobA = self.createTestJob() testJobA["state"] = "complete" testJobB = self.createTestJob() testJobB["state"] = "executing" testJobC = self.createTestJob() testJobC["state"] = "complete" myThread = threading.currentThread() setFWJRAction = self.daoFactory(classname="Jobs.SetFWJRPath") setFWJRAction.execute(jobID=testJobA["id"], fwjrPath="NonsenseA", conn=myThread.transaction.conn, transaction=True) setFWJRAction.execute(jobID=testJobB["id"], fwjrPath="NonsenseB", conn=myThread.transaction.conn, transaction=True) setFWJRAction.execute(jobID=testJobC["id"], fwjrPath="NonsenseC", conn=myThread.transaction.conn, transaction=True) changeStateAction = self.daoFactory(classname="Jobs.ChangeState") changeStateAction.execute(jobs=[testJobA, testJobB, testJobC], conn=myThread.transaction.conn, transaction=True) getJobsAction = self.daoFactory(classname="Jobs.GetFWJRByState") jobs = getJobsAction.execute(state="complete", conn=myThread.transaction.conn, transaction=True) goldenIDs = [testJobA["id"], testJobC["id"]] for job in 
jobs: assert job["id"] in goldenIDs, \ "Error: Unknown job: %s" % job["id"] goldenIDs.remove(job["id"]) if job["id"] == testJobA["id"]: assert job["fwjr_path"] == "NonsenseA", \ "Error: Wrong fwjr path: %s" % job["fwjr_path"] else: assert job["fwjr_path"] == "NonsenseC", \ "Error: Wrong fwjr path: %s" % job["fwjr_path"] assert len(goldenIDs) == 0, \ "Error: Jobs missing: %s" % len(goldenIDs) return def testFailJobInput(self): """ _testFailJobInput_ Test the Jobs.FailInput DAO and verify that it doesn't affect other jobs/subscriptions that run over the same files. """ testWorkflow = Workflow(spec="spec.xml", owner="Steve", name="wf001", task="Test") bogusWorkflow = Workflow(spec="spec1.xml", owner="Steve", name="wf002", task="Test") testWorkflow.create() bogusWorkflow.create() testFileset = Fileset(name="TestFileset") bogusFileset = Fileset(name="BogusFileset") testFileset.create() bogusFileset.create() testSubscription = Subscription(fileset=testFileset, workflow=testWorkflow) bogusSubscription = Subscription(fileset=bogusFileset, workflow=bogusWorkflow) testSubscription.create() bogusSubscription.create() testFileA = File(lfn=makeUUID(), locations="T2_CH_CERN") testFileB = File(lfn=makeUUID(), locations="T2_CH_CERN") testFileC = File(lfn=makeUUID(), locations="T2_CH_CERN") testFileA.create() testFileB.create() testFileC.create() testFileset.addFile([testFileA, testFileB, testFileC]) bogusFileset.addFile([testFileA, testFileB, testFileC]) testFileset.commit() bogusFileset.commit() testSubscription.completeFiles([testFileA, testFileB, testFileC]) bogusSubscription.acquireFiles([testFileA, testFileB, testFileC]) testJobGroup = JobGroup(subscription=testSubscription) bogusJobGroup = JobGroup(subscription=bogusSubscription) testJobGroup.create() bogusJobGroup.create() testJobA = Job(name="TestJobA", files=[testFileA, testFileB, testFileC]) testJobB = Job(name="TestJobB", files=[testFileA, testFileB, testFileC]) bogusJob = Job(name="BogusJob", files=[testFileA, 
testFileB, testFileC])
        testJobA.create(group=testJobGroup)
        testJobB.create(group=testJobGroup)
        bogusJob.create(group=bogusJobGroup)

        testJobA.failInputFiles()
        testJobB.failInputFiles()

        self.assertEqual(len(testSubscription.filesOfStatus("Available")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Acquired")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Failed")), 3)
        self.assertEqual(len(testSubscription.filesOfStatus("Completed")), 0)

        # Move one job to cleanout, then fail the other job's input again.
        changeStateAction = self.daoFactory(classname="Jobs.ChangeState")
        testJobB["state"] = "cleanout"
        changeStateAction.execute([testJobB])

        # Try again
        testJobA.failInputFiles()

        # Should now be failed
        self.assertEqual(len(testSubscription.filesOfStatus("Available")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Acquired")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Failed")), 3)
        self.assertEqual(len(testSubscription.filesOfStatus("Completed")), 0)

        # bogus subscription should be unchanged
        self.assertEqual(len(bogusSubscription.filesOfStatus("Available")), 0)
        self.assertEqual(len(bogusSubscription.filesOfStatus("Acquired")), 3)
        self.assertEqual(len(bogusSubscription.filesOfStatus("Failed")), 0)
        self.assertEqual(len(bogusSubscription.filesOfStatus("Completed")), 0)
        return

    def testCompleteJobInput(self):
        """
        _testCompleteJobInput_

        Verify the correct output of the CompleteInput DAO.  This should mark
        the input for a job as complete once all the jobs that run over a
        particular file have completed successfully.
        """
        # Two parallel workflow/fileset/subscription chains: "test" is the one
        # under test, "bogus" verifies that unrelated subscriptions are
        # untouched.
        testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                name="wf001", task="Test")
        bogusWorkflow = Workflow(spec="spec1.xml", owner="Steve",
                                 name="wf002", task="Test")
        testWorkflow.create()
        bogusWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        bogusFileset = Fileset(name="BogusFileset")
        testFileset.create()
        bogusFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)
        bogusSubscription = Subscription(fileset=bogusFileset,
                                         workflow=bogusWorkflow)
        testSubscription.create()
        bogusSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileB = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileC = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileA.create()
        testFileB.create()
        testFileC.create()

        testFileset.addFile([testFileA, testFileB, testFileC])
        bogusFileset.addFile([testFileA, testFileB, testFileC])
        testFileset.commit()
        bogusFileset.commit()

        testSubscription.acquireFiles([testFileA, testFileB, testFileC])
        bogusSubscription.acquireFiles([testFileA, testFileB, testFileC])

        testJobGroup = JobGroup(subscription=testSubscription)
        bogusJobGroup = JobGroup(subscription=bogusSubscription)
        testJobGroup.create()
        bogusJobGroup.create()

        # Jobs A and B both run over testFileA, so A's input is only complete
        # once B has also succeeded.
        testJobA = Job(name="TestJobA", files=[testFileA])
        testJobB = Job(name="TestJobB", files=[testFileA, testFileB])
        testJobC = Job(name="TestJobC", files=[testFileC])
        bogusJob = Job(name="BogusJob",
                       files=[testFileA, testFileB, testFileC])
        testJobA.create(group=testJobGroup)
        testJobB.create(group=testJobGroup)
        testJobC.create(group=testJobGroup)
        bogusJob.create(group=bogusJobGroup)

        testJobA["outcome"] = "success"
        testJobB["outcome"] = "failure"
        testJobC["outcome"] = "success"
        testJobA.save()
        testJobB.save()
        testJobC.save()

        testJobA.completeInputFiles()

        # B has not succeeded yet, so nothing can be marked complete.
        compFiles = len(testSubscription.filesOfStatus("Completed"))
        assert compFiles == 0, \
            "Error: test sub has wrong number of complete files: %s" % compFiles

        testJobB["outcome"] = "success"
        testJobB.save()

        # Skipping testFileB's lfn -- the asserts below expect it to end up
        # "Failed" rather than "Completed".
        testJobB.completeInputFiles(skipFiles=[testFileB["lfn"]])

        availFiles = len(testSubscription.filesOfStatus("Available"))
        assert availFiles == 0, \
            "Error: test sub has wrong number of available files: %s" % availFiles

        acqFiles = len(testSubscription.filesOfStatus("Acquired"))
        assert acqFiles == 1, \
            "Error: test sub has wrong number of acquired files: %s" % acqFiles

        compFiles = len(testSubscription.filesOfStatus("Completed"))
        assert compFiles == 1, \
            "Error: test sub has wrong number of complete files: %s" % compFiles

        failFiles = len(testSubscription.filesOfStatus("Failed"))
        assert failFiles == 1, \
            "Error: test sub has wrong number of failed files: %s" % failFiles

        # The bogus subscription must still have all three files acquired.
        availFiles = len(bogusSubscription.filesOfStatus("Available"))
        assert availFiles == 0, \
            "Error: test sub has wrong number of available files: %s" % availFiles

        acqFiles = len(bogusSubscription.filesOfStatus("Acquired"))
        assert acqFiles == 3, \
            "Error: test sub has wrong number of acquired files: %s" % acqFiles

        compFiles = len(bogusSubscription.filesOfStatus("Completed"))
        assert compFiles == 0, \
            "Error: test sub has wrong number of complete files: %s" % compFiles

        failFiles = len(bogusSubscription.filesOfStatus("Failed"))
        assert failFiles == 0, \
            "Error: test sub has wrong number of failed files: %s" % failFiles
        return

    def testJobTypeDAO(self):
        """
        _testJobTypeDAO_

        Verify that the Jobs.GetType DAO returns the correct job type.  The
        job type is retrieved from the subscription type.  When only a single
        job is passed.
        """
        testJob = self.createTestJob()

        jobTypeAction = self.daoFactory(classname="Jobs.GetType")
        jobType = jobTypeAction.execute(jobID=testJob["id"])
        assert jobType == "Merge", \
            "Error: GetJobType DAO returned the wrong job type."
        return

    def testJobTypeDAOBulk(self):
        """
        _testJobTypeDAOBulk_

        Verify that the Jobs.GetType DAO returns the correct job type.  The
        job type is retrieved from the subscription type.  When a list of jobs
        ids is passed.
        """
        # One job per subscription type so every type appears in the result.
        testJobA = self.createTestJob(subscriptionType="Merge")
        testJobB = self.createTestJob(subscriptionType="Processing")
        testJobC = self.createTestJob(subscriptionType="Production")
        testJobD = self.createTestJob(subscriptionType="Merge")
        testJobE = self.createTestJob(subscriptionType="Skim")

        jobIds = []
        jobIds.append(testJobA["id"])
        jobIds.append(testJobB["id"])
        jobIds.append(testJobC["id"])
        jobIds.append(testJobD["id"])
        jobIds.append(testJobE["id"])

        jobTypeAction = self.daoFactory(classname="Jobs.GetType")
        jobTypes = jobTypeAction.execute(jobID=jobIds)

        # Index the DAO's output by job id so the asserts are order-independent.
        entryMap = {}
        for entry in jobTypes:
            entryMap[entry["id"]] = entry["type"]

        assert entryMap[testJobA["id"]] == "Merge", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobB["id"]] == "Processing", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobC["id"]] == "Production", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobD["id"]] == "Merge", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobE["id"]] == "Skim", \
            "Error: GetJobType DAO returned the wrong job type."
        return

    def testGetOutputMapDAO(self):
        """
        _testGetOutputMapDAO_

        Verify the proper behavior of the GetOutputMapDAO for a variety of
        different processing chains.
        """
        # Unmerged and merged filesets for each of the three output modules,
        # plus a shared cleanup fileset.
        recoOutputFileset = Fileset(name="RECO")
        recoOutputFileset.create()
        mergedRecoOutputFileset = Fileset(name="MergedRECO")
        mergedRecoOutputFileset.create()
        alcaOutputFileset = Fileset(name="ALCA")
        alcaOutputFileset.create()
        mergedAlcaOutputFileset = Fileset(name="MergedALCA")
        mergedAlcaOutputFileset.create()
        dqmOutputFileset = Fileset(name="DQM")
        dqmOutputFileset.create()
        mergedDqmOutputFileset = Fileset(name="MergedDQM")
        mergedDqmOutputFileset.create()
        cleanupFileset = Fileset(name="Cleanup")
        cleanupFileset.create()

        testWorkflow = Workflow(spec="wf001.xml", owner="Steve",
                                name="TestWF", task="None")
        testWorkflow.create()
        testWorkflow.addOutput("output", recoOutputFileset,
                               mergedRecoOutputFileset)
        testWorkflow.addOutput("ALCARECOStreamCombined", alcaOutputFileset,
                               mergedAlcaOutputFileset)
        testWorkflow.addOutput("DQM", dqmOutputFileset,
                               mergedDqmOutputFileset)
        # Each output module also feeds the cleanup fileset (no merged target).
        testWorkflow.addOutput("output", cleanupFileset)
        testWorkflow.addOutput("ALCARECOStreamCombined", cleanupFileset)
        testWorkflow.addOutput("DQM", cleanupFileset)

        testRecoMergeWorkflow = Workflow(spec="wf002.xml", owner="Steve",
                                         name="TestRecoMergeWF", task="None")
        testRecoMergeWorkflow.create()
        testRecoMergeWorkflow.addOutput("anything", mergedRecoOutputFileset,
                                        mergedRecoOutputFileset)

        testRecoProcWorkflow = Workflow(spec="wf004.xml", owner="Steve",
                                        name="TestRecoProcWF", task="None")
        testRecoProcWorkflow.create()

        testAlcaChildWorkflow = Workflow(spec="wf003.xml", owner="Steve",
                                         name="TestAlcaChildWF", task="None")
        testAlcaChildWorkflow.create()

        inputFile = File(lfn="/path/to/some/lfn", size=600000, events=60000,
                         locations="cmssrm.fnal.gov")
        inputFile.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()
        testFileset.addFile(inputFile)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        split_algo="EventBased",
                                        type="Processing")
        testMergeRecoSubscription = Subscription(
            fileset=recoOutputFileset,
            workflow=testRecoMergeWorkflow,
            split_algo="WMBSMergeBySize",
            type="Merge")
        testProcRecoSubscription = Subscription(fileset=recoOutputFileset,
                                                workflow=testRecoProcWorkflow,
                                                split_algo="FileBased",
                                                type="Processing")
        testChildAlcaSubscription = Subscription(
            fileset=alcaOutputFileset,
            workflow=testAlcaChildWorkflow,
            split_algo="FileBased",
            type="Processing")
        testSubscription.create()
        testMergeRecoSubscription.create()
        testProcRecoSubscription.create()
        testChildAlcaSubscription.create()
        testSubscription.acquireFiles()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()
        testJob = Job(name="SplitJobA", files=[inputFile])
        testJob.create(group=testJobGroup)
        testJob["state"] = "complete"
        testJob.save()

        outputMapAction = self.daoFactory(classname="Jobs.GetOutputMap")
        outputMap = outputMapAction.execute(jobID=testJob["id"])

        assert len(outputMap.keys()) == 3, \
            "Error: Wrong number of outputs for primary workflow."

        # Expected (output_fileset, merged_output_fileset) id pairs per
        # output module; entries are deleted as they are matched below.
        goldenMap = {
            "output": (recoOutputFileset.id, mergedRecoOutputFileset.id),
            "ALCARECOStreamCombined": (alcaOutputFileset.id,
                                       mergedAlcaOutputFileset.id),
            "DQM": (dqmOutputFileset.id, mergedDqmOutputFileset.id)
        }

        for outputID in outputMap.keys():
            for outputFilesets in outputMap[outputID]:
                if outputFilesets["merged_output_fileset"] == None:
                    # Entries without a merged target must be the cleanup
                    # fileset.
                    self.assertEqual(outputFilesets["output_fileset"],
                                     cleanupFileset.id,
                                     "Error: Cleanup fileset is wrong.")
                    continue

                self.assertTrue(outputID in goldenMap.keys(),
                                "Error: Output identifier is missing.")
                self.assertEqual(outputFilesets["output_fileset"],
                                 goldenMap[outputID][0],
                                 "Error: Output fileset is wrong.")
                self.assertEqual(outputFilesets["merged_output_fileset"],
                                 goldenMap[outputID][1],
                                 "Error: Merged output fileset is wrong.")
                del goldenMap[outputID]

        self.assertEqual(len(goldenMap.keys()), 0,
                         "Error: Missing output maps.")
        return

    def testLocations(self):
        """
        _testLocations_

        Test setting and getting locations using DAO objects
        """
        testJob = self.createTestJob()

        jobGetLocation = self.daoFactory(classname="Jobs.GetLocation")
        jobSetLocation = self.daoFactory(classname="Jobs.SetLocation")

        result = jobGetLocation.execute(jobid=testJob['id'])
        self.assertEqual(result, [['test.site.ch']])
        jobSetLocation.execute(jobid=testJob['id'], location="test2.site.ch")
        result = jobGetLocation.execute(jobid=testJob['id'])
        self.assertEqual(result, [['test2.site.ch']])

        # Bulk form: a list of bind dicts returns one dict per job.
        testJob2 = self.createTestJob()
        testJob3 = self.createTestJob()
        binds = [{'jobid': testJob['id']},
                 {'jobid': testJob2['id']},
                 {'jobid': testJob3['id']}]
        result = jobGetLocation.execute(jobid=binds)
        self.assertEqual(result,
                         [{'site_name': 'test2.site.ch', 'id': 1},
                          {'site_name': 'test.site.ch', 'id': 2},
                          {'site_name': 'test.site.ch', 'id': 3}])
        return

    def testGetDataStructsJob(self):
        """
        _testGetDataStructsJob_

        Test the ability to 'cast' as a DataStructs job type
        """
        testJob = self.createTestJob()
        testJob['test'] = 'ThisIsATest'
        testJob.baggage.section_('TestSection')
        testJob.baggage.TestSection.test = 100
        finalJob = testJob.getDataStructsJob()

        # Every key except input_files must carry over verbatim; input files
        # are compared via their DataStructs representation.
        for key in finalJob.keys():
            if key == 'input_files':
                for file in testJob['input_files']:
                    self.assertEqual(
                        file.returnDataStructsFile() in finalJob['input_files'],
                        True)
                continue
            self.assertEqual(testJob[key], finalJob[key])

        self.assertEqual(str(finalJob.__class__),
                         "<class 'WMCore.DataStructs.Job.Job'>")
        self.assertEqual(str(finalJob["mask"].__class__),
                         "<class 'WMCore.DataStructs.Mask.Mask'>")
        for key in testJob["mask"]:
            self.assertEqual(testJob["mask"][key], finalJob["mask"][key],
                             "Error: The masks should be the same")
        self.assertEqual(finalJob.baggage.TestSection.test, 100)
        return

    def testLoadOutputID(self):
        """
        _testLoadOutputID_

        Test whether we can load an output ID for a job
        """
        testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                name="wf001", task="Test")
        testWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testFileA = File(lfn=makeUUID(),
                         locations="test.site.ch")
        testFileB = File(lfn=makeUUID(),
                         locations="test.site.ch")
        testFileA.create()
        testFileB.create()

        testFileset.addFile([testFileA, testFileB])
        testFileset.commit()
        testSubscription.acquireFiles([testFileA, testFileB])

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testJob = Job()
        testJob.create(group=testJobGroup)

        # The job's output fileset should be the one owned by its job group.
        self.assertEqual(testJob.loadOutputID(), testJobGroup.output.id)
        return

    def testLoadForTaskArchiver(self):
        """
        _testLoadForTaskArchiver_

        Tests the return of the DAO for the TaskArchiver
        """
        # Create 2 jobs
        jobA = self.createTestJob()
        jobB = self.createTestJob()

        # Put a mask in one
        mask = Mask()
        mask.addRunAndLumis(1, [45])
        mask.save(jobA['id'])

        # Execute the DAO
        taskArchiverDAO = self.daoFactory(classname="Jobs.LoadForTaskArchiver")
        jobs = taskArchiverDAO.execute([jobA['id'], jobB['id']])

        # Sort the jobs and check the results, we care about id, input files
        # and mask
        jobs.sort(key=lambda x: x['id'])

        jobAprime = jobs[0]
        lfns = [x['lfn'] for x in jobAprime['input_files']]
        self.assertTrue('/this/is/a/lfnA' in lfns,
                        'Missing LFN lfnA from the input files')
        self.assertTrue('/this/is/a/lfnB' in lfns,
                        'Missing LFN lfnB from the input files')

        # NOTE(review): lfnA/lfnB and their run/lumi values come from
        # createTestJob, defined earlier in this file.
        for inFile in jobAprime['input_files']:
            if inFile['lfn'] == '/this/is/a/lfnA':
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [45], 'The lumis are wrong')
            else:
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [46], 'The lumis are wrong')

        mask = jobAprime['mask']
        self.assertEqual(mask['runAndLumis'], {1: [[45, 45]]},
                         "Wrong run and lumis in mask")

        jobBprime = jobs[1]
        for inFile in jobBprime['input_files']:
            if inFile['lfn'] == '/this/is/a/lfnA':
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [45], 'The lumis are wrong')
            else:
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [46], 'The lumis are wrong')

        # jobB has no saved mask, so filtering by it must be a no-op.
        runs = []
        for inputFile in jobBprime['input_files']:
            runs.extend(inputFile.getRuns())
        self.assertEqual(jobBprime['mask'].filterRunLumisByMask(runs=runs),
                         runs, "Wrong mask in jobB")
        return

    def testMask(self):
        """
        _testMask_

        Test the new mask setup
        """
        testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                name="wf001", task="Test")
        testWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="test.site.ch")
        testFileB = File(lfn=makeUUID(), locations="test.site.ch")
        testFileA.create()
        testFileB.create()

        testFileset.addFile([testFileA, testFileB])
        testFileset.commit()
        testSubscription.acquireFiles([testFileA, testFileB])

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        # Attach run/lumi ranges to the mask before the job is persisted.
        testJob = Job()
        testJob['mask'].addRunAndLumis(run=100, lumis=[101, 102])
        testJob['mask'].addRunAndLumis(run=200, lumis=[201, 202])
        testJob.create(group=testJobGroup)

        # Reload from the database and check the mask round-tripped.
        loadJob = Job(id=testJob.exists())
        loadJob.loadData()

        runs = loadJob['mask'].getRunAndLumis()
        self.assertEqual(len(runs), 2)
        self.assertEqual(runs[100], [[101, 102]])
        self.assertEqual(runs[200], [[201, 202]])

        # Filtering keeps only the lumis covered by the mask; run 300 is not
        # in the mask at all and is dropped.
        bigRun = Run(100, *[101, 102, 103, 104])
        badRun = Run(300, *[1001, 1002])
        result = loadJob['mask'].filterRunLumisByMask([bigRun, badRun])

        self.assertEqual(len(result), 1)
        alteredRun = result.pop()
        self.assertEqual(alteredRun.run, 100)
        self.assertEqual(alteredRun.lumis, [101, 102])

        run0 = Run(300, *[1001, 1002])
        run1 = Run(300, *[1001, 1002])
        loadJob['mask'].filterRunLumisByMask([run0, run1])
        return

    def test_AutoIncrementCheck(self):
        """
        _AutoIncrementCheck_

        Test and see whether we can find and set the auto_increment values
        """
        # The Jobs.AutoIncrementCheck DAO is MySQL-only; skip elsewhere.
        myThread = threading.currentThread()
        if not myThread.dialect.lower() == 'mysql':
            return

        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve", name="wf001", task="Test")
        testWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="test.site.ch")
        testFileB = File(lfn=makeUUID(), locations="test.site.ch")
        testFileA.create()
        testFileB.create()

        testFileset.addFile([testFileA, testFileB])
        testFileset.commit()
        testSubscription.acquireFiles([testFileA, testFileB])

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        incrementDAO = self.daoFactory(classname="Jobs.AutoIncrementCheck")
        incrementDAO.execute()

        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 1)

        incrementDAO.execute()
        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 2)

        # An explicit input moves the auto_increment counter forward.
        incrementDAO.execute(input=10)
        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 11)

        # An input below the current counter must not move it backwards.
        incrementDAO.execute(input=5)
        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 12)
        return
class DBFormatterTest(unittest.TestCase):
    """
    _DBFormatterTest_

    Unit tests for the DBFormatter class
    """

    def setUp(self):
        """Create a logger instance and install the test schema."""
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setSchema(customModules=["WMQuality.TestDB"],
                                useDefault=False)
        self.selectSQL = "SELECT * FROM test_tableb"

    def tearDown(self):
        """Tear the test database back down."""
        self.testInit.clearDatabase()

    def stuffDB(self):
        """Insert three rows (unicode, native str and bytes values) into
        the test table."""
        insertSQL = ("INSERT INTO test_tableb (column1, column2, column3) "
                     "values (:bind1, :bind2, :bind3)")
        rows = [(u'value1a', 1, u'value2a'),
                ('value1b', 2, 'value2b'),
                (b'value1c', 3, b'value2d')]
        insertBinds = [{'bind1': c1, 'bind2': c2, 'bind3': c3}
                       for (c1, c2, c3) in rows]
        threading.currentThread().dbi.processData(insertSQL, insertBinds)

    def testBFormatting(self):
        """
        Exercise format, formatOne, formatDict and formatOneDict on the same
        result set.
        """
        # Seed the table with the three known rows.
        self.stuffDB()

        myThread = threading.currentThread()
        formatter = DBFormatter(myThread.logger, myThread.dbi)

        rawResult = myThread.dbi.processData(self.selectSQL)
        self.assertEqual(formatter.format(rawResult),
                         [['value1a', 1, 'value2a'],
                          ['value1b', 2, 'value2b'],
                          ['value1c', 3, 'value2d']])

        rawResult = myThread.dbi.processData(self.selectSQL)
        firstRow = formatter.formatOne(rawResult)
        print('test1 ' + str(firstRow))
        self.assertEqual(firstRow, ['value1a', 1, 'value2a'])

        rawResult = myThread.dbi.processData(self.selectSQL)
        self.assertEqual(formatter.formatDict(rawResult),
                         [{'column1': 'value1a', 'column2': 1,
                           'column3': 'value2a'},
                          {'column1': 'value1b', 'column2': 2,
                           'column3': 'value2b'},
                          {'column1': 'value1c', 'column2': 3,
                           'column3': 'value2d'}])

        rawResult = myThread.dbi.processData(self.selectSQL)
        self.assertEqual(formatter.formatOneDict(rawResult),
                         {'column1': 'value1a', 'column2': 1,
                          'column3': 'value2a'})
class ResourceControlTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Install schema and create a DAO factory for WMBS.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS",
                                               "WMCore.ResourceControl",
                                               "WMCore.BossAir"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.baDaoFactory = DAOFactory(package="WMCore.BossAir",
                                       logger=myThread.logger,
                                       dbinterface=myThread.dbi)

        # BossAir DAOs used to register run-jobs and the states they can take.
        self.insertRunJob = self.baDaoFactory(classname="NewJobs")
        self.insertState = self.baDaoFactory(classname="NewState")
        states = ['PEND', 'RUN', 'Idle', 'Running']
        self.insertState.execute(states)

        self.tempDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear the schema.
        """
        self.testInit.clearDatabase()
        return

    def testInsert(self):
        """
        _testInsert_

        Verify that inserting sites and thresholds works correctly, even if
        the site or threshold already exists.
        """
        myResourceControl = ResourceControl()
        # Duplicate insertSite/insertThreshold calls must not error; the
        # later threshold values (250/150 for Merge) win.
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite2", 100, 200, "testSE2",
                                     "testCE2")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite1", "Merge", 250, 150)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 30)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 100)

        createThresholds = myResourceControl.listThresholdsForCreate()

        self.assertEqual(len(createThresholds.keys()), 2,
                         "Error: Wrong number of site in Resource Control DB")
        self.assertTrue("testSite1" in createThresholds.keys(),
                        "Error: Test Site 1 missing from thresholds.")
        self.assertTrue("testSite2" in createThresholds.keys(),
                        "Error: Test Site 2 missing from thresholds.")
        self.assertEqual(createThresholds["testSite1"]["total_slots"], 10,
                         "Error: Wrong number of total slots.")
        self.assertEqual(createThresholds["testSite1"]["pending_jobs"], 0,
                         "Error: Wrong number of running jobs: %s" %
                         createThresholds["testSite1"]["pending_jobs"])
        self.assertEqual(createThresholds["testSite2"]["total_slots"], 100,
                         "Error: Wrong number of total slots.")
        self.assertEqual(createThresholds["testSite2"]["pending_jobs"], 0,
                         "Error: Wrong number of running jobs.")

        thresholds = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(len(thresholds.keys()), 2,
                         "Error: Wrong number of sites in Resource Control DB")
        self.assertTrue("testSite1" in thresholds.keys(),
                        "Error: testSite1 missing from thresholds.")
        self.assertTrue("testSite2" in thresholds.keys(),
                        "Error: testSite2 missing from thresholds.")

        site1Info = thresholds["testSite1"]
        site2Info = thresholds["testSite2"]
        site1Thresholds = site1Info["thresholds"]
        site2Thresholds = site2Info["thresholds"]

        # Pick the per-task-type threshold entries out of each site's list.
        procThreshold1 = None
        procThreshold2 = None
        mergeThreshold1 = None
        mergeThreshold2 = None
        for threshold in site1Thresholds:
            if threshold["task_type"] == "Merge":
                mergeThreshold1 = threshold
            elif threshold["task_type"] == "Processing":
                procThreshold1 = threshold
        for threshold in site2Thresholds:
            if threshold["task_type"] == "Merge":
                mergeThreshold2 = threshold
            elif threshold["task_type"] == "Processing":
                procThreshold2 = threshold

        self.assertEqual(len(site1Thresholds), 2,
                         "Error: Wrong number of task types.")
        self.assertEqual(len(site2Thresholds), 2,
                         "Error: Wrong number of task types.")
        self.assertNotEqual(procThreshold1, None)
        self.assertNotEqual(procThreshold2, None)
        self.assertNotEqual(mergeThreshold1, None)
        self.assertNotEqual(mergeThreshold2, None)

        self.assertEqual(site1Info["total_pending_slots"], 10,
                         "Error: Site thresholds wrong")
        self.assertEqual(site1Info["total_running_slots"], 20,
                         "Error: Site thresholds wrong")
        self.assertEqual(site1Info["total_running_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(site1Info["total_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(procThreshold1["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(procThreshold1["max_slots"], 20,
                         "Error: Site thresholds wrong")
        self.assertEqual(procThreshold1["pending_slots"], 10,
                         "Error: Site thresholds wrong")

        # Merge threshold for site 1 must reflect the second (overriding)
        # insertThreshold call.
        self.assertEqual(mergeThreshold1["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(mergeThreshold1["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(mergeThreshold1["max_slots"], 250,
                         "Error: Site thresholds wrong")
        self.assertEqual(mergeThreshold1["pending_slots"], 150,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_pending_slots"], 100,
                         "Error: Site thresholds wrong")
        self.assertEqual(site2Info["total_running_slots"], 200,
                         "Error: Site thresholds wrong")
        self.assertEqual(site2Info["total_running_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(site2Info["total_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(procThreshold2["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(procThreshold2["max_slots"], 50,
                         "Error: Site thresholds wrong")
        self.assertEqual(procThreshold2["pending_slots"], 30,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(mergeThreshold2["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")
        self.assertEqual(mergeThreshold2["max_slots"], 135,
                         "Error: Site thresholds wrong")
        self.assertEqual(mergeThreshold2["pending_slots"], 100,
                         "Error: Site thresholds wrong")

    def testList(self):
        """
        _testList_

        Test the functions that list thresholds for creating jobs and
        submitting jobs.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1",
                                     "T1_US_FNAL", "LsfPlugin")
        myResourceControl.insertSite("testSite2", 20, 40, "testSE2", "testCE2")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 25)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 65)

        testWorkflow = Workflow(spec=makeUUID(), owner="Steve",
                                name=makeUUID(), task="Test")
        testWorkflow.create()

        testFilesetA = Fileset(name="TestFilesetA")
        testFilesetA.create()
        testFilesetB = Fileset(name="TestFilesetB")
        testFilesetB.create()
        testFilesetC = Fileset(name="TestFilesetC")
        testFilesetC.create()

        testFileA = File(lfn="testFileA",
                         locations=set(["testSE1", "testSE2"]))
        testFileA.create()
        testFilesetA.addFile(testFileA)
        testFilesetA.commit()
        testFilesetB.addFile(testFileA)
        testFilesetB.commit()
        testFilesetC.addFile(testFileA)
        testFilesetC.commit()

        # Subscription A white-lists site 1, B black-lists it, C (Merge) has
        # no list at all.
        testSubscriptionA = Subscription(fileset=testFilesetA,
                                         workflow=testWorkflow,
                                         type="Processing")
        testSubscriptionA.create()
        testSubscriptionA.addWhiteBlackList([{"site_name": "testSite1",
                                              "valid": True}])
        testSubscriptionB = Subscription(fileset=testFilesetB,
                                         workflow=testWorkflow,
                                         type="Processing")
        testSubscriptionB.create()
        testSubscriptionB.addWhiteBlackList([{"site_name": "testSite1",
                                              "valid": False}])
        testSubscriptionC = Subscription(fileset=testFilesetC,
                                         workflow=testWorkflow,
                                         type="Merge")
        testSubscriptionC.create()

        testJobGroupA = JobGroup(subscription=testSubscriptionA)
        testJobGroupA.create()
        testJobGroupB = JobGroup(subscription=testSubscriptionB)
        testJobGroupB.create()
        testJobGroupC = JobGroup(subscription=testSubscriptionC)
        testJobGroupC.create()

        # Site1, Has been assigned a location and is complete.
        testJobA = Job(name="testJobA", files=[testFileA])
        testJobA["couch_record"] = makeUUID()
        testJobA.create(group=testJobGroupA)
        testJobA["state"] = "success"

        # Site 1, Has been assigned a location and is incomplete.
        testJobB = Job(name="testJobB", files=[testFileA])
        testJobB["couch_record"] = makeUUID()
        testJobB.create(group=testJobGroupA)
        testJobB["state"] = "executing"
        runJobB = RunJob()
        runJobB.buildFromJob(testJobB)
        runJobB["status"] = "PEND"

        # Does not have a location, white listed to site 1
        testJobC = Job(name="testJobC", files=[testFileA])
        testJobC["couch_record"] = makeUUID()
        testJobC.create(group=testJobGroupA)
        testJobC["state"] = "new"

        # Site 2, Has been assigned a location and is complete.
        testJobD = Job(name="testJobD", files=[testFileA])
        testJobD["couch_record"] = makeUUID()
        testJobD.create(group=testJobGroupB)
        testJobD["state"] = "success"

        # Site 2, Has been assigned a location and is incomplete.
        testJobE = Job(name="testJobE", files=[testFileA])
        testJobE["couch_record"] = makeUUID()
        testJobE.create(group=testJobGroupB)
        testJobE["state"] = "executing"
        runJobE = RunJob()
        runJobE.buildFromJob(testJobE)
        runJobE["status"] = "RUN"

        # Does not have a location, site 1 is blacklisted.
        testJobF = Job(name="testJobF", files=[testFileA])
        testJobF["couch_record"] = makeUUID()
        testJobF.create(group=testJobGroupB)
        testJobF["state"] = "new"

        # Site 3, Has been assigned a location and is complete.
        testJobG = Job(name="testJobG", files=[testFileA])
        testJobG["couch_record"] = makeUUID()
        testJobG.create(group=testJobGroupC)
        testJobG["state"] = "cleanout"

        # Site 3, Has been assigned a location and is incomplete.
        testJobH = Job(name="testJobH", files=[testFileA])
        testJobH["couch_record"] = makeUUID()
        testJobH.create(group=testJobGroupC)
        testJobH["state"] = "new"

        # Site 3, Does not have a location.
        testJobI = Job(name="testJobI", files=[testFileA])
        testJobI["couch_record"] = makeUUID()
        testJobI.create(group=testJobGroupC)
        testJobI["state"] = "new"

        # Site 3, Does not have a location and is in cleanout.
        testJobJ = Job(name="testJobJ", files=[testFileA])
        testJobJ["couch_record"] = makeUUID()
        testJobJ.create(group=testJobGroupC)
        testJobJ["state"] = "cleanout"

        changeStateAction = self.daoFactory(classname="Jobs.ChangeState")
        changeStateAction.execute(jobs=[testJobA, testJobB, testJobC,
                                        testJobD, testJobE, testJobF,
                                        testJobG, testJobH, testJobI,
                                        testJobJ])

        self.insertRunJob.execute([runJobB, runJobE])

        setLocationAction = self.daoFactory(classname="Jobs.SetLocation")
        setLocationAction.execute(testJobA["id"], "testSite1")
        setLocationAction.execute(testJobB["id"], "testSite1")
        setLocationAction.execute(testJobD["id"], "testSite1")
        setLocationAction.execute(testJobE["id"], "testSite1")
        setLocationAction.execute(testJobG["id"], "testSite1")
        setLocationAction.execute(testJobH["id"], "testSite1")

        createThresholds = myResourceControl.listThresholdsForCreate()
        submitThresholds = myResourceControl.listThresholdsForSubmit()

        self.assertEqual(len(createThresholds.keys()), 2,
                         "Error: Wrong number of sites in create thresholds")
        self.assertEqual(createThresholds["testSite1"]["total_slots"], 10,
                         "Error: Wrong number of slots for site 1")
        self.assertEqual(createThresholds["testSite2"]["total_slots"], 20,
                         "Error: Wrong number of slots for site 2")
        # We should have two running jobs with locations at site one,
        # two running jobs without locations at site two, and one running
        # job without a location at site one and two.
        self.assertEqual(createThresholds["testSite1"]["pending_jobs"], 4,
                         "Error: Wrong number of pending jobs for site 1")
        # We should have one running job with a location at site 2 and
        # another running job without a location.
        self.assertEqual(createThresholds["testSite2"]["pending_jobs"], 2,
                         "Error: Wrong number of pending jobs for site 2")
        # We should also have a phedex_name
        self.assertEqual(createThresholds["testSite1"]["cms_name"],
                         "T1_US_FNAL")
        self.assertEqual(createThresholds["testSite2"]["cms_name"], None)

        mergeThreshold1 = None
        mergeThreshold2 = None
        procThreshold1 = None
        procThreshold2 = None
        self.assertEqual(submitThresholds["testSite1"]['cms_name'],
                         'T1_US_FNAL')
        for threshold in submitThresholds["testSite1"]["thresholds"]:
            if threshold['task_type'] == "Merge":
                mergeThreshold1 = threshold
            elif threshold['task_type'] == "Processing":
                procThreshold1 = threshold
        self.assertEqual(submitThresholds["testSite2"]['cms_name'], None)
        for threshold in submitThresholds["testSite2"]["thresholds"]:
            if threshold['task_type'] == "Merge":
                mergeThreshold2 = threshold
            elif threshold['task_type'] == "Processing":
                procThreshold2 = threshold

        self.assertEqual(submitThresholds["testSite1"]["total_running_jobs"],
                         1,
                         "Error: Wrong number of running jobs for submit thresholds.")
        self.assertEqual(submitThresholds["testSite2"]["total_running_jobs"],
                         0,
                         "Error: Wrong number of running jobs for submit thresholds.")
        self.assertEqual(submitThresholds["testSite1"]["total_pending_jobs"],
                         1,
                         "Error: Wrong number of pending jobs for submit thresholds.")
        self.assertEqual(submitThresholds["testSite2"]["total_pending_jobs"],
                         0,
                         "Error: Wrong number of pending jobs for submit thresholds.")
        self.assertEqual(mergeThreshold1["task_running_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(mergeThreshold1["task_pending_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold1["task_running_jobs"], 1,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold1["task_pending_jobs"], 1,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(mergeThreshold2["task_running_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(mergeThreshold2["task_pending_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold2["task_running_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold2["task_pending_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        return

    def testListSiteInfo(self):
        """
        _testListSiteInfo_

        Verify that the listSiteInfo() methods works properly.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite2", 100, 200, "testSE2",
                                     "testCE2")

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["site_name"], "testSite1",
                         "Error: Site name is wrong.")
        self.assertEqual(siteInfo["se_name"], ["testSE1"],
                         "Error: SE name is wrong.")
        self.assertEqual(siteInfo["ce_name"], "testCE1",
                         "Error: CE name is wrong.")
        self.assertEqual(siteInfo["pending_slots"], 10,
                         "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 20,
                         "Error: Pending slots is wrong.")
        return

    def testUpdateJobSlots(self):
        """
        _testUpdateJobSlots_

        Verify that it is possible to update the number of job slots at a
        site.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")

        siteInfo = myResourceControl.listSiteInfo("testSite1")
        self.assertEqual(siteInfo["pending_slots"], 10,
                         "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 20,
                         "Error: Running slots is wrong.")

        # Updating only one of the two slot counts leaves the other alone.
        myResourceControl.setJobSlotsForSite("testSite1", pendingJobSlots=20)
        siteInfo = myResourceControl.listSiteInfo("testSite1")
        self.assertEqual(siteInfo["pending_slots"], 20,
                         "Error: Pending slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", runningJobSlots=40)
        siteInfo = myResourceControl.listSiteInfo("testSite1")
        self.assertEqual(siteInfo["running_slots"], 40,
                         "Error: Running slots is wrong.")

        # Positional form updates both counts at once.
        myResourceControl.setJobSlotsForSite("testSite1", 5, 10)
        siteInfo = myResourceControl.listSiteInfo("testSite1")
        self.assertEqual(siteInfo["pending_slots"], 5,
                         "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 10,
                         "Error: Running slots is wrong.")
        return

    def testThresholdsForSite(self):
        """
        _testThresholdsForSite_

        Check that we can get the thresholds in intelligible form
        for each site
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 8)
        myResourceControl.insertThreshold("testSite1", "Merge", 5, 3)

        result = myResourceControl.thresholdBySite(siteName="testSite1")
        procInfo = {}
        mergInfo = {}
        for res in result:
            if res['task_type'] == 'Processing':
                procInfo = res
            elif res['task_type'] == 'Merge':
                mergInfo = res
        # Site-level slots are shared; the per-task max/pending values differ.
        self.assertEqual(procInfo.get('pending_slots', None), 20)
        self.assertEqual(procInfo.get('running_slots', None), 40)
        self.assertEqual(procInfo.get('max_slots', None), 10)
        self.assertEqual(procInfo.get('task_pending_slots', None), 8)
        self.assertEqual(mergInfo.get('pending_slots', None), 20)
        self.assertEqual(mergInfo.get('running_slots', None), 40)
        self.assertEqual(mergInfo.get('max_slots', None), 5)
        self.assertEqual(mergInfo.get('task_pending_slots', None), 3)
        return

    def testThresholdPriority(self):
        """
        _testThresholdPriority_

        Test that we get things back in priority order
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 8,
                                          priority=1)
        myResourceControl.insertThreshold("testSite1", "Merge", 5, 3,
                                          priority=2)

        # Higher priority value sorts first.
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite1']['thresholds'][0]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite1']['thresholds'][1]['task_type'],
                         'Processing')

        myResourceControl.insertThreshold("testSite1", "Processing", 10, 8,
                                          priority=2)
        myResourceControl.insertThreshold("testSite1", "Merge", 5, 3,
                                          priority=1)

        # Should now be in reverse order
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite1']['thresholds'][1]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite1']['thresholds'][0]['task_type'],
                         'Processing')

        myResourceControl.insertSite("testSite2", 20, 40, "testSE2", "testCE2")
        myResourceControl.insertThreshold("testSite2", "Processing", 10, 8,
                                          priority=1)
        myResourceControl.insertThreshold("testSite2", "Merge", 5, 3,
                                          priority=2)

        # Should be in proper order for site 2
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite2']['thresholds'][0]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite2']['thresholds'][1]['task_type'],
                         'Processing')

        # Should now be in reverse order for site 1
        self.assertEqual(result['testSite1']['thresholds'][1]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite1']['thresholds'][0]['task_type'],
                         'Processing')

        # Re-inserting a threshold without an explicit priority must keep the
        # existing priority (still 2 for site 2's top entry).
        myResourceControl.insertThreshold("testSite2", "Merge", 20, 10)
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite2']['thresholds'][0]['priority'], 2)
        return

    def testChangeState(self):
        """
        _testChangeState_

        Check that we can change the state between different values and
        retrieve it through the threshold methods
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 5,
                                          priority=1)

        result = myResourceControl.listThresholdsForCreate()
        self.assertEqual(result['testSite1']['state'], 'Normal',
                         'Error: Wrong site state')

        myResourceControl.changeSiteState("testSite1", "Down")
        result = myResourceControl.listThresholdsForCreate()
        self.assertEqual(result['testSite1']['state'], 'Down',
                         'Error: Wrong site state')

    def createConfig(self):
        """
        _createConfig_

        Create a config and save it to the temp dir.  Set the WMAGENT_CONFIG
        environment variable so the config gets picked up.
        """
        config = Configuration()
        config.section_("General")
        config.General.workDir = os.getenv("TESTDIR", os.getcwd())
        config.section_("Agent")
        config.Agent.componentName = "resource_control_t"
        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket = os.getenv("DBSOCK")

        configHandle = open(os.path.join(self.tempDir, "config.py"), "w")
        configHandle.write(str(config))
        configHandle.close()

        os.environ["WMAGENT_CONFIG"] = os.path.join(self.tempDir, "config.py")
        return

    def testInsertAllSEs(self):
        """
        _testInsertAllSEs_

        Test to see if we can insert all SEs and Thresholds at once
        Depending on the WMCore.Services.SiteDB interface
        """
        self.createConfig()

        # Drive the command-line tool end-to-end via subprocess.
        resControlPath = os.path.join(WMCore.WMBase.getTestBase(),
                                      "../../bin/wmagent-resource-control")
        env = os.environ
        env['PYTHONPATH'] = ":".join(sys.path)
        cmdline = [resControlPath, "--add-all-sites",
                   "--plugin=CondorPlugin", "--pending-slots=100",
                   "--running-slots=500"]
        retval = subprocess.Popen(cmdline,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  env=env)
        (output, _) = retval.communicate()
        myResourceControl = ResourceControl()
        result = 
myResourceControl.listThresholdsForSubmit() self.assertTrue('T1_US_FNAL' in result.keys()) for x in result.keys(): self.assertEqual(len(result[x]['thresholds']), 8) self.assertEqual(result[x]['total_pending_slots'], 100) self.assertEqual(result[x]['total_running_slots'], 500) for thresh in result[x]['thresholds']: if thresh['task_type'] == 'Processing': self.assertEqual(thresh['priority'], 1) self.assertEqual(thresh['max_slots'], 500) # Verify that sites with more than one SE were added correctly. nebInfo = myResourceControl.listSiteInfo("T2_US_Nebraska") self.assertTrue(len(nebInfo["se_name"]) == 3) return def testInsertAllSEs2(self): """ _testInsertAllSEs2_ Test to see if we can insert all SEs and Thresholds at once Depending on the WMCore.Services.SiteDB interface """ myResourceControl = ResourceControl() taskList = [{ 'taskType': 'Processing', 'maxSlots': 100, 'pendingSlots': 80, 'priority': 1 }, { 'taskType': 'Merge', 'maxSlots': 50, 'pendingSlots': 30, 'priority': 2 }] myResourceControl.insertAllSEs(siteName='test', pendingSlots=200, runningSlots=400, ceName='glidein-ce.fnal.gov', plugin='CondorPlugin', taskList=taskList) result = myResourceControl.listThresholdsForSubmit() self.assertTrue('test_cmssrm.fnal.gov' in result.keys()) self.assertEqual(result['test_cmssrm.fnal.gov']['cms_name'], 'T1_US_FNAL') for x in result.keys(): self.assertEqual(len(result[x]['thresholds']), 2) self.assertEqual(result[x]['total_pending_slots'], 200) self.assertEqual(result[x]['total_running_slots'], 400) for thresh in result[x]['thresholds']: if thresh['task_type'] == 'Processing': self.assertEqual(thresh['priority'], 1) self.assertEqual(thresh['max_slots'], 100) self.assertEqual(thresh['pending_slots'], 80) else: self.assertEqual(thresh['priority'], 2) self.assertEqual(thresh['max_slots'], 50) self.assertEqual(thresh['pending_slots'], 30) return def testInsertT0(self): """ _testInsertT0_ Test to see if we can insert the Tier-0 alone with a single option """ self.createConfig() 
resControlPath = os.path.join(WMCore.WMBase.getTestBase(), "../../bin/wmagent-resource-control") env = os.environ env['PYTHONPATH'] = ":".join(sys.path) cmdline = [resControlPath, "--add-T0"] retval = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env) (_, _) = retval.communicate() myResourceControl = ResourceControl() result = myResourceControl.listThresholdsForSubmit() self.assertTrue(len(result), 1) self.assertTrue('CERN' in result) for x in result: self.assertEqual(len(result[x]['thresholds']), 10) self.assertEqual(result[x]['total_pending_slots'], 500) self.assertEqual(result[x]['total_running_slots'], -1) for thresh in result[x]['thresholds']: if thresh['task_type'] == 'Processing': self.assertEqual(thresh['priority'], 1) self.assertEqual(thresh['max_slots'], -1) # Verify that sites with more than one SE were added correctly. cernInfo = myResourceControl.listSiteInfo("CERN") self.assertTrue(len(cernInfo["se_name"]) == 2) return
class CursorLeakTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Set up the database and logging connection.  Try to create all of
        the WMBS tables.  Also add some dummy locations.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="se1.cern.ch")
        locationAction.execute(siteName="se1.fnal.gov")
        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()

    def testCursor(self):
        """
        _testCursor_

        Test that cursor closing really takes effect: create 100 files with
        5 parents each and then loop 100 times over the ancestor queries.
        If cursors are exhausted this will crash.

        TODO: improve for more effective testing.
        """
        raise nose.SkipTest
        fileList = []
        parentFile = None
        for i in range(100):
            testFile = File(lfn="/this/is/a/lfn%s" % i,
                            size=1024,
                            events=10,
                            checksums={"cksum": "1"})
            testFile.addRun(Run(1, *[i]))
            testFile.create()

            for j in range(5):
                parentFile = File(lfn="/this/is/a/lfnP%s" % j,
                                  size=1024,
                                  events=10,
                                  checksums={"cksum": "1"})
                parentFile.addRun(Run(1, *[j]))
                parentFile.create()
                testFile.addParent(parentFile['lfn'])

            fileList.append(testFile)

        for i in range(100):
            # loop variable renamed from "file" to avoid shadowing the builtin
            for wmbsFile in fileList:
                wmbsFile.loadData()
                wmbsFile.getAncestors(level=2)
                wmbsFile.getAncestors(level=2, type="lfn")

        return

    def testLotsOfAncestors(self):
        """
        _testLotsOfAncestors_

        Create a file with 15 parents with each parent having 100 parents
        to verify that the query to return grandparents works correctly.
        """
        raise nose.SkipTest
        testFileA = File(lfn="/this/is/a/lfnA",
                         size=1024,
                         events=10,
                         checksums={"cksum": "1"},
                         locations="se1.fnal.gov")
        testFileA.create()

        # range() instead of the Python 2 only xrange(), and a distinct
        # inner loop variable instead of shadowing "i".
        for i in range(15):
            testParent = File(lfn=makeUUID(),
                              size=1024,
                              events=10,
                              checksums={"cksum": "1"},
                              locations="se1.fnal.gov")
            testParent.create()
            testFileA.addParent(testParent["lfn"])

            for j in range(100):
                testGParent = File(lfn=makeUUID(),
                                   size=1024,
                                   events=10,
                                   checksums={"cksum": "1"},
                                   locations="se1.fnal.gov")
                testGParent.create()
                testParent.addParent(testGParent["lfn"])

        # 15 parents with 100 grandparents each -> 1500 level-2 ancestors
        self.assertEqual(len(testFileA.getAncestors(level=2, type="lfn")),
                         1500,
                         "ERROR: Incorrect grand parents returned")
        return
class ExpressMergeTest(unittest.TestCase):
    """
    _ExpressMergeTest_

    Test for ExpressMerge job splitter
    """

    def setUp(self):
        """
        _setUp_

        Create run/lumi/stream fixtures, the input and output filesets and
        the Express/ExpressMerge subscriptions used by the split tests.
        """
        import WMQuality.TestInit
        WMQuality.TestInit.deleteDatabaseAfterEveryTest("I'm Serious")

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(
            customModules=["WMComponent.DBS3Buffer", "T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state, state_time)
                                    VALUES (1, 'SomeSite', 1, 1)
                                    """, transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_pnns
                                    (id, pnn)
                                    VALUES (2, 'SomePNN')
                                    """, transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 2)
                                    """, transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={'RUN': 1, 'HLTKEY': "someHLTKey"},
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in range(1, 5):
            insertLumiDAO.execute(binds={'RUN': 1, 'LUMI': lumi},
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "Express"},
                                transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        fileset1 = Fileset(name="TestFileset1")
        self.fileset2 = Fileset(name="TestFileset2")
        fileset1.load()
        self.fileset2.create()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow2 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow2",
                             task="Test")
        workflow1.create()
        workflow2.create()

        self.subscription1 = Subscription(fileset=fileset1,
                                          workflow=workflow1,
                                          split_algo="Express",
                                          type="Express")
        self.subscription2 = Subscription(fileset=self.fileset2,
                                          workflow=workflow2,
                                          split_algo="ExpressMerge",
                                          type="ExpressMerge")
        self.subscription1.create()
        self.subscription2.create()

        myThread.dbi.processData("""INSERT INTO wmbs_workflow_output
                                    (WORKFLOW_ID, OUTPUT_IDENTIFIER, OUTPUT_FILESET)
                                    VALUES (%d, 'SOMEOUTPUT', %d)
                                    """ % (workflow1.id, self.fileset2.id),
                                 transaction=False)

        # keep for later
        self.insertSplitLumisDAO = daoFactory(
            classname="JobSplitting.InsertSplitLumis")

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['maxInputSize'] = 2 * 1024 * 1024 * 1024
        # Bug fix: a stray trailing comma previously made this the tuple
        # (500,) instead of the integer 500, which silently broke numeric
        # comparisons against the input file count.
        self.splitArgs['maxInputFiles'] = 500
        self.splitArgs['maxLatency'] = 15 * 23

        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database.
        """
        self.testInit.clearDatabase()
        return

    def deleteSplitLumis(self):
        """
        _deleteSplitLumis_

        Remove all active split-lumi records.
        """
        myThread = threading.currentThread()
        myThread.dbi.processData("""DELETE FROM lumi_section_split_active
                                    """, transaction=False)
        return

    def test00(self):
        """
        _test00_

        Test that the job name prefix feature works
        Test latency trigger (wait and 0)
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxLatency'] = 0
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("ExpressMerge-"),
                        "ERROR: Job has wrong name")

        return

    def test01(self):
        """
        _test01_

        Test size and event triggers for single lumis (they are ignored)
        Test latency trigger (timed out)
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputSize'] = 1
        mySplitArgs['maxInputFiles'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return

    def test02(self):
        """
        _test02_

        Test input files threshold on multi lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputFiles'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test03(self):
        """
        _test03_

        Test input size threshold on multi lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputSize'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test04(self):
        """
        _test04_

        Test multi lumis express merges
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return

    def test05(self):
        """
        _test05_

        Test multi lumis express merges with holes
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 4]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test06(self):
        """
        _test06_

        Test active split lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        self.insertSplitLumisDAO.execute(binds={
            'SUB': self.subscription1['id'],
            'LUMI': 1,
            'NFILES': 5
        })

        mySplitArgs['maxLatency'] = 0
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.deleteSplitLumis()

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return
class PhEDExInjectorPollerTest(unittest.TestCase):
    """
    _PhEDExInjectorPollerTest_

    Unit tests for the PhEDExInjector.  Create some database inside DBSBuffer
    and then have the PhEDExInjector upload the data to PhEDEx.  Pull the data
    back down and verify that everything is complete.
    """

    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """
        # Test-instance service endpoints used by the poller under test.
        self.phedexURL = "https://cmsweb.cern.ch/phedex/datasvc/json/test"
        self.dbsURL = "http://vocms09.cern.ch:8880/cms_dbs_int_local_yy_writer/servlet/DBSServlet"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.DBSBuffer.Database",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        # Register the two locations the buffered files will live at.
        locationAction = daofactory(classname="DBSBufferFiles.AddLocation")
        locationAction.execute(siteName="srm-cms.cern.ch")
        locationAction.execute(siteName="se.fnal.gov")

        # Per-dataset file lists filled in by stuffDatabase().
        self.testFilesA = []
        self.testFilesB = []
        # Random dataset paths so test runs do not collide in PhEDEx.
        self.testDatasetA = "/%s/PromptReco-v1/RECO" % makeUUID()
        self.testDatasetB = "/%s/CRUZET11-v1/RAW" % makeUUID()

        self.phedex = PhEDEx({"endpoint": self.phedexURL}, "json")

        return

    def tearDown(self):
        """
        _tearDown_

        Delete the database.
        """
        self.testInit.clearDatabase()

    def stuffDatabase(self, spec="TestWorkload.pkl"):
        """
        _stuffDatabase_

        Fill the dbsbuffer with some files and blocks.  We'll insert a total
        of 5 files spanning two blocks.  There will be a total of two datasets
        inserted into the datbase.

        We'll inject files with the location set as an SE name as well as a
        PhEDEx node name as well.
        """
        myThread = threading.currentThread()

        buffer3Factory = DAOFactory(package="WMComponent.DBS3Buffer",
                                    logger=myThread.logger,
                                    dbinterface=myThread.dbi)
        insertWorkflow = buffer3Factory(classname="InsertWorkflow")
        insertWorkflow.execute("BogusRequest", "BogusTask", 0, 0, 0, 0)

        checksums = {"adler32": "1234", "cksum": "5678"}

        # Three files for dataset A, all in the same block.
        testFileA = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath(self.testDatasetA)
        testFileA.addRun(Run(2, *[45]))
        testFileA.create()

        testFileB = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileB.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileB.setDatasetPath(self.testDatasetA)
        testFileB.addRun(Run(2, *[45]))
        testFileB.create()

        testFileC = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileC.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileC.setDatasetPath(self.testDatasetA)
        testFileC.addRun(Run(2, *[45]))
        testFileC.create()

        self.testFilesA.append(testFileA)
        self.testFilesA.append(testFileB)
        self.testFilesA.append(testFileC)

        # Two files for dataset B in a second block.
        testFileD = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileD.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileD.setDatasetPath(self.testDatasetB)
        testFileD.addRun(Run(2, *[45]))
        testFileD.create()

        testFileE = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileE.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileE.setDatasetPath(self.testDatasetB)
        testFileE.addRun(Run(2, *[45]))
        testFileE.create()

        self.testFilesB.append(testFileD)
        self.testFilesB.append(testFileE)

        uploadFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                   logger=myThread.logger,
                                   dbinterface=myThread.dbi)
        datasetAction = uploadFactory(classname="NewDataset")
        createAction = uploadFactory(classname="CreateBlocks")

        datasetAction.execute(datasetPath=self.testDatasetA)
        datasetAction.execute(datasetPath=self.testDatasetB)

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()

        newBlockA = DBSBlock(name=self.blockAName,
                             location="srm-cms.cern.ch",
                             das=None,
                             workflow=None)
        newBlockA.setDataset(self.testDatasetA, 'data', 'VALID')
        newBlockA.status = 'Closed'

        newBlockB = DBSBlock(name=self.blockBName,
                             location="srm-cms.cern.ch",
                             das=None,
                             workflow=None)
        newBlockB.setDataset(self.testDatasetB, 'data', 'VALID')
        newBlockB.status = 'Closed'

        createAction.execute(blocks=[newBlockA, newBlockB])

        bufferFactory = DAOFactory(package="WMComponent.DBSBuffer.Database",
                                   logger=myThread.logger,
                                   dbinterface=myThread.dbi)

        # Attach each file to its block and mark them LOCAL so the poller
        # considers them injectable.
        setBlock = bufferFactory(classname="DBSBufferFiles.SetBlock")
        setBlock.execute(testFileA["lfn"], self.blockAName)
        setBlock.execute(testFileB["lfn"], self.blockAName)
        setBlock.execute(testFileC["lfn"], self.blockAName)
        setBlock.execute(testFileD["lfn"], self.blockBName)
        setBlock.execute(testFileE["lfn"], self.blockBName)

        fileStatus = bufferFactory(classname="DBSBufferFiles.SetStatus")
        fileStatus.execute(testFileA["lfn"], "LOCAL")
        fileStatus.execute(testFileB["lfn"], "LOCAL")
        fileStatus.execute(testFileC["lfn"], "LOCAL")
        fileStatus.execute(testFileD["lfn"], "LOCAL")
        fileStatus.execute(testFileE["lfn"], "LOCAL")

        associateWorkflow = buffer3Factory(
            classname="DBSBufferFiles.AssociateWorkflowToFile")
        associateWorkflow.execute(testFileA["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileB["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileC["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileD["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileE["lfn"], "BogusRequest",
                                  "BogusTask")

        return

    def createConfig(self):
        """
        _createConfig_

        Create a config for the PhEDExInjector with paths to the test DBS and
        PhEDEx instances.
        """
        config = self.testInit.getConfiguration()

        config.component_("DBSInterface")
        config.DBSInterface.globalDBSUrl = self.dbsURL

        config.component_("PhEDExInjector")
        config.PhEDExInjector.phedexurl = self.phedexURL
        config.PhEDExInjector.subscribeMSS = True
        config.PhEDExInjector.group = "Saturn"
        config.PhEDExInjector.pollInterval = 30
        config.PhEDExInjector.subscribeInterval = 60

        return config

    def retrieveReplicaInfoForBlock(self, blockName):
        """
        _retrieveReplicaInfoForBlock_

        Retrieve the replica information for a block.  It takes several
        minutes after a block is injected for the statistics to be calculated,
        so this will block until that information is available.

        Returns the first block record from the PhEDEx data service, or None
        after 15 attempts (~5 minutes) without data.
        """
        attempts = 0

        while attempts < 15:
            result = self.phedex.getReplicaInfoForFiles(block=blockName)

            if "phedex" in result:
                if "block" in result["phedex"]:
                    if len(result["phedex"]["block"]) != 0:
                        return result["phedex"]["block"][0]

            attempts += 1
            time.sleep(20)

        logging.info("Could not retrieve replica info for block: %s" %
                     blockName)
        return None

    @attr("integration")
    def testPoller(self):
        """
        _testPoller_

        Stuff the database and have the poller upload files to PhEDEx.
        Retrieve replica information for the uploaded blocks and verify that
        all files have been injected.
        """
        # NOTE(review): this early return disables the entire test body
        # below (it requires live PhEDEx/DBS services) — confirm this is
        # still intentional before removing it.
        return

        self.stuffDatabase()

        poller = PhEDExInjectorPoller(self.createConfig())
        poller.setup(parameters=None)
        poller.algorithm(parameters=None)

        # Verify every LFN from dataset A made it into block A, no extras.
        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        goldenLFNs = []
        for file in self.testFilesA:
            goldenLFNs.append(file["lfn"])

        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])

        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        # Same check for dataset B / block B.
        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        goldenLFNs = []
        for file in self.testFilesB:
            goldenLFNs.append(file["lfn"])

        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])

        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        # Mark block A as migrated to global DBS, then re-run the poller;
        # block A should be closed in PhEDEx while block B stays open.
        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.DBSUpload.Database",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)
        setBlock = daofactory(classname="SetBlockStatus")
        setBlock.execute(self.blockAName,
                         locations=None,
                         open_status="InGlobalDBS")

        poller.algorithm(parameters=None)

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        assert replicaInfo["is_open"] == "n", \
               "Error: block should be closed."

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        assert replicaInfo["is_open"] == "y", \
               "Error: block should be open."
        return

    def test_CustodialSiteA(self):
        """
        _CustodialSiteA_

        Check the custodialSite stuff by DAO, since I don't have a cert
        First make sure we properly handle having no custodialSite
        """
        self.stuffDatabase()
        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.PhEDExInjector.Database",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        getUninjected = daofactory(classname="GetUninjectedFiles")
        uninjectedFiles = getUninjected.execute()
        # NOTE(review): comparing dict.keys() to a list only works on
        # Python 2 (where keys() returns a list) — verify under Python 3.
        self.assertEqual(uninjectedFiles.keys(), ['srm-cms.cern.ch'])

        return
class RunJobTest(unittest.TestCase):
    """
    _RunJobTest_

    Test the RunJob object and accessors
    """

    def setUp(self):
        """
        Create the WMBS/BossAir/ResourceControl/Agent schema, a test site
        with a Processing threshold, and a WMBS user for the jobs.
        """
        myThread = threading.currentThread()

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.tearDown()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl",
            "WMCore.Agent.Database"
        ],
                                useDefault=False)

        self.daoFactory = DAOFactory(package="WMCore.BossAir",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        resourceControl = ResourceControl()
        resourceControl.insertSite(siteName='Xanadu',
                                   pnn='se.Xanadu',
                                   ceName='Xanadu',
                                   plugin="TestPlugin")
        resourceControl.insertThreshold(siteName = 'Xanadu', taskType = 'Processing', \
                                        maxSlots = 10000, pendingSlots = 10000)

        # Create user
        wmbsFactory = DAOFactory(package="WMCore.WMBS",
                                 logger=myThread.logger,
                                 dbinterface=myThread.dbi)
        newuser = wmbsFactory(classname="Users.New")
        newuser.execute(dn="mnorman", group_name="phgroup", role_name="cmsrole")

        # assertItemsEqual was renamed assertCountEqual in Python 3.
        if PY3:
            self.assertItemsEqual = self.assertCountEqual

    def tearDown(self):
        """
        Database deletion
        """
        self.testInit.clearDatabase(modules=[
            "WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl",
            "WMCore.Agent.Database"
        ])
        self.testInit.delWorkDir()

        return

    def createJobs(self, nJobs):
        """
        Creates a series of jobGroups for submissions

        Builds one workflow/fileset/subscription/jobGroup and attaches
        nJobs jobs (owned by "mnorman", located at "Xanadu") to it.
        Returns the committed JobGroup.
        """
        testWorkflow = Workflow(spec="dummy",
                                owner="mnorman",
                                name="dummy",
                                task="basicWorkload/Production")
        testWorkflow.create()

        # Create Fileset, Subscription, jobGroup
        testFileset = Fileset(name="dummy")
        testFileset.create()
        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        type="Processing",
                                        split_algo="FileBased")
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        # Create jobs
        # NOTE(review): loop variable "id" shadows the builtin.
        for id in range(nJobs):
            testJob = Job(name='Job_%i' % (id))
            testJob['owner'] = "mnorman"
            testJob['location'] = 'Xanadu'
            testJob.create(testJobGroup)
            testJobGroup.add(testJob)

        testFileset.commit()
        testJobGroup.commit()

        return testJobGroup

    def testA_BulkDAOs(self):
        """
        _BulkDAOs_

        Test the bulk DAO options, which is the only thing we should be using.

        Exercises the full bl_runjob lifecycle: NewState, NewJobs,
        LoadByStatus, UpdateJobs, LoadByWMBSID, SetStatus, LoadRunning,
        CompleteJob and DeleteJobs.
        """
        myThread = threading.currentThread()

        jobGroup = self.createJobs(nJobs=10)

        runJobs = []
        for job in jobGroup.jobs:
            runJob = RunJob(jobid=job.exists())
            runJob['status'] = 'New'
            runJob['userdn'] = job['owner']
            runJob['usergroup'] = 'phgroup'
            runJob['userrole'] = 'cmsrole'
            runJobs.append(runJob)

        statusDAO = self.daoFactory(classname="NewState")
        statusDAO.execute(states=['New', 'Gone', 'Dead'])

        result = myThread.dbi.processData(
            "SELECT name FROM bl_status")[0].fetchall()
        self.assertItemsEqual(result, [('Dead', ), ('Gone', ), ('New', )])

        newJobDAO = self.daoFactory(classname="NewJobs")
        newJobDAO.execute(jobs=runJobs)

        result = myThread.dbi.processData(
            "SELECT wmbs_id FROM bl_runjob")[0].fetchall()
        self.assertEqual(result, [(1, ), (2, ), (3, ), (4, ), (5, ), (6, ),
                                  (7, ), (8, ), (9, ), (10, )])

        loadJobsDAO = self.daoFactory(classname="LoadByStatus")
        loadJobs = loadJobsDAO.execute(status="New")
        self.assertEqual(len(loadJobs), 10)

        idList = [x['id'] for x in loadJobs]

        for job in loadJobs:
            job['bulkid'] = 1001

        updateDAO = self.daoFactory(classname="UpdateJobs")
        updateDAO.execute(jobs=loadJobs)

        loadJobs = loadJobsDAO.execute(status='New')
        self.assertEqual(len(loadJobs), 10)
        for job in loadJobs:
            # NOTE(review): the DAO round-trips bulkid as the string '1001'
            # even though it was stored as the int 1001 — confirm intended.
            self.assertEqual(job['bulkid'], '1001')

        loadWMBSDAO = self.daoFactory(classname="LoadByWMBSID")
        for job in jobGroup.jobs:
            jDict = loadWMBSDAO.execute(jobs=[job])
            self.assertEqual(job['id'], jDict[0]['jobid'])

        setStatusDAO = self.daoFactory(classname="SetStatus")
        setStatusDAO.execute(jobs=idList, status='Dead')

        result = loadJobsDAO.execute(status='Dead')
        self.assertEqual(len(result), 10)
        result = loadJobsDAO.execute(status='New')
        self.assertEqual(len(result), 0)

        runningJobDAO = self.daoFactory(classname="LoadRunning")
        runningJobs = runningJobDAO.execute()
        self.assertEqual(len(runningJobs), 10)

        completeDAO = self.daoFactory(classname="CompleteJob")
        completeDAO.execute(jobs=idList)

        result = loadJobsDAO.execute(status='Dead', complete='0')
        self.assertEqual(len(result), 10)

        deleteDAO = self.daoFactory(classname="DeleteJobs")
        deleteDAO.execute(jobs=idList)

        result = loadJobsDAO.execute(status='Dead')
        self.assertEqual(len(result), 0)

        return

    def testB_CheckWMBSBuild(self):
        """
        _CheckWMBSBuild_

        Trivial test that checks whether we can build runJobs from WMBS jobs
        """
        jobGroup = self.createJobs(nJobs=10)

        for job in jobGroup.jobs:
            rj = RunJob()
            rj.buildFromJob(job=job)
            self.assertEqual(job['id'], rj['jobid'])
            self.assertEqual(job['retry_count'], rj['retry_count'])
            # Round-trip back to a WMBS job and check the fields survive.
            job2 = rj.buildWMBSJob()
            self.assertEqual(job['id'], job2['id'])
            self.assertEqual(job['retry_count'], job2['retry_count'])

        return

    def testC_CheckWMBSBuildRoleAndGroup(self):
        """
        _CheckWMBSBuild_

        Trivial test that checks whether we can build runJobs from WMBS jobs

        Same round-trip as testB but for per-job usergroup/userrole values;
        the jobs here are never persisted to the database.
        """
        jobGroup = []

        # Create jobs
        # NOTE(review): loop variable "id" shadows the builtin.
        for id in range(10):
            testJob = Job(name='Job_%i' % (id))
            testJob['owner'] = "mnorman"
            testJob['usergroup'] = "mygroup_%i" % id
            testJob['userrole'] = "myrole_%i" % id
            testJob['location'] = 'Xanadu'
            jobGroup.append(testJob)

        for job in jobGroup:
            rj = RunJob()
            rj.buildFromJob(job=job)
            self.assertEqual(job['usergroup'], rj['usergroup'])
            self.assertEqual(job['userrole'], rj['userrole'])
            job2 = rj.buildWMBSJob()
            self.assertEqual(job['usergroup'], job2['usergroup'])
            self.assertEqual(job['userrole'], job2['userrole'])

        return
class ParentlessMergeBySizeTest(unittest.TestCase):
    """
    _ParentlessMergeBySizeTest_

    Unit tests for the ParentlessMergeBySize job splitting algorithm:
    min/max size and event thresholds, run-boundary handling, location
    separation and wait-time forcing.
    """

    def setUp(self):
        """
        _setUp_

        Boiler plate DB setup.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase()
        return

    def _createTestFile(self, lfn, size, events, firstEvent, run, lumi):
        """Create one WMBS file at somese.cern.ch for the given run/lumi."""
        newFile = File(lfn=lfn, size=size, events=events,
                       first_event=firstEvent,
                       locations=set(["somese.cern.ch"]))
        newFile.addRun(Run(run, *[lumi]))
        newFile.create()
        return newFile

    def stuffWMBS(self):
        """
        _stuffWMBS_

        Insert some dummy jobs, jobgroups, filesets, files and subscriptions
        into WMBS to test job creation.  Three completed job groups each
        containing several files are injected.  Another incomplete job group is
        also injected.  Also files are added to the "Mergeable" subscription as
        well as to the output fileset for their jobgroups.
        """
        locationAction = self.daoFactory(classname="Locations.New")
        # Both SEs belong to the same site, so files on either can be
        # merged together unless a test registers a second site itself.
        locationAction.execute(siteName="s1", seName="somese.cern.ch")
        locationAction.execute(siteName="s1", seName="somese2.cern.ch")

        changeStateDAO = self.daoFactory(classname="Jobs.ChangeState")

        self.mergeFileset = Fileset(name="mergeFileset")
        self.mergeFileset.create()
        self.bogusFileset = Fileset(name="bogusFileset")
        self.bogusFileset.create()

        mergeWorkflow = Workflow(name="mergeWorkflow", spec="bunk2",
                                 owner="Steve", task="Test")
        mergeWorkflow.create()
        markWorkflow = self.daoFactory(classname="Workflow.MarkInjectedWorkflows")
        markWorkflow.execute(names=[mergeWorkflow.name], injected=True)

        self.mergeSubscription = Subscription(fileset=self.mergeFileset,
                                              workflow=mergeWorkflow,
                                              split_algo="ParentlessMergeBySize")
        self.mergeSubscription.create()
        # NOTE(review): the bogus subscription is intentionally never
        # create()d in the database -- confirm before relying on it.
        self.bogusSubscription = Subscription(fileset=self.bogusFileset,
                                              workflow=mergeWorkflow,
                                              split_algo="ParentlessMergeBySize")

        # Run 1 / lumi 45, run 1 / lumi 46 and run 2 / lumi 46 files.
        # fileIII is the high-event file, fileIV the oversized one.
        testFiles = [
            self._createTestFile("file1", 1024, 1024, 0, 1, 45),
            self._createTestFile("file2", 1024, 1024, 1024, 1, 45),
            self._createTestFile("file3", 1024, 1024, 2048, 1, 45),
            self._createTestFile("file4", 1024, 1024, 3072, 1, 45),
            self._createTestFile("fileA", 1024, 1024, 0, 1, 46),
            self._createTestFile("fileB", 1024, 1024, 1024, 1, 46),
            self._createTestFile("fileC", 1024, 1024, 2048, 1, 46),
            self._createTestFile("fileI", 1024, 1024, 0, 2, 46),
            self._createTestFile("fileII", 1024, 1024, 1024, 2, 46),
            self._createTestFile("fileIII", 1024, 102400, 2048, 2, 46),
            self._createTestFile("fileIV", 102400, 1024, 3072, 2, 46),
        ]

        for testFile in testFiles:
            self.mergeFileset.addFile(testFile)
            self.bogusFileset.addFile(testFile)

        self.mergeFileset.commit()
        self.bogusFileset.commit()
        return

    # NOTE: this method used to be defined twice with an identical body;
    # the second definition silently shadowed the first and was removed.
    def testMinMergeSize1(self):
        """
        _testMinMergeSize1_

        Set the minimum merge size to be 20,000 bytes which is more than the
        sum of all file sizes in the WMBS instance.  Verify that no merge jobs
        will be produced.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000000,
                            max_merge_events=200000000)

        assert len(result) == 0, \
            "ERROR: No job groups should be returned."
        return

    def testMinMergeSize1a(self):
        """
        _testMinMergeSize1a_

        Set the minimum merge size to be 20,000 bytes which is more than the
        sum of all file sizes in the WMBS instance and mark the fileset as
        closed.  Verify that one job containing all files is pushed out.
        """
        self.stuffWMBS()
        self.mergeFileset.markOpen(False)

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000,
                            max_merge_events=2000000)

        assert len(result) == 1, \
            "ERROR: More than one JobGroup returned: %s" % len(result)
        assert len(result[0].jobs) == 1, \
            "Error: One job should have been returned: %s" % len(result[0].jobs)

        goldenFiles = ["file1", "file2", "file3", "file4", "fileA", "fileB",
                       "fileC", "fileI", "fileII", "fileIII", "fileIV"]

        jobFiles = result[0].jobs[0].getFiles()

        # Verify file membership, locations and run/lumi/event ordering.
        currentRun = 0
        currentLumi = 0
        currentEvent = 0
        for jobFile in jobFiles:
            jobFile.loadData()
            assert jobFile["lfn"] in goldenFiles, \
                "Error: Unknown file: %s" % jobFile["lfn"]
            self.assertTrue(jobFile["locations"] == set(["somese.cern.ch",
                                                         "somese2.cern.ch"]),
                            "Error: File is missing a location.")
            goldenFiles.remove(jobFile["lfn"])

            fileRun = list(jobFile["runs"])[0].run
            fileLumi = min(list(jobFile["runs"])[0])
            fileEvent = jobFile["first_event"]

            if currentRun == 0:
                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent
                continue

            assert fileRun >= currentRun, \
                "ERROR: Files not sorted by run."
            if fileRun == currentRun:
                assert fileLumi >= currentLumi, \
                    "ERROR: Files not ordered by lumi"
                if fileLumi == currentLumi:
                    assert fileEvent >= currentEvent, \
                        "ERROR: Files not ordered by first event"

            currentRun = fileRun
            currentLumi = fileLumi
            currentEvent = fileEvent
        return

    def testMaxMergeSize(self):
        """
        _testMaxMergeSize_

        Set the maximum merge size to be 100000 bytes.  Verify that two merge
        jobs are created, one for the one large file and another for the rest
        of the files.  Verify that each merge job contains the expected files
        and that we merge across runs.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=100000,
                            max_merge_events=200000)

        assert len(result) == 1, \
            "ERROR: More than one JobGroup returned: %s" % result
        assert len(result[0].jobs) == 2, \
            "ERROR: Two jobs should have been returned."

        goldenFilesA = ["file1", "file2", "file3", "file4", "fileA", "fileB",
                        "fileC", "fileI", "fileII", "fileIII"]
        goldenFilesB = ["fileIV"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()
            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for jobFile in jobFiles:
                assert jobFile["lfn"] in goldenFiles, \
                    "Error: Unknown file in merge jobs."
                assert jobFile["locations"] == set(["somese.cern.ch"]), \
                    "Error: File is missing a location."
                goldenFiles.remove(jobFile["lfn"])

                fileRun = list(jobFile["runs"])[0].run
                fileLumi = min(list(jobFile["runs"])[0])
                fileEvent = jobFile["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                assert fileRun >= currentRun, \
                    "ERROR: Files not sorted by run."
                if fileRun == currentRun:
                    assert fileLumi >= currentLumi, \
                        "ERROR: Files not ordered by lumi"
                    if fileLumi == currentLumi:
                        assert fileEvent >= currentEvent, \
                            "ERROR: Files not ordered by first event"

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        assert len(goldenFilesA) == 0 and len(goldenFilesB) == 0, \
            "ERROR: Files missing from merge jobs."
        return

    def testMaxEvents(self):
        """
        _testMaxEvents_

        Verify the the max_merge_events parameter works and that we correctly
        merge across runs.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=20000000,
                            max_merge_events=100000)

        assert len(result) == 1, \
            "ERROR: More than one JobGroup returned: %s" % result
        assert len(result[0].jobs) == 2, \
            "ERROR: Two jobs should have been returned: %s" % len(result[0].jobs)

        goldenFilesA = ["file1", "file2", "file3", "file4", "fileA", "fileB",
                        "fileC", "fileI", "fileII", "fileIV"]
        goldenFilesB = ["fileIII"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()
            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for jobFile in jobFiles:
                assert jobFile["lfn"] in goldenFiles, \
                    "Error: Unknown file in merge jobs."
                assert jobFile["locations"] == set(["somese.cern.ch"]), \
                    "Error: File is missing a location: %s" % jobFile["locations"]
                goldenFiles.remove(jobFile["lfn"])

                fileRun = list(jobFile["runs"])[0].run
                fileLumi = min(list(jobFile["runs"])[0])
                fileEvent = jobFile["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                assert fileRun >= currentRun, \
                    "ERROR: Files not sorted by run: %s, %s" % (fileRun, currentRun)
                if fileRun == currentRun:
                    assert fileLumi >= currentLumi, \
                        "ERROR: Files not ordered by lumi"
                    if fileLumi == currentLumi:
                        assert fileEvent >= currentEvent, \
                            "ERROR: Files not ordered by first event"

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        # FIX: the message was previously joined with 'and' instead of ',',
        # so the assert could never report it (and degenerated to a truthy
        # string when both lists were empty).
        assert len(goldenFilesA) == 0 and len(goldenFilesB) == 0, \
            "ERROR: Files missing from merge jobs."
        return

    def testMinMergeSize1aNoRunMerge(self):
        """
        _testMinMergeSize1aNoRunMerge_

        Set the minimum merge size to be 20,000 bytes which is more than the
        sum of all file sizes in the WMBS instance and mark the fileset as
        closed.  Verify that two jobs are pushed out and that we don't merge
        across run boundaries.
        """
        self.stuffWMBS()
        self.mergeFileset.markOpen(False)

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000,
                            max_merge_events=2000000,
                            merge_across_runs=False)

        assert len(result) == 1, \
            "ERROR: More than one JobGroup returned: %s" % len(result)
        assert len(result[0].jobs) == 2, \
            "Error: Two jobs should have been returned: %s" % len(result[0].jobs)

        # One job per run: run 1 files and run 2 files.
        goldenFilesA = ["file1", "file2", "file3", "file4", "fileA", "fileB",
                        "fileC"]
        goldenFilesB = ["fileI", "fileII", "fileIII", "fileIV"]
        goldenFilesA.sort()
        goldenFilesB.sort()

        for job in result[0].jobs:
            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            jobLFNs = []

            for jobFile in job.getFiles():
                jobFile.loadData()
                jobLFNs.append(jobFile["lfn"])
                self.assertTrue(jobFile["locations"] == set(["somese.cern.ch",
                                                             "somese2.cern.ch"]),
                                "Error: File is missing a location.")

                fileRun = list(jobFile["runs"])[0].run
                fileLumi = min(list(jobFile["runs"])[0])
                fileEvent = jobFile["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                assert fileRun >= currentRun, \
                    "ERROR: Files not sorted by run."
                if fileRun == currentRun:
                    assert fileLumi >= currentLumi, \
                        "ERROR: Files not ordered by lumi"
                    if fileLumi == currentLumi:
                        assert fileEvent >= currentEvent, \
                            "ERROR: Files not ordered by first event"

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

            jobLFNs.sort()
            if jobLFNs == goldenFilesA:
                goldenFilesA = []
            else:
                self.assertEqual(jobLFNs, goldenFilesB,
                                 "Error: LFNs do not match.")
                goldenFilesB = []
        return

    def testMaxMergeSizeNoRunMerge(self):
        """
        _testMaxMergeSizeNoRunMerge_

        Set the maximum merge size to be 100000 bytes.  Verify that two merge
        jobs are created, one for the one large file and another for the rest
        of the files.  Verify that each merge job contains the expected files
        and that we don't merge across run boundaries.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=100000,
                            max_merge_events=200000,
                            merge_across_runs=False)

        assert len(result) == 1, \
            "ERROR: More than one JobGroup returned: %s" % result
        assert len(result[0].jobs) == 3, \
            "ERROR: Three jobs should have been returned."

        goldenFilesA = ["file1", "file2", "file3", "file4", "fileA", "fileB",
                        "fileC"]
        goldenFilesB = ["fileI", "fileII", "fileIII"]
        goldenFilesC = ["fileIV"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()
            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB
            else:
                goldenFiles = goldenFilesC

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for jobFile in jobFiles:
                self.assertTrue(jobFile["lfn"] in goldenFiles,
                                "Error: Unknown file in merge jobs.")
                self.assertTrue(jobFile["locations"] == set(["somese.cern.ch"]),
                                "Error: File is missing a location.")
                goldenFiles.remove(jobFile["lfn"])

                fileRun = list(jobFile["runs"])[0].run
                fileLumi = min(list(jobFile["runs"])[0])
                fileEvent = jobFile["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                self.assertTrue(fileRun >= currentRun,
                                "ERROR: Files not sorted by run.")
                if fileRun == currentRun:
                    self.assertTrue(fileLumi >= currentLumi,
                                    "ERROR: Files not ordered by lumi")
                    if fileLumi == currentLumi:
                        self.assertTrue(fileEvent >= currentEvent,
                                        "ERROR: Files not ordered by first event")

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        # FIX: goldenFilesC was never checked here although the sibling
        # testMaxEventsNoRunMerge checks all three lists.
        self.assertTrue(len(goldenFilesA) == 0 and len(goldenFilesB) == 0 and
                        len(goldenFilesC) == 0,
                        "ERROR: Files missing from merge jobs.")
        return

    def testMaxEventsNoRunMerge(self):
        """
        _testMaxEventsNoRunMerge_

        Verify that the max events merge parameter works correctly and that we
        don't merge across run boundaries.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=20000000,
                            max_merge_events=100000,
                            merge_across_runs=False)

        self.assertTrue(len(result) == 1,
                        "ERROR: More than one JobGroup returned: %s" % result)
        self.assertTrue(len(result[0].jobs) == 3,
                        "ERROR: Three jobs should have been returned: %s" %
                        len(result[0].jobs))

        goldenFilesA = ["file1", "file2", "file3", "file4", "fileA", "fileB",
                        "fileC"]
        goldenFilesB = ["fileI", "fileII", "fileIV"]
        goldenFilesC = ["fileIII"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()
            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB
            else:
                goldenFiles = goldenFilesC

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for jobFile in jobFiles:
                self.assertTrue(jobFile["lfn"] in goldenFiles,
                                "Error: Unknown file in merge jobs.")
                self.assertTrue(jobFile["locations"] == set(["somese.cern.ch"]),
                                "Error: File is missing a location: %s" %
                                jobFile["locations"])
                goldenFiles.remove(jobFile["lfn"])

                fileRun = list(jobFile["runs"])[0].run
                fileLumi = min(list(jobFile["runs"])[0])
                fileEvent = jobFile["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                self.assertTrue(fileRun >= currentRun,
                                "ERROR: Files not sorted by run: %s, %s" %
                                (fileRun, currentRun))
                if fileRun == currentRun:
                    self.assertTrue(fileLumi >= currentLumi,
                                    "ERROR: Files not ordered by lumi")
                    if fileLumi == currentLumi:
                        self.assertTrue(fileEvent >= currentEvent,
                                        "ERROR: Files not ordered by first event")

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        self.assertTrue(len(goldenFilesA) == 0 and len(goldenFilesB) == 0 and
                        len(goldenFilesC) == 0,
                        "ERROR: Files missing from merge jobs.")
        return

    def testLocationMerging(self):
        """
        _testLocationMerging_

        Verify that files residing on different SEs are not merged together in
        the same job.
        """
        self.stuffWMBS()

        locationAction = self.daoFactory(classname="Locations.New")
        locationAction.execute(siteName="s2", seName="somese2.cern.ch")

        fileSite2 = File(lfn="fileSite2", size=4098, events=1024,
                         first_event=0, locations=set(["somese2.cern.ch"]))
        fileSite2.addRun(Run(1, *[46]))
        fileSite2.create()

        self.mergeFileset.addFile(fileSite2)
        self.mergeFileset.commit()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=4097,
                            max_merge_size=99999999,
                            max_merge_events=999999999,
                            merge_across_runs=False)

        assert len(result) == 1, \
            "ERROR: More than one JobGroup returned."
        assert len(result[0].jobs) == 3, \
            "ERROR: Three jobs should have been returned."

        # Every file in a job must share the job's first file's location.
        for job in result[0].jobs:
            firstInputFile = job.getFiles()[0]
            baseLocation = list(firstInputFile["locations"])[0]

            for inputFile in job.getFiles():
                assert len(inputFile["locations"]) == 1, \
                    "Error: Wrong number of locations"
                assert list(inputFile["locations"])[0] == baseLocation, \
                    "Error: Wrong location."
        return

    def testMaxWaitTime(self):
        """
        _testMaxWaitTime_

        Set the max wait times to be negative - this should force all files
        to merge out immediately.

        Using the first setup as the first merge test which should normally
        produce no jobGroups.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000000,
                            max_merge_events=200000000,
                            max_wait_time=-10)

        # Everything should be in one, small jobGroup
        self.assertEqual(len(result), 1)
        self.assertEqual(len(result[0].jobs), 1)
        job = result[0].jobs[0]
        # All files should be in one job
        self.assertEqual(len(job.getFiles()), 11)
        return

    def testDifferentSubscritionIDs(self):
        """
        _testDifferentSubscriptionIDs_

        Make sure that the merge splitting still runs if the subscription ID
        is not equal to the workflow ID.
        """
        myThread = threading.currentThread()
        # Pad the subscription table so subscription ID != workflow ID.
        myThread.transaction.begin()
        dummyWorkflow = Workflow(name="dummyWorkflow", spec="bunk49",
                                 owner="Steve", task="Test2")
        dummyWorkflow.create()
        dummyFileset = Fileset(name="dummyFileset")
        dummyFileset.create()
        dummySubscription1 = Subscription(fileset=dummyFileset,
                                          workflow=dummyWorkflow,
                                          split_algo="ParentlessMergeBySize")
        dummySubscription2 = Subscription(fileset=dummyFileset,
                                          workflow=dummyWorkflow,
                                          split_algo="ParentlessMergeBySize")
        dummySubscription1.create()
        dummySubscription2.create()
        myThread.transaction.commit()

        self.stuffWMBS()
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)
        result = jobFactory(min_merge_size=4097,
                            max_merge_size=99999999,
                            max_merge_events=999999999,
                            merge_across_runs=False)
        self.assertEqual(len(result), 1)
        jobGroup = result[0]
        self.assertEqual(len(jobGroup.jobs), 2)
        return
class RepackMergeTest(unittest.TestCase): """ _RepackMergeTest_ Test for RepackMerge job splitter """ def setUp(self): """ _setUp_ """ self.testInit = TestInit(__file__) self.testInit.setLogging() self.testInit.setDatabaseConnection() self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"]) self.splitterFactory = SplitterFactory(package = "T0.JobSplitting") myThread = threading.currentThread() daoFactory = DAOFactory(package = "T0.WMBS", logger = logging, dbinterface = myThread.dbi) wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS", logger = logging, dbinterface = myThread.dbi) myThread.dbi.processData("""INSERT INTO wmbs_location (id, site_name, state, state_time) VALUES (1, 'SomeSite', 1, 1) """, transaction = False) myThread.dbi.processData("""INSERT INTO wmbs_pnns (id, pnn) VALUES (2, 'SomePNN') """, transaction = False) myThread.dbi.processData("""INSERT INTO wmbs_location_pnns (location, pnn) VALUES (1, 2) """, transaction = False) insertRunDAO = daoFactory(classname = "RunConfig.InsertRun") insertRunDAO.execute(binds = { 'RUN' : 1, 'HLTKEY' : "someHLTKey" }, transaction = False) insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection") insertLumiDAO.execute(binds = { 'RUN' : 1, 'LUMI' : 1 }, transaction = False) insertLumiDAO.execute(binds = { 'RUN' : 1, 'LUMI' : 2 }, transaction = False) insertLumiDAO.execute(binds = { 'RUN' : 1, 'LUMI' : 3 }, transaction = False) insertLumiDAO.execute(binds = { 'RUN' : 1, 'LUMI' : 4 }, transaction = False) insertLumiDAO.execute(binds = { 'RUN' : 1, 'LUMI' : 5 }, transaction = False) insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream") insertStreamDAO.execute(binds = { 'STREAM' : "A" }, transaction = False) insertCMSSVersionDAO = daoFactory(classname = "RunConfig.InsertCMSSWVersion") insertCMSSVersionDAO.execute(binds = { 'VERSION' : "CMSSW_4_2_7" }, transaction = False) insertStreamCMSSWVersionDAO = daoFactory(classname = "RunConfig.InsertStreamCMSSWVersion") 
insertStreamCMSSWVersionDAO.execute(binds = { 'RUN' : 1, 'STREAM' : 'A', 'VERSION' : "CMSSW_4_2_7" }, transaction = False) insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer") insertStreamerDAO.execute(streamerPNN = "SomePNN", binds = { 'RUN' : 1, 'P5_ID' : 1, 'LUMI' : 4, 'STREAM' : "A", 'LFN' : "/testLFN/A", 'FILESIZE' : 100, 'EVENTS' : 100, 'TIME' : int(time.time()) }, transaction = False) insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset") insertStreamFilesetDAO.execute(1, "A", "TestFileset1") self.fileset1 = Fileset(name = "TestFileset1") self.fileset2 = Fileset(name = "TestFileset2") self.fileset1.load() self.fileset2.create() workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test") workflow2 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow2", task="Test") workflow1.create() workflow2.create() self.subscription1 = Subscription(fileset = self.fileset1, workflow = workflow1, split_algo = "Repack", type = "Repack") self.subscription2 = Subscription(fileset = self.fileset2, workflow = workflow2, split_algo = "RepackMerge", type = "RepackMerge") self.subscription1.create() self.subscription2.create() myThread.dbi.processData("""INSERT INTO wmbs_workflow_output (WORKFLOW_ID, OUTPUT_IDENTIFIER, OUTPUT_FILESET) VALUES (%d, 'SOMEOUTPUT', %d) """ % (workflow1.id, self.fileset2.id), transaction = False) # keep for later self.insertSplitLumisDAO = daoFactory(classname = "JobSplitting.InsertSplitLumis") self.insertClosedLumiDAO = daoFactory(classname = "RunLumiCloseout.InsertClosedLumi") self.feedStreamersDAO = daoFactory(classname = "Tier0Feeder.FeedStreamers") self.acquireFilesDAO = wmbsDaoFactory(classname = "Subscriptions.AcquireFiles") self.completeFilesDAO = wmbsDaoFactory(classname = "Subscriptions.CompleteFiles") self.currentTime = int(time.time()) # default split parameters self.splitArgs = {} self.splitArgs['minInputSize'] = 2.1 * 1024 * 1024 * 1024 
self.splitArgs['maxInputSize'] = 4.0 * 1024 * 1024 * 1024 self.splitArgs['maxInputEvents'] = 100000000 self.splitArgs['maxInputFiles'] = 1000 self.splitArgs['maxEdmSize'] = 20 * 1024 * 1024 * 1024 self.splitArgs['maxOverSize'] = 10 * 1024 * 1024 * 1024 self.SplitArgs['maxLatency'] = 50000 return def tearDown(self): """ _tearDown_ """ self.testInit.clearDatabase() return def deleteSplitLumis(self): """ _deleteSplitLumis_ """ myThread = threading.currentThread() myThread.dbi.processData("""DELETE FROM lumi_section_split_active """, transaction = False) return def test00(self): """ _test00_ Test that the job name prefix feature works Test max edm size threshold for single lumi small lumi, followed by over-large lumi expect 1 job for small lumi and 4 jobs for over-large """ mySplitArgs = self.splitArgs.copy() for lumi in [1, 2]: for i in range(2 * lumi): newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100) newFile.addRun(Run(1, *[lumi])) newFile.setLocation("SomePNN", immediateSave = False) newFile.create() self.fileset2.addFile(newFile) self.fileset2.commit() jobFactory = self.splitterFactory(package = "WMCore.WMBS", subscription = self.subscription2) jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 0, "ERROR: JobFactory should have returned no JobGroup") mySplitArgs['maxEdmSize'] = 13000 jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 1, "ERROR: JobFactory didn't return one JobGroup") self.assertEqual(len(jobGroups[0].jobs), 3, "ERROR: JobFactory didn't create three jobs") job = jobGroups[0].jobs[0] self.assertTrue(job['name'].startswith("RepackMerge-"), "ERROR: Job has wrong name") self.assertEqual(len(job.getFiles()), 2, "ERROR: Job does not process 2 files") job = jobGroups[0].jobs[1] self.assertEqual(len(job.getFiles()), 3, "ERROR: Job does not process 3 files") job = jobGroups[0].jobs[2] self.assertEqual(len(job.getFiles()), 1, "ERROR: Job does not process 1 file") return def test01(self): """ _test01_ 
Test max size threshold for single lumi small lumi, followed by large lumi expect 1 job for small lumi and 1 job for large """ mySplitArgs = self.splitArgs.copy() for lumi in [1, 2]: for i in range(2): newFile = File(makeUUID(), size = 1000 * lumi, events = 100) newFile.addRun(Run(1, *[lumi])) newFile.setLocation("SomePNN", immediateSave = False) newFile.create() self.fileset2.addFile(newFile) self.fileset2.commit() jobFactory = self.splitterFactory(package = "WMCore.WMBS", subscription = self.subscription2) jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 0, "ERROR: JobFactory should have returned no JobGroup") mySplitArgs['maxInputSize'] = 3000 jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 1, "ERROR: JobFactory didn't return one JobGroup") self.assertEqual(len(jobGroups[0].jobs), 2, "ERROR: JobFactory didn't create two jobs") job = jobGroups[0].jobs[0] self.assertEqual(len(job.getFiles()), 2, "ERROR: Job does not process 2 files") job = jobGroups[0].jobs[1] self.assertEqual(len(job.getFiles()), 2, "ERROR: Job does not process 2 files") return def test02(self): """ _test02_ Test max event threshold for single lumi small lumi, followed by large lumi expect 1 job for small lumi and 1 job for large """ mySplitArgs = self.splitArgs.copy() for lumi in [1, 2]: for i in range(2): newFile = File(makeUUID(), size = 1000, events = 100 * lumi) newFile.addRun(Run(1, *[lumi])) newFile.setLocation("SomePNN", immediateSave = False) newFile.create() self.fileset2.addFile(newFile) self.fileset2.commit() jobFactory = self.splitterFactory(package = "WMCore.WMBS", subscription = self.subscription2) jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 0, "ERROR: JobFactory should have returned no JobGroup") mySplitArgs['maxInputEvents'] = 300 jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 1, "ERROR: JobFactory didn't return one JobGroup") self.assertEqual(len(jobGroups[0].jobs), 2, "ERROR: 
JobFactory didn't create two jobs") job = jobGroups[0].jobs[0] self.assertEqual(len(job.getFiles()), 2, "ERROR: Job does not process 2 files") job = jobGroups[0].jobs[1] self.assertEqual(len(job.getFiles()), 2, "ERROR: Job does not process 2 files") return def test03(self): """ _test03_ Test max input files threshold for single lumi small lumi, followed by large lumi expect 1 job for small lumi and 1 job for large """ mySplitArgs = self.splitArgs.copy() for lumi in [1, 2]: for i in range(lumi * 2): newFile = File(makeUUID(), size = 1000, events = 100) newFile.addRun(Run(1, *[lumi])) newFile.setLocation("SomePNN", immediateSave = False) newFile.create() self.fileset2.addFile(newFile) self.fileset2.commit() jobFactory = self.splitterFactory(package = "WMCore.WMBS", subscription = self.subscription2) jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 0, "ERROR: JobFactory should have returned no JobGroup") mySplitArgs['maxInputFiles'] = 3 jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 1, "ERROR: JobFactory didn't return one JobGroup") self.assertEqual(len(jobGroups[0].jobs), 2, "ERROR: JobFactory didn't create two jobs") job = jobGroups[0].jobs[0] self.assertEqual(len(job.getFiles()), 2, "ERROR: Job does not process 2 files") job = jobGroups[0].jobs[1] self.assertEqual(len(job.getFiles()), 4, "ERROR: Job does not process 4 files") return def test04(self): """ _test04_ Test max size threshold for multi lumi 3 same size lumis """ mySplitArgs = self.splitArgs.copy() for lumi in [1, 2, 3]: for i in range(2): newFile = File(makeUUID(), size = 1000, events = 100) newFile.addRun(Run(1, *[lumi])) newFile.setLocation("SomePNN", immediateSave = False) newFile.create() self.fileset2.addFile(newFile) self.fileset2.commit() jobFactory = self.splitterFactory(package = "WMCore.WMBS", subscription = self.subscription2) mySplitArgs['minInputSize'] = 3000 jobGroups = jobFactory(**mySplitArgs) self.assertEqual(len(jobGroups), 0, "ERROR: 
JobFactory should have returned no JobGroup")

        # raising the size threshold lets the splitter emit a job now
        mySplitArgs['maxInputSize'] = 5000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        # closing the fileset flushes the leftover files into a final job
        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test05(self):
        """
        _test05_

        Test max event threshold for multi lumi

        3 same size lumis

        """
        mySplitArgs = self.splitArgs.copy()

        # 2 files per lumi, 3 lumis => 6 files of 1000 bytes / 100 events each
        for lumi in [1, 2, 3]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        # below the minimum size with the fileset still open => no job yet
        mySplitArgs['minInputSize'] = 3000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # event threshold caps the job at the first two lumis (4 files)
        mySplitArgs['maxInputEvents'] = 500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        # closing the fileset flushes the last lumi's files
        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test06(self):
        """
        _test06_

        Test max input files threshold for multi lumi

        3 same size lumis

        """
        mySplitArgs = self.splitArgs.copy()

        # same layout as test05: 2 files for each of lumis 1, 2 and 3
        for lumi in [1, 2, 3]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 3000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # file-count threshold caps the job at the first two lumis (4 files)
        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        # closing the fileset flushes the last lumi's files
        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test07(self):
        """
        _test07_

        Test over merge

        one small lumi, one large lumi (small below min size,
        large below max size, but both together above max size)

        """
        mySplitArgs = self.splitArgs.copy()

        # lumi 1 files are 1000 bytes, lumi 2 files are 4000 bytes
        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        # without maxOverSize the merge overshoots maxInputSize: all 4 files
        mySplitArgs['minInputSize'] = 3000
        mySplitArgs['maxInputSize'] = 9000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        return

    def test08(self):
        """
        _test08_

        Test under merge (over merge size threshold)

        one small lumi, one large lumi (small below min size,
        large below max size, but both together above max size)

        """
        mySplitArgs = self.splitArgs.copy()

        # lumi 1 files are 1000 bytes, lumi 2 files are 4000 bytes
        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        # maxOverSize keeps the two lumis apart => one job per lumi
        mySplitArgs['minInputSize'] = 3000
        mySplitArgs['maxInputSize'] = 9000
        mySplitArgs['maxOverSize'] = 9500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        # closing the fileset flushes the remaining lumi
        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test09(self):
        """
        _test09_

        Test under merge (over merge event threshold)

        one small lumi, one large lumi (small below min size,
        large below max size, but both together above max size)

        It was changed due to maxinputevents not being used anymore.

        """
        mySplitArgs = self.splitArgs.copy()

        # lumi 1 files are 1000 bytes, lumi 2 files are 4000 bytes
        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 1500
        mySplitArgs['maxInputSize'] = 9000
        mySplitArgs['maxOverSize'] = 9500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        # closing the fileset flushes the remaining lumi
        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test10(self):
        """
        _test10_

        Test merging of multiple lumis with holes in the lumi sequence

        Hole is due to no streamer files for the lumi

        Multi lumi input

        It only works with a single hole, as it creates a merged file
        even with it being of a smaller size than the mininputsize.

        It was changed due to the maxinputevents not being used anymore

        """
        mySplitArgs = self.splitArgs.copy()

        # lumi 3 is deliberately missing => a hole in the lumi sequence
        for lumi in [1, 2, 4]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        # thresholds far above the data => nothing mergeable across the hole
        mySplitArgs['minInputSize'] = 100000
        mySplitArgs['maxInputSize'] = 200000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # declaring lumi 3 closed with zero files plugs the hole,
        # so lumis 1 and 2 can now be merged past it
        self.insertClosedLumiDAO.execute(binds = { 'RUN' : 1,
                                                   'LUMI' : 3,
                                                   'STREAM' : "A",
                                                   'FILECOUNT' : 0,
                                                   'INSERT_TIME' : self.currentTime,
                                                   'CLOSE_TIME' : self.currentTime },
                                         transaction = False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        return
class HeartbeatTest(unittest.TestCase):

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection. Try to create all of the
        Heartbeat tables. Also add some dummy locations.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()  # logLevel = logging.SQLDEBUG
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.Agent.Database"],
                                useDefault=False)

        self.heartbeat = HeartbeatAPI("testComponent")

    def tearDown(self):
        """
        _tearDown_

        Drop all the Heartbeat tables.
        """
        self.testInit.clearDatabase()

    def testHeartbeat(self):
        """
        Register two components and check that the heartbeat info reports,
        per component, the worker that was updated most recently, and that
        worker errors are reported back.
        """
        def assertWorkers(api, expectedNames):
            # One heartbeat row is expected per registered component, in
            # registration order, each naming that component's latest worker.
            info = api.getHeartbeatInfo()
            self.assertEqual(len(info), len(expectedNames))
            for position, name in enumerate(expectedNames):
                self.assertEqual(info[position]['worker_name'], name)

        component = HeartbeatAPI("testComponent")
        component.registerComponent()
        # freshly registered component has no heartbeat rows yet
        self.assertEqual(component.getHeartbeatInfo(), [])

        component.updateWorkerHeartbeat("testWorker")
        assertWorkers(component, ["testWorker"])

        # the sleeps guarantee distinct heartbeat timestamps between updates
        time.sleep(1)
        component.updateWorkerHeartbeat("testWorker2")
        assertWorkers(component, ["testWorker2"])

        time.sleep(1)
        component.updateWorkerHeartbeat("testWorker")
        assertWorkers(component, ["testWorker"])

        # second component; its workers show up as an additional row
        component = HeartbeatAPI("test2Component")
        component.registerComponent()

        time.sleep(1)
        component.updateWorkerHeartbeat("test2Worker")
        assertWorkers(component, ["testWorker", "test2Worker"])

        time.sleep(1)
        component.updateWorkerHeartbeat("test2Worker2")
        assertWorkers(component, ["testWorker", "test2Worker2"])

        time.sleep(1)
        component.updateWorkerHeartbeat("test2Worker")
        assertWorkers(component, ["testWorker", "test2Worker"])

        # a reported worker error must come back in the heartbeat info
        component.updateWorkerError("test2Worker", "Error1")
        info = component.getHeartbeatInfo()
        self.assertEqual(info[1]['error_message'], "Error1")
class ConditionTest(unittest.TestCase):
    """
    _ExpressTest_

    Test for Express job splitter
    """

    def setUp(self):
        """
        _setUp_

        Create the T0/WMBS schema, register run 1 / stream "Express" with a
        single streamer file and a prompt calibration record, and build the
        Condition subscription plus the parentage chain
        streamer -> alcareco -> alcaprompt -> sqlite used by test00.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)

        wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS",
                                    logger = logging,
                                    dbinterface = myThread.dbi)

        # minimal location setup so files can be placed somewhere
        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE')
                                    """, transaction = False)

        # register run 1 with one lumi on the "Express" stream
        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'TIME' : int(time.time()),
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)

        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 1 },
                              transaction = False)

        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
                                transaction = False)

        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        # single (empty) streamer file is the root of the parentage chain
        insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
        insertStreamerDAO.execute(binds = { 'RUN' : 1,
                                            'LUMI' : 1,
                                            'STREAM' : "Express",
                                            'TIME' : int(time.time()),
                                            'LFN' : "/streamer",
                                            'FILESIZE' : 0,
                                            'EVENTS' : 0 },
                                  transaction = False)

        # prompt_calib row that test00 expects the splitter to update
        insertPromptCalibrationDAO = daoFactory(classname = "RunConfig.InsertPromptCalibration")
        insertPromptCalibrationDAO.execute( { 'RUN' : 1,
                                              'STREAM' : "Express" },
                                            transaction = False)

        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset1.create()

        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow1.create()

        self.subscription1  = Subscription(fileset = self.fileset1,
                                           workflow = workflow1,
                                           split_algo = "Condition",
                                           type = "Condition")
        self.subscription1.create()

        # set parentage chain and sqlite fileset
        alcaRecoFile = File("/alcareco", size = 0, events = 0)
        alcaRecoFile.addRun(Run(1, *[1]))
        alcaRecoFile.setLocation("SomeSE", immediateSave = False)
        alcaRecoFile.create()
        alcaPromptFile = File("/alcaprompt", size = 0, events = 0)
        alcaPromptFile.addRun(Run(1, *[1]))
        alcaPromptFile.setLocation("SomeSE", immediateSave = False)
        alcaPromptFile.create()
        sqliteFile = File("/sqlite", size = 0, events = 0)
        sqliteFile.create()
        self.fileset1.addFile(sqliteFile)
        self.fileset1.commit()

        # NOTE(review): 'results' is never used afterwards; the query is a
        # read-only SELECT, presumably left over from debugging
        results = myThread.dbi.processData("""SELECT lfn FROM wmbs_file_details
                                              """, transaction = False)[0].fetchall()

        setParentageDAO = wmbsDaoFactory(classname = "Files.SetParentage")
        setParentageDAO.execute(binds = [ { 'parent' : "/streamer",
                                            'child' : "/alcareco" },
                                          { 'parent' : "/alcareco",
                                            'child' : "/alcaprompt" },
                                          { 'parent' : "/alcaprompt",
                                            'child' : "/sqlite" } ],
                                transaction = False)

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['runNumber'] = 1
        self.splitArgs['streamName'] = "Express"

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def isPromptCalibFinished(self):
        """
        _isPromptCalibFinished_

        Return the 'finished' flag of the (single) prompt_calib row.
        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT finished
                                             FROM prompt_calib
                                             """, transaction = False)[0].fetchall()[0][0]
        return result

    def countPromptCalibFiles(self):
        """
        _countPromptCalibFiles_

        Return the number of rows in prompt_calib_file.
        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT COUNT(*)
                                             FROM prompt_calib_file
                                             """, transaction = False)[0].fetchall()[0][0]
        return result

    def test00(self):
        """
        _test00_

        Make sure the job splitter behaves correctly.

        Just make sure the job splitter does nothing
        when the fileset is open and populates t0ast
        data structures when it's closed. In the later
        case all input files should be marked as acquired
        without creating a job as well.

        """
        mySplitArgs = self.splitArgs.copy()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)

        # nothing has run yet: no finished flag, no files recorded
        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 0,
                         "ERROR: there should be no prompt_calib_file")

        # open fileset: splitter only records the input file, not finished
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        self.fileset1.markOpen(False)

        # closed fileset: no job created, prompt_calib marked finished
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.assertEqual(self.isPromptCalibFinished(), 1,
                         "ERROR: prompt_calib should be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        return
class EventAwareLumiBasedTest(unittest.TestCase):
    """
    _EventAwareLumiBasedTest_

    Test event based job splitting.
    """

    def setUp(self):
        """
        _setUp_

        Create two subscriptions: One that contains a single file and one that
        contains multiple files.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="T1_US_FNAL", pnn="T1_US_FNAL_Disk")
        locationAction.execute(siteName="T2_CH_CERN", pnn="T2_CH_CERN")

        self.testWorkflow = Workflow(spec="spec.xml", owner="Steve",
                                     name="wf001", task="Test")
        self.testWorkflow.create()

        # per-event resource figures used to check job estimates below
        self.performanceParams = {'timePerEvent': 12,
                                  'memoryRequirement': 2300,
                                  'sizePerEvent': 400}

        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase()
        return

    def createSubscription(self, nFiles, lumisPerFile, twoSites=False, nEventsPerFile=100):
        """
        _createSubscription_

        Create a subscription for testing
        """
        baseName = makeUUID()

        testFileset = Fileset(name=baseName)
        testFileset.create()
        for i in range(nFiles):
            newFile = self.createFile('%s_%i' % (baseName, i), nEventsPerFile,
                                      i, lumisPerFile, 'T1_US_FNAL_Disk')
            newFile.create()
            testFileset.addFile(newFile)
        if twoSites:
            for i in range(nFiles):
                newFile = self.createFile('%s_%i_2' % (baseName, i), nEventsPerFile,
                                          i, lumisPerFile, 'T2_CH_CERN')
                newFile.create()
                testFileset.addFile(newFile)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()

        return testSubscription

    def createFile(self, lfn, events, run, lumis, location, lumiMultiplier=None):
        """
        _createFile_

        Create a file for testing
        """
        if lumiMultiplier is None:
            lumiMultiplier = run

        newFile = File(lfn=lfn, size=1000, events=events)
        lumiList = []
        for lumi in range(lumis):
            lumiList.append((lumiMultiplier * lumis) + lumi)
        newFile.addRun(Run(run, *lumiList))
        newFile.setLocation(location)
        return newFile

    def testA_FileSplitNoHardLimit(self):
        """
        _testA_FileSplitNoHardLimit_

        Simplest use case, there is only a self limit of events per job which
        the algorithm must adapt to on a file by file basis. At most one file
        per job so we don't have to pass information between files.
        """
        splitter = SplitterFactory()

        # Create 5 files with 7 lumi per file and 100 events per lumi on average.
        testSubscription = self.createSubscription(nFiles=5, lumisPerFile=7,
                                                   twoSites=False, nEventsPerFile=700)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # First test, the optimal settings are 360 events per job
        # As we have files with 100 events per lumi, this will configure the splitting to
        # 3.6 lumis per job, which rounds to 3, the algorithm always approximates to the lower integer.
        jobGroups = jobFactory(halt_job_on_file_boundaries=True, splitOnRun=True,
                               events_per_job=360, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 15, "There should be 15 jobs")
        for idx, job in enumerate(jobs, start=1):
            # Jobs may have 1 lumi or 2 check performance figures accordingly
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            if idx % 3 == 0:
                self.assertEqual(job['estimatedDiskUsage'], 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 100 * 12)
            else:
                self.assertEqual(job['estimatedDiskUsage'], 3 * 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 3 * 100 * 12)

        testSubscription = self.createSubscription(nFiles=5, lumisPerFile=7,
                                                   twoSites=False, nEventsPerFile=700)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # Now set the average to 200 events per job
        # This results in the algorithm reducing the lumis per job to 2
        jobGroups = jobFactory(halt_job_on_file_boundaries=True, splitOnRun=True,
                               events_per_job=200, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 20, "There should be 20 jobs")
        for idx, job in enumerate(jobs, start=1):
            # Jobs may have 1 lumi or 2 check performance figures accordingly
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            if idx % 4 == 0:
                self.assertEqual(job['estimatedDiskUsage'], 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 100 * 12)
            else:
                self.assertEqual(job['estimatedDiskUsage'], 2 * 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 2 * 100 * 12)

        # Check extremes, process a zero event files with lumis. It must be processed in one job
        testSubscription = self.createSubscription(nFiles=5, lumisPerFile=100,
                                                   twoSites=False, nEventsPerFile=0)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)
        jobGroups = jobFactory(halt_job_on_file_boundaries=True, events_per_job=5000,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 0, "There are not enough events, so it should be 0 instead")

        # we close this fileset to get it moving
        fileset = testSubscription.getFileset()
        fileset.markOpen(False)

        jobGroups = jobFactory(halt_job_on_file_boundaries=True, events_per_job=5000,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 5, "There should be 5 jobs")
        for job in jobs:
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            self.assertEqual(job['estimatedDiskUsage'], 0)
            self.assertEqual(job['estimatedJobTime'], 0)

        # Process files with 10k events per lumi, fallback to one lumi per job. We can't do better
        testSubscription = self.createSubscription(nFiles=5, lumisPerFile=5,
                                                   twoSites=False, nEventsPerFile=50000)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        jobGroups = jobFactory(halt_job_on_file_boundaries=True, splitOnRun=True,
                               events_per_job=5000, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        # BUGFIX: the message used to claim 5 jobs while asserting 25
        self.assertEqual(len(jobs), 25, "There should be 25 jobs")
        for job in jobs:
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            self.assertEqual(job['estimatedDiskUsage'], 10000 * 400)
            self.assertEqual(job['estimatedJobTime'], 10000 * 12)

    def testB_NoFileSplitNoHardLimit(self):
        """
        _testB_NoFileSplitNoHardLimit_

        In this case we don't split on file boundaries, check different
        combination of files make sure we make the most of the splitting,
        e.g. include many zero event files in a single job.
        """
        splitter = SplitterFactory()

        # Create 100 files with 7 lumi per file and 0 events per lumi on average.
        testSubscription = self.createSubscription(nFiles=100, lumisPerFile=7,
                                                   twoSites=False, nEventsPerFile=0)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # First test, the optimal settings are 360 events per job
        # As we have files with 0 events per lumi, this will configure the splitting to
        # a single job containing all files
        jobGroups = jobFactory(halt_job_on_file_boundaries=False, splitOnRun=False,
                               events_per_job=360, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 0, "There aren't enough events, so it should have 0 job groups")

        # we close this fileset to get it moving
        fileset = testSubscription.getFileset()
        fileset.markOpen(False)

        jobGroups = jobFactory(halt_job_on_file_boundaries=False, splitOnRun=False,
                               events_per_job=360, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 1, "There should be 1 job")
        self.assertEqual(len(jobs[0]['input_files']), 100, "All 100 files must be in the job")
        self.assertEqual(jobs[0]['estimatedMemoryUsage'], 2300)
        self.assertEqual(jobs[0]['estimatedDiskUsage'], 0)
        self.assertEqual(jobs[0]['estimatedJobTime'], 0)

        # Create 7 files, each one with different lumi/event distributions
        testFileset = Fileset(name="FilesetA")
        testFileset.create()
        testFileA = self.createFile("/this/is/file1", 250, 0, 5, "T2_CH_CERN")
        testFileB = self.createFile("/this/is/file2", 600, 1, 1, "T2_CH_CERN")
        testFileC = self.createFile("/this/is/file3", 1200, 2, 2, "T2_CH_CERN")
        testFileD = self.createFile("/this/is/file4", 100, 3, 1, "T2_CH_CERN")
        testFileE = self.createFile("/this/is/file5", 30, 4, 1, "T2_CH_CERN")
        testFileF = self.createFile("/this/is/file6", 10, 5, 1, "T2_CH_CERN")
        testFileG = self.createFile("/this/is/file7", 151, 6, 3, "T2_CH_CERN")
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.addFile(testFileD)
        testFileset.addFile(testFileE)
        testFileset.addFile(testFileF)
        testFileset.addFile(testFileG)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # Optimal settings are: jobs with 150 events per job
        # This means, the first file must be split in 3 lumis per job which would leave room
        # for another lumi in the second job, but the second file has a lumi too big for that
        # The 3rd job only contains the second file, the fourth and fifth job split the third file
        jobGroups = jobFactory(halt_job_on_file_boundaries=False, splitOnRun=False,
                               events_per_job=150, performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 7, "7 jobs must be in the jobgroup")
        self.assertEqual(jobs[0]["mask"].getRunAndLumis(), {0: [[0, 2]]}, "Wrong mask for the first job")
        self.assertEqual(jobs[0]["estimatedJobTime"], 150 * 12)
        self.assertEqual(jobs[0]["estimatedDiskUsage"], 150 * 400)
        self.assertEqual(jobs[1]["mask"].getRunAndLumis(), {0: [[3, 4]]}, "Wrong mask for the second job")
        self.assertEqual(jobs[1]["estimatedJobTime"], 100 * 12)
        self.assertEqual(jobs[1]["estimatedDiskUsage"], 100 * 400)
        self.assertEqual(jobs[2]["mask"].getRunAndLumis(), {1: [[1, 1]]}, "Wrong mask for the third job")
        self.assertEqual(jobs[2]["estimatedJobTime"], 600 * 12)
        self.assertEqual(jobs[2]["estimatedDiskUsage"], 600 * 400)
        self.assertEqual(jobs[3]["mask"].getRunAndLumis(), {2: [[4, 4]]}, "Wrong mask for the fourth job")
        self.assertEqual(jobs[3]["estimatedJobTime"], 600 * 12)
        self.assertEqual(jobs[3]["estimatedDiskUsage"], 600 * 400)
        self.assertEqual(jobs[4]["mask"].getRunAndLumis(), {2: [[5, 5]]}, "Wrong mask for the fifth job")
        self.assertEqual(jobs[4]["estimatedJobTime"], 600 * 12)
        self.assertEqual(jobs[4]["estimatedDiskUsage"], 600 * 400)
        self.assertEqual(jobs[5]["mask"].getRunAndLumis(), {3: [[3, 3]], 4: [[4, 4]], 5: [[5, 5]]},
                         "Wrong mask for the sixth job")
        self.assertEqual(jobs[5]["estimatedJobTime"], 140 * 12)
        self.assertEqual(jobs[5]["estimatedDiskUsage"], 140 * 400)
        self.assertEqual(jobs[6]["mask"].getRunAndLumis(), {6: [[18, 20]]}, "Wrong mask for the seventh job")
        self.assertEqual(jobs[6]["estimatedJobTime"], 150 * 12)
        self.assertEqual(jobs[6]["estimatedDiskUsage"], 150 * 400)
        for job in jobs:
            self.assertEqual(job["estimatedMemoryUsage"], 2300)

        # Test interactions of this algorithm with splitOnRun = True
        # Make 2 files, one with 3 runs and a second one with the last run of the first
        fileA = File(lfn="/this/is/file1a", size=1000, events=2400)
        # same 8 lumis (1..8) reused for all three runs of fileA
        lumiListA = []
        for lumi in range(8):
            lumiListA.append(1 + lumi)
        fileA.addRun(Run(1, *lumiListA))
        fileA.addRun(Run(2, *lumiListA))
        fileA.addRun(Run(3, *lumiListA))
        fileA.setLocation("T1_US_FNAL_Disk")

        fileB = self.createFile('/this/is/file2a', 200, 3, 5, "T1_US_FNAL_Disk")

        testFileset = Fileset(name='FilesetB')
        testFileset.create()
        testFileset.addFile(fileA)
        testFileset.addFile(fileB)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # The settings for this splitting are 700 events per job
        jobGroups = jobFactory(splitOnRun=True, halt_job_on_file_boundaries=False,
                               events_per_job=700, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 6, "Six jobs must be in the jobgroup")
        self.assertEqual(jobs[0]["estimatedJobTime"], 700 * 12)
        self.assertEqual(jobs[0]["estimatedDiskUsage"], 700 * 400)
        self.assertEqual(jobs[1]["estimatedJobTime"], 100 * 12)
        self.assertEqual(jobs[1]["estimatedDiskUsage"], 100 * 400)
        self.assertEqual(jobs[2]["estimatedJobTime"], 700 * 12)
        self.assertEqual(jobs[2]["estimatedDiskUsage"], 700 * 400)
        self.assertEqual(jobs[3]["estimatedJobTime"], 100 * 12)
        self.assertEqual(jobs[3]["estimatedDiskUsage"], 100 * 400)
        self.assertEqual(jobs[4]["estimatedJobTime"], 700 * 12)
        self.assertEqual(jobs[4]["estimatedDiskUsage"], 700 * 400)
        self.assertEqual(jobs[5]["estimatedJobTime"], 300 * 12)
        self.assertEqual(jobs[5]["estimatedDiskUsage"], 300 * 400)

    def testC_HardLimitSplitting(self):
        """
        _testC_HardLimitSplitting_

        Test that we can specify a event limit, the algorithm shall take single
        lumi files with more events than the limit and mark them for failure
        """
        splitter = SplitterFactory()

        # Create 3 files, the one in the middle is a "bad" file
        testFileset = Fileset(name="FilesetA")
        testFileset.create()
        testFileA = self.createFile("/this/is/file1", 1000, 0, 5, "T1_US_FNAL_Disk")
        testFileB = self.createFile("/this/is/file2", 1000, 1, 1, "T1_US_FNAL_Disk")
        testFileC = self.createFile("/this/is/file3", 1000, 2, 2, "T1_US_FNAL_Disk")
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # Settings are to split on job boundaries, to fail single lumis with more than 800 events
        # and to put 550 events per job
        jobGroups = jobFactory(halt_job_on_file_boundaries=True, splitOnRun=True,
                               events_per_job=550, max_events_per_lumi=800,
                               performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 6, "Six jobs must be in the jobgroup")
        # BUGFIX: message typo "should me marked" corrected
        self.assertTrue(jobs[3]['failedOnCreation'],
                        "The job processing the second file should be marked for failure")
        self.assertEqual(jobs[3]['failedReason'],
                         "File /this/is/file2 has too many events (1000) in 1 lumi(s)",
                         "The reason for the failure is not accurate")

    def testD_HardLimitSplittingOnly(self):
        """
        _testD_HardLimitSplittingOnly_

        Checks that we can split a set of files where every file has a single
        lumi too big to fit in a runnable job
        """
        splitter = SplitterFactory()

        # Create 3 single-big-lumi files
        testFileset = Fileset(name="FilesetA")
        testFileset.create()
        testFileA = self.createFile("/this/is/file1", 1000, 0, 1, "T1_US_FNAL_Disk")
        testFileB = self.createFile("/this/is/file2", 1000, 1, 1, "T1_US_FNAL_Disk")
        testFileC = self.createFile("/this/is/file3", 1000, 2, 1, "T1_US_FNAL_Disk")
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        # Settings are to split on job boundaries, to fail single lumis with more than 800 events
        # and to put 550 events per job
        jobGroups = jobFactory(halt_job_on_file_boundaries=True, splitOnRun=True,
                               events_per_job=550, max_events_per_lumi=800,
                               performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1, "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 3, "Three jobs must be in the jobgroup")
        for i in range(1, 4):
            # BUGFIX: message used to always blame "the second file" (with a
            # typo); it now names the file actually being checked
            self.assertTrue(jobs[i - 1]['failedOnCreation'],
                            "The job processing file%d should be marked for failure" % i)
            self.assertEqual(jobs[i - 1]['failedReason'],
                             "File /this/is/file%d has too many events (1000) in 1 lumi(s)" % i,
                             "The reason for the failure is not accurate")

        return

    def test_NotEnoughEvents(self):
        """
        _test_NotEnoughEvents_

        Checks whether jobs are not created when there are not enough files
        (actually, events) according to the events_per_job requested
        to the splitter algorithm
        """
        splitter = SplitterFactory()

        # Very small fileset (single file) without enough events
        testSubscription = self.createSubscription(nFiles=1, lumisPerFile=2, nEventsPerFile=200)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        jobGroups = jobFactory(events_per_job=500, performance=self.performanceParams, splitOnRun=False)
        self.assertEqual(len(jobGroups), 0)

        # Still a small fileset (two files) without enough events
        testSubscription = self.createSubscription(nFiles=2, lumisPerFile=2, nEventsPerFile=200)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        jobGroups = jobFactory(events_per_job=500, performance=self.performanceParams, splitOnRun=False)
        self.assertEqual(len(jobGroups), 0)

        # Finally an acceptable fileset size (three files) with enough events
        testSubscription = self.createSubscription(nFiles=3, lumisPerFile=2, nEventsPerFile=200)
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        jobGroups = jobFactory(events_per_job=500, performance=self.performanceParams, splitOnRun=False)
        self.assertEqual(len(jobGroups), 1)
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 2)
        self.assertEqual(len(jobs[0]['input_files']), 3)
        self.assertEqual(len(jobs[1]['input_files']), 1)
        self.assertEqual(jobs[0]['mask'].getRunAndLumis(), {0: [[0, 1]], 1: [[2, 3]], 2: [[4, 4]]})
        self.assertEqual(jobs[1]['mask'].getRunAndLumis(), {2: [[5, 5]]})

        # Test fileset with a single run and splitOnRun=True
        testFileset = Fileset(name="FilesetA")
        testFileA = self.createFile("/this/is/file1", 200, 1, 2, "T1_US_FNAL_Disk", lumiMultiplier=0)
        testFileB = self.createFile("/this/is/file2", 200, 1, 2, "T1_US_FNAL_Disk", lumiMultiplier=1)
        testFileC = self.createFile("/this/is/file3", 200, 1, 2, "T1_US_FNAL_Disk", lumiMultiplier=2)
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)

        jobGroups = jobFactory(events_per_job=500, performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1)
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 2)
        self.assertEqual(len(jobs[0]['input_files']), 3)
        self.assertEqual(len(jobs[1]['input_files']), 1)
        self.assertEqual(jobs[0]['mask'].getRunAndLumis(), {1: [[0, 1], [2, 3], [4, 4]]})
        self.assertEqual(jobs[1]['mask'].getRunAndLumis(), {1: [[5, 5]]})

        return
class FixedDelayTest(unittest.TestCase):
    """
    Unit tests for the FixedDelay job splitting algorithm.

    FixedDelay only produces jobs once its trigger_time has passed and the
    input fileset is closed; each test below checks one combination of
    fileset state (open/closed) and file state (available/acquired).
    """

    def setUp(self):
        """
        _setUp_

        Create four filesets (multiple files, a single file, multiple
        lumis, a single lumi) and one FixedDelay subscription on each.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T2_CH_CERN")

        # Ten files, one run each (runs 0-9).
        self.multipleFileFileset = Fileset(name="TestFileset1")
        self.multipleFileFileset.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations=set(["T2_CH_CERN"]))
            newFile.addRun(Run(i, *[45 + i]))
            newFile.create()
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        # A single file in run 1.
        self.singleFileFileset = Fileset(name="TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name", size=1000, events=100,
                       locations=set(["T2_CH_CERN"]))
        newFile.addRun(Run(1, *[45]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()

        # Ten files in run 1, three files per lumi.
        self.multipleFileLumiset = Fileset(name="TestFileset3")
        self.multipleFileLumiset.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations=set(["T2_CH_CERN"]))
            # Floor division keeps the lumi grouping identical under both
            # Python 2 and Python 3 (the original used "i / 3").
            newFile.addRun(Run(1, *[45 + i // 3]))
            newFile.create()
            self.multipleFileLumiset.addFile(newFile)
        self.multipleFileLumiset.commit()

        # Ten files, all in run 1 / lumi 45.
        self.singleLumiFileset = Fileset(name="TestFileset4")
        self.singleLumiFileset.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations=set(["T2_CH_CERN"]))
            newFile.addRun(Run(1, *[45]))
            newFile.create()
            self.singleLumiFileset.addFile(newFile)
        self.singleLumiFileset.commit()

        testWorkflow = Workflow(spec="spec.xml", owner="mnorman",
                                name="wf001", task="Test")
        testWorkflow.create()

        self.multipleFileSubscription = Subscription(
            fileset=self.multipleFileFileset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")
        self.singleFileSubscription = Subscription(
            fileset=self.singleFileFileset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")
        self.multipleLumiSubscription = Subscription(
            fileset=self.multipleFileLumiset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")
        self.singleLumiSubscription = Subscription(
            fileset=self.singleLumiFileset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")

        self.multipleFileSubscription.create()
        self.singleFileSubscription.create()
        self.multipleLumiSubscription.create()
        self.singleLumiSubscription.create()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database.
        """
        self.testInit.clearDatabase()
        return

    def testNone(self):
        """
        _testNone_

        Since the subscriptions are open, we shouldn't get any jobs back.
        """
        splitter = SplitterFactory()
        # trigger_time well in the future: nothing may be split yet.
        futureTime = int(time.time()) * 2
        for subscription in (self.singleFileSubscription,
                             self.multipleFileSubscription,
                             self.multipleLumiSubscription,
                             self.singleLumiSubscription):
            jobFactory = splitter(subscription)
            jobGroups = jobFactory(trigger_time=futureTime)
            self.assertEqual(jobGroups, [], "Should have returned a null set")
        return

    def testClosed(self):
        """
        _testClosed_

        Since the subscriptions are closed and none of the files have been
        acquired, all of the files should show up.
        """
        splitter = SplitterFactory()

        self.singleFileSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.singleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup.")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job.")
        job = jobGroups[0].jobs.pop()
        self.assertEqual(job.getFiles(type="lfn"), ["/some/file/name"],
                         "ERROR: Job contains unknown files.")

        self.multipleFileSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.multipleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEqual(len(myfiles), 10)

        self.multipleLumiSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.multipleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEqual(len(myfiles), 10)

        self.singleLumiSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.singleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup.")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job.")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEqual(len(myfiles), 10)

    def testAllAcquired(self):
        """
        _testAllAcquired_

        All files are already acquired, so every subscription should
        return no job groups.
        """
        splitter = SplitterFactory()
        for subscription in (self.singleFileSubscription,
                             self.multipleFileSubscription,
                             self.multipleLumiSubscription,
                             self.singleLumiSubscription):
            subscription.acquireFiles(subscription.availableFiles())
            jobFactory = splitter(subscription)
            jobGroups = jobFactory(trigger_time=1)
            self.assertEqual(jobGroups, [], "Should have returned a null set")

    def testClosedSomeAcquired(self):
        """
        _testClosedSomeAcquired_

        The filesets are closed and one file of each has been acquired;
        the splitter should hand back jobs over the remaining files.
        """
        splitter = SplitterFactory()

        # NOTE(review): this closes the *multiple* file fileset before the
        # single-file check below; it looks like a copy/paste slip for
        # self.singleFileSubscription.getFileset().markOpen(False).
        # Kept as-is to preserve the original behavior -- confirm intent.
        self.multipleFileSubscription.getFileset().markOpen(False)
        self.singleFileSubscription.acquireFiles(
            [self.singleFileSubscription.availableFiles().pop()])
        jobFactory = splitter(self.singleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(jobGroups, [], "Should have returned a null set")

        self.multipleFileSubscription.getFileset().markOpen(False)
        self.multipleFileSubscription.acquireFiles(
            [self.multipleFileSubscription.availableFiles().pop()])
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.multipleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1, "Should have gotten one jobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "JobFactory should have made one job")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEqual(len(myfiles), 9,
                         "JobFactory should have provided us with 9 files")

        self.multipleLumiSubscription.getFileset().markOpen(False)
        self.multipleLumiSubscription.acquireFiles(
            [self.multipleLumiSubscription.availableFiles().pop()])
        jobFactory = splitter(self.multipleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1, "Should have gotten one jobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "JobFactory should have made one job")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEqual(len(myfiles), 9,
                         "JobFactory should have provided us with 9 files")

        self.singleLumiSubscription.getFileset().markOpen(False)
        self.singleLumiSubscription.acquireFiles(
            [self.singleLumiSubscription.availableFiles().pop()])
        jobFactory = splitter(self.singleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEqual(len(jobGroups), 1, "Should have gotten one jobGroup")
        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "JobFactory should have made one job")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEqual(len(myfiles), 9,
                         "JobFactory should have provided us with 9 files")
class DBCoreTest(unittest.TestCase):
    """
    Unit tests for the DBCore database interface: bind-variable
    construction and the processData() insert/select paths with zero,
    one, several and several thousand sets of binds.
    """

    def setUp(self):
        """Set up logging, a database connection and the test schema."""
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMQuality.TestDB"],
                                useDefault=False)
        return

    def tearDown(self):
        """
        Delete the databases
        """
        self.testInit.clearDatabase()
        return

    def testBuildBinds(self):
        """
        Test class for DBCore.buildbinds()
        """
        # This may become obsolete soon: there is a TODO stuck onto
        # DBCore.buildbinds(). We check that buildbinds() produces one
        # dict per entry in 'files', each carrying the file name under
        # 'seqname' plus every key/value pair from 'dictbinds'.
        seqname = 'file'
        dictbinds = {'lumi': 123, 'test': 100}
        files = ['testA', 'testB', 'testC']

        myThread = threading.currentThread()
        testInterface = myThread.dbi
        binds = testInterface.buildbinds(files, seqname, dictbinds)

        for bind, fileName in zip(binds, files):
            self.assertEqual(bind[seqname], fileName)
            # Check every dictbinds key. (The original only checked one
            # arbitrary key via dict.keys()[0], which is Python 2 only.)
            for key, value in dictbinds.items():
                self.assertEqual(bind[key], value)
        return

    def testProcessDataNoBinds(self):
        """
        _testProcessDataNoBinds_

        Verify that insert and select queries work when no binds are used.
        """
        insertSQL = "INSERT INTO test_tablea VALUES (1, 2, 'three')"
        selectSQL = "SELECT column1, column2, column3 from test_tablea"

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL)
        resultSets = myThread.dbi.processData(selectSQL)

        assert len(resultSets) == 1, \
            "Error: Wrong number of ResultSets returned."
        results = resultSets[0].fetchall()
        assert len(results) == 1, \
            "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
            "Error: Wrong number of columns returned."
        assert results[0][0] == 1, \
            "Error: Column one is wrong."
        assert results[0][1] == 2, \
            "Error: Column two is wrong."
        assert results[0][2] == "three", \
            "Error: Column three is wrong."
        return

    def testProcessDataOneBind(self):
        """
        _testProcessDataOneBind_

        Verify that insert and select queries work with one set of bind
        variables.
        """
        bindsA = {"one": 1, "two": 2, "three": "three"}
        bindsB = {"one": 3, "two": 2, "three": "one"}
        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
            """SELECT column1, column2, column3 FROM test_tablea
               WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        # Each select should match exactly the row inserted with the
        # corresponding bind set.
        resultSets = myThread.dbi.processData(selectSQL, bindsA)
        assert len(resultSets) == 1, \
            "Error: Wrong number of ResultSets returned."
        results = resultSets[0].fetchall()
        assert len(results) == 1, \
            "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
            "Error: Wrong number of columns returned."
        assert results[0][0] == 1, \
            "Error: Column one is wrong."
        assert results[0][1] == 2, \
            "Error: Column two is wrong."
        assert results[0][2] == "three", \
            "Error: Column three is wrong."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)
        assert len(resultSets) == 1, \
            "Error: Wrong number of ResultSets returned."
        results = resultSets[0].fetchall()
        assert len(results) == 1, \
            "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
            "Error: Wrong number of columns returned."
        assert results[0][0] == 3, \
            "Error: Column one is wrong."
        assert results[0][1] == 2, \
            "Error: Column two is wrong."
        assert results[0][2] == "one", \
            "Error: Column three is wrong."
        return

    def testProcessDataSeveralBinds(self):
        """
        _testProcessDataSeveralBinds_

        Verify that insert and select queries work with several binds.
        """
        bindsA = [{"one": 1, "two": 2, "three": "three"},
                  {"one": 3, "two": 2, "three": "one"},
                  {"one": 4, "two": 5, "three": "six"},
                  {"one": 6, "two": 5, "three": "four"}]
        bindsB = [{"one": 10, "two": 11, "three": "twelve"},
                  {"one": 12, "two": 11, "three": "ten"}]

        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
            """SELECT column1, column2, column3 FROM test_tablea
               WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL, bindsA)
        assert len(resultSets) == 1, \
            "Error: Wrong number of ResultSets returned."
        results = resultSets[0].fetchall()
        assert len(results) == 4, \
            "Error: Wrong number of rows returned."
        # Tick off each expected bind set exactly once; bindsA must be
        # empty afterwards (the break makes the in-loop removal safe).
        for result in results:
            assert len(result) == 3, \
                "Error: Wrong number of columns returned."
            for bind in bindsA:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsA.remove(bind)
                    break
        assert len(bindsA) == 0, \
            "Error: Missing rows from select."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)
        assert len(resultSets) == 1, \
            "Error: Wrong number of ResultSets returned."
        results = resultSets[0].fetchall()
        assert len(results) == 2, \
            "Error: Wrong number of rows returned."
        for result in results:
            assert len(result) == 3, \
                "Error: Wrong number of columns returned."
            for bind in bindsB:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsB.remove(bind)
                    break
        assert len(bindsB) == 0, \
            "Error: Missing rows from select."
        return

    def testProcessDataHugeBinds(self):
        """
        _testProcessDataHugeBinds_

        Verify that select and insert queries work with several thousand
        binds.
        """
        bindsA = []
        bindsB = []
        for i in range(3001):
            bindsA.append({"one": i, "two": i * 2, "three": str(i * 3)})
        for i in range(1501):
            bindsB.append({"one": (i + 1) * 2, "two": i, "three": str(i * 5)})

        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
            """SELECT column1, column2, column3 FROM test_tablea
               WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        # Large bind sets may come back split over several ResultSets.
        resultSets = myThread.dbi.processData(selectSQL, bindsA)
        results = []
        for resultSet in resultSets:
            results.extend(resultSet.fetchall())
        assert len(results) == 3001, \
            "Error: Wrong number of rows returned: %d" % len(results)
        for result in results:
            assert len(result) == 3, \
                "Error: Wrong number of columns returned."
            for bind in bindsA:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsA.remove(bind)
                    break
        assert len(bindsA) == 0, \
            "Error: Missing rows from select."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)
        results = []
        for resultSet in resultSets:
            results.extend(resultSet.fetchall())
        assert len(results) == 1501, \
            "Error: Wrong number of rows returned."
        for result in results:
            assert len(result) == 3, \
                "Error: Wrong number of columns returned."
            for bind in bindsB:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsB.remove(bind)
                    break
        assert len(bindsB) == 0, \
            "Error: Missing rows from select."
        return

    def testInsertHugeNumber(self):
        """
        _testInsertHugeNumber_

        Verify that we can insert and select huge numbers.
        """
        insertSQL = "INSERT INTO test_bigcol VALUES(:val1)"
        selectSQL = "SELECT * FROM test_bigcol"

        # Second value exceeds 32-bit integer range.
        bindsA = {"val1": 2012211901}
        bindsB = {"val1": 20122119010}

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL)
        results = []
        for resultSet in resultSets:
            for row in resultSet.fetchall():
                results.append(row[0])

        assert len(results) == 2, \
            "Error: Wrong number of results."
        assert bindsA["val1"] in results, \
            "Error: Value one is missing."
        assert bindsB["val1"] in results, \
            "Error: Value one is missing."
        return
class EventBasedTest(unittest.TestCase):
    """
    _EventBasedTest_

    Tests for run based job splitting: every subscription below uses
    split_algo="RunBased" (the class name appears to be historical).
    """

    def setUp(self):
        """
        _setUp_

        Create five filesets/subscriptions covering the run layouts the
        RunBased splitter must handle: one run per file, a single file,
        several files per run, a single run, and a single run spread over
        multiple lumis.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T2_CH_CERN")

        # Ten files, one run each (runs 0-9).
        self.multipleFileFileset = Fileset(name="TestFileset1")
        self.multipleFileFileset.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(i, *[45 + i]))
            newFile.create()
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        # A single file in run 1.
        self.singleFileFileset = Fileset(name="TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name", size=1000, events=100,
                       locations="T2_CH_CERN")
        newFile.addRun(Run(1, *[45]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()

        # Ten files over four runs (integer division under Python 2:
        # runs 0,0,0,1,1,1,2,2,2,3).
        self.multipleFileRunset = Fileset(name="TestFileset3")
        self.multipleFileRunset.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(i / 3, *[45]))
            newFile.create()
            self.multipleFileRunset.addFile(newFile)
        self.multipleFileRunset.commit()

        # Ten files, all in run 1 / lumi 45.
        self.singleRunFileset = Fileset(name="TestFileset4")
        self.singleRunFileset.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(1, *[45]))
            newFile.create()
            self.singleRunFileset.addFile(newFile)
        self.singleRunFileset.commit()

        # Ten files, all in run 1, one distinct lumi per file.
        self.singleRunMultipleLumi = Fileset(name="TestFileset5")
        self.singleRunMultipleLumi.create()
        for i in range(10):
            newFile = File(makeUUID(), size=1000, events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(1, *[45 + i]))
            newFile.create()
            self.singleRunMultipleLumi.addFile(newFile)
        self.singleRunMultipleLumi.commit()

        testWorkflow = Workflow(spec="spec.xml", owner="mnorman",
                                name="wf001", task="Test")
        testWorkflow.create()

        self.multipleFileSubscription = Subscription(
            fileset=self.multipleFileFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleFileSubscription = Subscription(
            fileset=self.singleFileFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.multipleRunSubscription = Subscription(
            fileset=self.multipleFileRunset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleRunSubscription = Subscription(
            fileset=self.singleRunFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleRunMultipleLumiSubscription = Subscription(
            fileset=self.singleRunMultipleLumi,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")

        self.multipleFileSubscription.create()
        self.singleFileSubscription.create()
        self.multipleRunSubscription.create()
        self.singleRunSubscription.create()
        self.singleRunMultipleLumiSubscription.create()
        return

    def tearDown(self):
        """
        _tearDown_

        Tear down WMBS architechture.
        """
        self.testInit.clearDatabase()
        return

    def testExactRuns(self):
        """
        _testExactRuns_

        Test run based job splitting when the number of files per job
        exactly matches the single input file: expect one group with one
        job holding that file.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleFileSubscription)
        jobGroups = jobFactory(files_per_job=1)

        assert len(jobGroups) == 1, \
            "ERROR: JobFactory didn't return one JobGroup."
        assert len(jobGroups[0].jobs) == 1, \
            "ERROR: JobFactory didn't create a single job."
        job = jobGroups[0].jobs.pop()
        assert job.getFiles(type="lfn") == ["/some/file/name"], \
            "ERROR: Job contains unknown files."
        return

    def testMoreRuns(self):
        """
        _testMoreRuns_

        Test run based job splitting when files_per_job is greater than
        the number of input files: the single file still yields one job.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleFileSubscription)
        jobGroups = jobFactory(files_per_job=2)

        assert len(jobGroups) == 1, \
            "ERROR: JobFactory didn't return one JobGroup."
        assert len(jobGroups[0].jobs) == 1, \
            "ERROR: JobFactory didn't create a single job."
        job = jobGroups[0].jobs.pop()
        assert job.getFiles(type="lfn") == ["/some/file/name"], \
            "ERROR: Job contains unknown files."
        return

    def testMultipleRuns(self):
        """
        _testMultipleRuns_

        Test run based job splitting with one run per file over multiple
        files: expect one JobGroup per run, one single-file job each.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.multipleFileSubscription)
        jobGroups = jobFactory(files_per_job=1)

        assert len(jobGroups) == 10, \
            "ERROR: JobFactory didn't return one JobGroup per run."
        assert len(jobGroups[0].jobs) == 1, \
            "ERROR: JobFactory didn't put each run in a file."
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 1)
        return

    def testMultipleRunsCombine(self):
        """
        _testMultipleRunsCombine_

        Test run based job splitting when files_per_job is less than the
        number of files per run (three files per run, files_per_job=2):
        expect one JobGroup per run with jobs of two then one file.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.multipleRunSubscription)
        jobGroups = jobFactory(files_per_job=2)

        assert len(jobGroups) == 4, \
            "ERROR: JobFactory didn't return one JobGroup per run."
        assert len(jobGroups[1].jobs) == 2, \
            "ERROR: JobFactory didn't put only one job in the first job"
        # Last one in the queue should have one file, previous two
        # (three files per run).
        self.assertEqual(len(jobGroups[1].jobs.pop().getFiles(type="lfn")), 1)
        self.assertEqual(len(jobGroups[1].jobs.pop().getFiles(type="lfn")), 2)
        return

    def testSingleRunsCombineUneven(self):
        """
        _testSingleRunsCombineUneven_

        Test run based job splitting when the job size is less than and
        indivisible by the number of files (10 files, files_per_job=8):
        expect two jobs, one with 8 and one with 2 files.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleRunSubscription)
        jobGroups = jobFactory(files_per_job=8)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 8)
        return

    def testPersistSingleRunsCombineUneven(self):
        """
        _testPersistSingleRunsCombineUneven_

        Same scenario as testSingleRunsCombineUneven.

        NOTE(review): this test is currently an exact duplicate of
        testSingleRunsCombineUneven; "Persist" suggests it was meant to
        re-run the factory against persisted state -- confirm.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleRunSubscription)
        jobGroups = jobFactory(files_per_job=8)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 8)
        return

    def testSingleRunsMultipleLumiCombineUneven(self):
        """
        _testSingleRunsMultipleLumiCombineUneven_

        As testSingleRunsCombineUneven, but with the files spread over
        multiple lumis in the single run: still two jobs of 8 and 2 files.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(
            package="WMCore.WMBS",
            subscription=self.singleRunMultipleLumiSubscription)
        jobGroups = jobFactory(files_per_job=8)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 8)
        return
class StoreResultsTest(unittest.TestCase):
    """
    Tests that a StoreResults workload installs its workflows, filesets
    and subscriptions into WMBS correctly.
    """

    def setUp(self):
        """
        _setUp_

        Initialize the database.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        # DAOs used by the fileset/subscription checks in testFilesets.
        self.listTasksByWorkflow = self.daoFactory(
            classname="Workflow.LoadFromName")
        self.listFilesets = self.daoFactory(classname="Fileset.List")
        self.listSubsMapping = self.daoFactory(
            classname="Subscriptions.ListSubsAndFilesetsFromWorkflow")
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database.
        """
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
        return

    def testStoreResults(self):
        """
        _testStoreResults_

        Create a StoreResults workflow and verify it installs into WMBS
        correctly.
        """
        arguments = StoreResultsWorkloadFactory.getTestArguments()

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", arguments)

        testWMBSHelper = WMBSHelper(testWorkload, "StoreResults", "SomeBlock",
                                    cachepath=self.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        testWorkflow = Workflow(name="TestWorkload",
                                task="/TestWorkload/StoreResults")
        testWorkflow.load()

        # Expect two outputs: logArchive plus the merged user output.
        self.assertEqual(len(testWorkflow.outputMap.keys()), 2,
                         "Error: Wrong number of WF outputs.")

        goldenOutputMods = {"Merged": "USER"}
        for goldenOutputMod, tier in goldenOutputMods.items():
            fset = goldenOutputMod + tier
            mergedOutput = testWorkflow.outputMap[fset][0][
                "merged_output_fileset"]
            unmergedOutput = testWorkflow.outputMap[fset][0]["output_fileset"]
            mergedOutput.loadData()
            unmergedOutput.loadData()
            self.assertEqual(
                mergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % fset,
                "Error: Merged output fileset is wrong: %s" %
                mergedOutput.name)
            # NOTE(review): the unmerged fileset is asserted to the same
            # "merged-..." name as the merged one -- presumably intentional
            # for a merge-type StoreResults workflow; confirm.
            self.assertEqual(
                unmergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % fset,
                "Error: Unmerged output fileset is wrong: %s." %
                unmergedOutput.name)

        logArchOutput = testWorkflow.outputMap["logArchive"][0][
            "merged_output_fileset"]
        unmergedLogArchOutput = testWorkflow.outputMap["logArchive"][0][
            "output_fileset"]
        logArchOutput.loadData()
        unmergedLogArchOutput.loadData()

        self.assertEqual(logArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")
        self.assertEqual(unmergedLogArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")

        topLevelFileset = Fileset(name="TestWorkload-StoreResults-SomeBlock")
        topLevelFileset.loadData()

        procSubscription = Subscription(fileset=topLevelFileset,
                                        workflow=testWorkflow)
        procSubscription.loadData()

        self.assertEqual(procSubscription["type"], "Merge",
                         "Error: Wrong subscription type.")
        self.assertEqual(procSubscription["split_algo"],
                         "ParentlessMergeBySize",
                         "Error: Wrong split algo.")
        return

    def testFilesets(self):
        """
        Test workflow tasks, filesets and subscriptions creation
        """
        # expected tasks, filesets, subscriptions, etc
        expOutTasks = ['/TestWorkload/StoreResults']
        expWfTasks = [
            '/TestWorkload/StoreResults',
            '/TestWorkload/StoreResults/StoreResultsLogCollect'
        ]
        expFsets = [
            'TestWorkload-StoreResults-/MinimumBias/ComissioningHI-v1/RAW',
            '/TestWorkload/StoreResults/merged-MergedUSER',
            '/TestWorkload/StoreResults/merged-logArchive'
        ]
        subMaps = [
            (2, '/TestWorkload/StoreResults/merged-logArchive',
             '/TestWorkload/StoreResults/StoreResultsLogCollect',
             'MinFileBased', 'LogCollect'),
            (1, 'TestWorkload-StoreResults-/MinimumBias/ComissioningHI-v1/RAW',
             '/TestWorkload/StoreResults', 'ParentlessMergeBySize', 'Merge')
        ]

        testArguments = StoreResultsWorkloadFactory.getTestArguments()

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", testArguments)

        testWMBSHelper = WMBSHelper(testWorkload, "StoreResults",
                                    blockName=testArguments['InputDataset'],
                                    cachepath=self.testInit.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        self.assertItemsEqual(testWorkload.listOutputProducingTasks(),
                              expOutTasks)

        workflows = self.listTasksByWorkflow.execute(workflow="TestWorkload")
        self.assertItemsEqual([item['task'] for item in workflows], expWfTasks)

        # returns a tuple of id, name, open and last_update
        filesets = self.listFilesets.execute()
        self.assertItemsEqual([item[1] for item in filesets], expFsets)

        subscriptions = self.listSubsMapping.execute(workflow="TestWorkload",
                                                     returnTuple=True)
        self.assertItemsEqual(subscriptions, subMaps)
class ThreadPoolTest(unittest.TestCase):
    """
    _ThreadPool_t_

    Unit tests for threadpool
    """

    # Threads per pool and number of pools exercised by testA.
    _nrOfThreads = 10
    _nrOfPools = 5

    def setUp(self):
        "make a logger instance and create tables"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()

    def tearDown(self):
        """
        Deletion of database
        """
        # FIXME: this might not work if you're not using socket.
        self.testInit.clearDatabase()

    def testA(self):
        """
        __testSubscribe__

        Test subscription of a component.
        """
        # NOTE(review): this test is unconditionally skipped, so everything
        # below is currently dead code.
        raise nose.SkipTest
        myThread = threading.currentThread()
        # create a 'fake' component that contains an arg dictionary.
        component = Dummy()
        # load default parameters.
        config = self.testInit.getConfiguration()
        # normally assigned by the harness of the test component.
        config.Agent.componentName = "TestComponent"
        component.config = config

        threadPools = []
        for i in xrange(0, ThreadPoolTest._nrOfPools):
            threadPool = ThreadPool("WMCore.ThreadPool.ThreadSlave",
                                    component, 'MyPool_' + str(i),
                                    ThreadPoolTest._nrOfThreads)
            threadPools.append(threadPool)

        # this is how you would use the threadpool. The threadpool retrieves
        # events/payloads from the message service. If a thread is available
        # it is dispatched, otherwise it is stored in the threadpool.
        # make the number of tasks bigger than number of threads to test
        # the persistent queue.
        for i in xrange(0, ThreadPoolTest._nrOfThreads * 10):
            event = 'eventNr_' + str(i)
            payload = 'payloadNr_' + str(i)
            # normally you would have different events per threadpool and
            # even different objects per pool. the payload part will be
            # pickled into the database enabling flexibility in passing
            # information.
            for j in xrange(0, ThreadPoolTest._nrOfPools):
                threadPools[j].enqueue(event,
                                       {'event': event, 'payload': payload})

        # this commit you want to be in the agent harness, so the message is
        # actually removed from the msgService. we can do this as the
        # threadpool acts as a dispatcher and is a short-lived action:
        # dispatch to thread or queue and tell agent harness it is finished.
        finished = False
        timeout = 60  # secs
        currenttime = 0
        while not finished:
            print('waiting for threads to finishs. Work left:')
            for j in xrange(0, ThreadPoolTest._nrOfPools):
                print('pool_' + str(j) + ':' + str(threadPools[j].callQueue))
            time.sleep(1)
            finished = True
            currenttime += 1
            if (timeout == currenttime):
                raise RuntimeError
            for j in xrange(0, ThreadPoolTest._nrOfPools):
                if (len(threadPools[j].resultsQueue) <
                        ThreadPoolTest._nrOfThreads * 10):
                    finished = False
                    break

        # check if the tables are really empty and all messages
        # have been processed.
        for j in xrange(0, ThreadPoolTest._nrOfPools):
            assert len(threadPools[j].resultsQueue) == \
                ThreadPoolTest._nrOfThreads * 10
        myThread.transaction.begin()
        for j in xrange(0, ThreadPoolTest._nrOfPools):
            self.assertEqual(threadPools[j].countMessages(), 0)
        myThread.transaction.commit()
class WMAgentTest(unittest.TestCase):
    """
    _WMAgentTest_

    Global unittest for all WMAgent components.

    Drives the full chain of component pollers (JobCreator, JobSubmitter,
    JobTracker, JobAccountant, JobArchiver, TaskArchiver) in sequence
    against a real database.  Requires a working condor scheduler
    (see getCondorRunningJobs / condorRM usage below).
    """
    # This is an integration test
    __integration__ = "Any old bollocks"

    # Sites registered in WMBS/ResourceControl; jobs are placed at these.
    sites = ['T2_US_Florida', 'T2_US_UCSD', 'T2_TW_Taiwan', 'T1_CH_CERN']
    # Components that get a heartbeat entry in setUp.
    components = ['JobCreator', 'JobSubmitter', 'JobTracker',
                  'JobAccountant', 'JobArchiver', 'TaskArchiver',
                  'RetryManager', 'ErrorHandler']

    def setUp(self):
        """
        _setUp_

        Set up vital components
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS", 'WMCore.MsgService',
                                                 'WMCore.ResourceControl', 'WMCore.ThreadPool',
                                                 'WMCore.Agent.Database'],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package = "WMCore.WMBS",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        locationAction = self.daoFactory(classname = "Locations.New")
        pendingSlots = self.daoFactory(classname = "Locations.SetPendingSlots")

        for site in self.sites:
            locationAction.execute(siteName = site, seName = 'se.%s' % (site), ceName = site)
            pendingSlots.execute(siteName = site, pendingSlots = 1000)

        #Create sites in resourceControl
        resourceControl = ResourceControl()
        for site in self.sites:
            resourceControl.insertSite(siteName = site, seName = 'se.%s' % (site), ceName = site)
            resourceControl.insertThreshold(siteName = site, taskType = 'Processing', \
                                            maxSlots = 10000, pendingSlots = 10000)

        self.testDir = self.testInit.generateWorkDir()

        # Set heartbeat
        for component in self.components:
            heartbeatAPI = HeartbeatAPI(component)
            heartbeatAPI.registerComponent()

        return

    def tearDown(self):
        """
        _tearDown_

        Tear down everything and go home.
        """
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
        return

    def createTestWorkload(self, workloadName = 'Test', emulator = True):
        """
        _createTestWorkload_

        Creates a test workload for us to run on, hold the basic necessities.
        """
        workload = testWorkload("TestWorkload")
        rereco = workload.getTask("ReReco")

        taskMaker = TaskMaker(workload, os.path.join(self.testDir, 'workloadTest'))
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        workload.save(workloadName)

        return workload

    def getConfig(self):
        """
        _getConfig_

        This is the global test configuration object
        """
        config = Configuration()

        config.component_("Agent")
        config.Agent.WMSpecDirectory = self.testDir
        config.Agent.agentName = 'testAgent'
        config.Agent.componentName = 'test'

        # First the general stuff
        config.section_("General")
        config.General.workDir = os.getenv("TESTDIR", self.testDir)

        # Now the CoreDatabase information
        # This should be the dialect, dburl, etc
        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket = os.getenv("DBSOCK")

        # JobCreator
        config.component_("JobCreator")
        config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'
        config.JobCreator.logLevel = 'DEBUG'
        config.JobCreator.maxThreads = 1
        config.JobCreator.UpdateFromResourceControl = True
        config.JobCreator.pollInterval = 10
        config.JobCreator.jobCacheDir = self.testDir
        config.JobCreator.defaultJobType = 'processing' #Type of jobs that we run, used for resource control
        config.JobCreator.workerThreads = 2
        config.JobCreator.componentDir = os.path.join(os.getcwd(), 'Components')

        # JobSubmitter
        config.component_("JobSubmitter")
        config.JobSubmitter.namespace = 'WMComponent.JobSubmitter.JobSubmitter'
        config.JobSubmitter.logLevel = 'INFO'
        config.JobSubmitter.maxThreads = 1
        config.JobSubmitter.pollInterval = 10
        config.JobSubmitter.pluginName = 'CondorGlobusPlugin'
        config.JobSubmitter.pluginDir = 'JobSubmitter.Plugins'
        config.JobSubmitter.submitDir = os.path.join(self.testDir, 'submit')
        config.JobSubmitter.submitNode = os.getenv("HOSTNAME", 'badtest.fnal.gov')
        config.JobSubmitter.submitScript = os.path.join(getWMBASE(),
                                                        'test/python/WMComponent_t/JobSubmitter_t',
                                                        'submit.sh')
        config.JobSubmitter.componentDir = os.path.join(os.getcwd(), 'Components')
        config.JobSubmitter.workerThreads = 2
        config.JobSubmitter.jobsPerWorker = 200

        # JobTracker
        config.component_("JobTracker")
        config.JobTracker.logLevel = 'DEBUG'
        config.JobTracker.pollInterval = 10
        config.JobTracker.trackerName = 'CondorTracker'
        config.JobTracker.pluginDir = 'WMComponent.JobTracker.Plugins'
        config.JobTracker.componentDir = os.path.join(os.getcwd(), 'Components')
        config.JobTracker.runTimeLimit = 7776000 #Jobs expire after 90 days
        config.JobTracker.idleTimeLimit = 7776000
        config.JobTracker.heldTimeLimit = 7776000
        config.JobTracker.unknTimeLimit = 7776000

        # JobAccountant
        config.component_("JobAccountant")
        config.JobAccountant.pollInterval = 60
        config.JobAccountant.componentDir = os.path.join(os.getcwd(), 'Components')
        config.JobAccountant.logLevel = 'INFO'

        # JobArchiver
        config.component_("JobArchiver")
        config.JobArchiver.pollInterval = 60
        config.JobArchiver.logLevel = 'INFO'
        config.JobArchiver.logDir = os.path.join(self.testDir, 'logs')
        config.JobArchiver.componentDir = os.path.join(os.getcwd(), 'Components')
        config.JobArchiver.numberOfJobsToCluster = 1000

        # Task Archiver
        config.component_("TaskArchiver")
        config.TaskArchiver.componentDir = self.testInit.generateWorkDir()
        config.TaskArchiver.WorkQueueParams = {}
        config.TaskArchiver.pollInterval = 60
        config.TaskArchiver.logLevel = 'INFO'
        config.TaskArchiver.timeOut = 0

        # JobStateMachine
        config.component_('JobStateMachine')
        config.JobStateMachine.couchurl = os.getenv('COUCHURL',
                                                    'mnorman:[email protected]:5984')
        config.JobStateMachine.couchDBName = "mnorman_test"

        # Needed, because this is a test
        os.makedirs(config.JobSubmitter.submitDir)

        return config

    def createFileCollection(self, name, nSubs, nFiles, workflowURL = 'test', site = None):
        """
        _createFileCollection_

        Create a collection of files for splitting into jobs
        """
        myThread = threading.currentThread()

        testWorkflow = Workflow(spec = workflowURL,
                                owner = "mnorman",
                                name = name,
                                task = "/TestWorkload/ReReco")
        testWorkflow.create()

        for sub in range(nSubs):
            nameStr = '%s-%i' % (name, sub)

            testFileset = Fileset(name = nameStr)
            testFileset.create()

            for f in range(nFiles):
                # pick a random site
                if not site:
                    tmpSite = 'se.%s' % (random.choice(self.sites))
                else:
                    tmpSite = 'se.%s' % (site)
                testFile = File(lfn = "/lfn/%s/%i" % (nameStr, f),
                                size = 1024,
                                events = 10)
                testFile.setLocation(tmpSite)
                testFile.create()
                testFileset.addFile(testFile)

            testFileset.commit()
            testFileset.markOpen(isOpen = 0)

            testSubscription = Subscription(fileset = testFileset,
                                            workflow = testWorkflow,
                                            type = "Processing",
                                            split_algo = "FileBased")
            testSubscription.create()

        return

    def createReports(self, jobs, retryCount = 0):
        """
        _createReports_

        Create some dummy job reports for each job
        """
        report = Report()
        report.addStep('testStep', 0)

        for job in jobs:
            #reportPath = os.path.join(job['cache_dir'], 'Report.%i.pkl' % (retryCount))
            reportPath = job['fwjr_path']
            if os.path.exists(reportPath):
                os.remove(reportPath)
            report.save(reportPath)

        return

    def testA_StraightThrough(self):
        """
        _StraightThrough_

        Just run everything straight through without any variations
        """
        # Do pre-submit job check
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, 0, "User currently has %i running jobs.  Test will not continue" % (nRunning))

        myThread = threading.currentThread()
        workload = self.createTestWorkload()
        config = self.getConfig()

        name = 'WMAgent_Test1'
        site = self.sites[0]
        nSubs = 5
        nFiles = 10
        workloadPath = os.path.join(self.testDir, 'workloadTest', 'TestWorkload',
                                    'WMSandbox', 'WMWorkload.pkl')

        # Create a collection of files
        self.createFileCollection(name = name, nSubs = nSubs, nFiles = nFiles,
                                  workflowURL = workloadPath, site = site)

        ############################################################
        # Test the JobCreator

        config.Agent.componentName = 'JobCreator'
        testJobCreator = JobCreatorPoller(config = config)

        testJobCreator.algorithm()
        time.sleep(5)

        # Did all jobs get created?
        getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
        result = getJobsAction.execute(state = 'Created', jobType = "Processing")
        self.assertEqual(len(result), nSubs*nFiles)

        # Count database objects
        result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
        self.assertEqual(len(result), nSubs * nFiles)

        # Find the test directory
        testDirectory = os.path.join(self.testDir, 'TestWorkload', 'ReReco')
        self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))
        self.assertTrue(len(os.listdir(testDirectory)) <= 20)

        groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')

        # First job should be in here
        self.assertTrue('job_1' in os.listdir(groupDirectory))
        jobFile = os.path.join(groupDirectory, 'job_1', 'job.pkl')
        self.assertTrue(os.path.isfile(jobFile))
        f = open(jobFile, 'r')
        job = cPickle.load(f)
        f.close()

        self.assertEqual(job['workflow'], name)
        self.assertEqual(len(job['input_files']), 1)
        self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')

        ###############################################################
        # Now test the JobSubmitter

        config.Agent.componentName = 'JobSubmitter'
        testJobSubmitter = JobSubmitterPoller(config = config)

        testJobSubmitter.algorithm()

        # Check that jobs are in the right state
        result = getJobsAction.execute(state = 'Created', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        # Check assigned locations
        getLocationAction = self.daoFactory(classname = "Jobs.GetLocation")
        for id in result:
            loc = getLocationAction.execute(jobid = id)
            self.assertEqual(loc, [[site]])

        # Check to make sure we have running jobs
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, nFiles * nSubs)

        #################################################################
        # Now the JobTracker

        config.Agent.componentName = 'JobTracker'
        testJobTracker = JobTrackerPoller(config = config)
        testJobTracker.setup()

        testJobTracker.algorithm()

        # Running the algo without removing the jobs should do nothing
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        condorRM()
        time.sleep(1)

        # All jobs gone?
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, 0)

        testJobTracker.algorithm()
        time.sleep(5)

        # Running the algo without removing the jobs should do nothing
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        #################################################################
        # Now the JobAccountant

        # First you need to load all jobs
        self.getFWJRAction = self.daoFactory(classname = "Jobs.GetFWJRByState")
        completeJobs = self.getFWJRAction.execute(state = "complete")

        # Create reports for all jobs
        self.createReports(jobs = completeJobs, retryCount = 0)

        config.Agent.componentName = 'JobAccountant'
        testJobAccountant = JobAccountantPoller(config = config)
        testJobAccountant.setup()

        # It should do something with the jobs
        testJobAccountant.algorithm()

        # All the jobs should be done now
        result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Success', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        #######################################################################
        # Now the JobArchiver

        config.Agent.componentName = 'JobArchiver'
        testJobArchiver = JobArchiverPoller(config = config)

        testJobArchiver.algorithm()

        # All the jobs should be cleaned up
        result = getJobsAction.execute(state = 'Success', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        logDir = os.path.join(self.testDir, 'logs')

        for job in completeJobs:
            self.assertFalse(os.path.exists(job['fwjr_path']))
            jobFolder = 'JobCluster_%i' \
                % (int(job['id']/config.JobArchiver.numberOfJobsToCluster))
            jobPath = os.path.join(logDir, jobFolder, 'Job_%i.tar' %(job['id']))
            self.assertTrue(os.path.isfile(jobPath))
            self.assertTrue(os.path.getsize(jobPath) > 0)

        ###########################################################################
        # Now the TaskAchiver

        config.Agent.componentName = 'TaskArchiver'
        testTaskArchiver = TaskArchiverPoller(config = config)

        testTaskArchiver.algorithm()

        result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
        self.assertEqual(len(result), 0)

        for jdict in completeJobs:
            job = Job(id = jdict['id'])
            self.assertFalse(job.exists())

        if os.path.isdir('testDir'):
            shutil.rmtree('testDir')
        shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'testDir'))

        return
class FeederManagerTest(unittest.TestCase):
    """
    TestCase for TestFeederManager module
    """

    # How many batches of AddDatasetWatch messages testA injects.
    _maxMessage = 10

    def setUp(self):
        """
        _setUp_

        Prepare logging, a database connection and the schema required by
        the FeederManager component.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.generateWorkDir()
        requiredModules = ['WMCore.Agent.Database',
                           'WMComponent.FeederManager.Database',
                           'WMCore.ThreadPool',
                           'WMCore.WMBS']
        self.testInit.setSchema(customModules=requiredModules,
                                useDefault=False)
        return

    def tearDown(self):
        """
        _tearDown_

        Database deletion
        """
        self.testInit.clearDatabase()
        return

    def getConfig(self):
        """
        _createConfig_

        Create a component configuration pointing at the test database.
        The connection parameters are still pulled from the environment.
        """
        config = self.testInit.getConfiguration()
        self.testInit.generateWorkDir(config)

        config.component_("FeederManager")
        config.FeederManager.logLevel = "INFO"
        config.FeederManager.componentName = "FeederManager"
        config.FeederManager.componentDir = \
            os.path.join(os.getenv("TESTDIR"), "FeederManager")
        config.FeederManager.addDatasetWatchHandler = \
            'WMComponent.FeederManager.Handler.DefaultAddDatasetWatch'

        # The maximum number of threads to process each message type
        config.FeederManager.maxThreads = 10

        # The poll interval at which to look for new fileset/feeder association
        config.FeederManager.pollInterval = 60
        return config

    def testA(self):
        """
        _testA_

        Start the component, feed it a batch of AddDatasetWatch events and
        then shut the worker threads down cleanly.
        """
        myThread = threading.currentThread()
        manager = FeederManager(self.getConfig())
        manager.prepareToStart()

        for _ in xrange(FeederManagerTest._maxMessage):
            for _ in xrange(3):
                watchRequest = {'payload': {'FeederType': 'NO Feeder',
                                            'dataset': 'NO DATASET',
                                            'FileType': 'NO FILE TYPE',
                                            'StartRun': 'NO START RUN'}}
                manager.handleMessage(type='AddDatasetWatch',
                                      payload=watchRequest)

        # Give the handler threads time to drain the injected messages.
        time.sleep(30)

        myThread.workerThreadManager.terminateWorkers()

        # Block until every worker thread has actually exited.
        while threading.activeCount() > 1:
            print('Currently: ' + str(threading.activeCount()) +
                  ' Threads. Wait until all our threads have finished')
            time.sleep(1)
class FileBasedTest(unittest.TestCase):
    """
    _FileBasedTest_

    Test file based job splitting.
    """

    def setUp(self):
        """
        _setUp_

        Install the WMBS schema and register the locations that test files
        will be placed at.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"], useDefault=False)

        currentThread = threading.currentThread()
        wmbsFactory = DAOFactory(package="WMCore.WMBS",
                                 logger=currentThread.logger,
                                 dbinterface=currentThread.dbi)

        self.nSites = 2
        addLocation = wmbsFactory(classname="Locations.New")
        for siteIndex in range(self.nSites):
            addLocation.execute(siteName="site%i" % siteIndex,
                                pnn="T2_CH_CERN_%i" % siteIndex)
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase(modules=["WMCore.WMBS"])
        return

    def createTestSubscription(self, nFiles, nSites=1, closeFileset=False):
        """
        _createTestSubscription_

        Build a MinFileBased subscription over a fileset that holds nFiles
        files per site; optionally close the fileset.
        """
        # Never place files at more sites than were registered in setUp.
        nSites = min(nSites, self.nSites)

        fileset = Fileset(name="TestFileset")
        fileset.create()

        # A workflow for the subscription to hang off of.
        workflow = Workflow(spec="spec.xml", owner="Steve",
                            name="wf001", task="Test")
        workflow.create()

        # Populate the fileset: nFiles files at each site.
        for siteIndex in range(nSites):
            for _ in range(nFiles):
                wmbsFile = File(makeUUID(), size=1024, events=100,
                                locations=set(["T2_CH_CERN_%i" % siteIndex]))
                wmbsFile.create()
                fileset.addFile(wmbsFile)
        fileset.commit()

        subscription = Subscription(fileset=fileset,
                                    workflow=workflow,
                                    split_algo="MinFileBased",
                                    type="Processing")
        subscription.create()

        if closeFileset:
            fileset.markOpen(isOpen=False)

        return subscription

    def testA_ExactFiles(self):
        """
        _testExactFiles_

        Splitting with files_per_job equal to the fileset size must yield
        exactly one job holding every file.
        """
        nFiles = 5
        subscription = self.createTestSubscription(nFiles=nFiles)
        jobFactory = SplitterFactory()(package="WMCore.WMBS",
                                       subscription=subscription)

        groups = jobFactory(files_per_job=nFiles)
        self.assertEqual(len(groups), 1)
        self.assertEqual(len(groups[0].jobs), 1)
        self.assertEqual(len(groups[0].jobs[0]['input_files']), nFiles)
        return

    def testB_LessFilesOpen(self):
        """
        _LessFilesOpen_

        Fewer files than required with an open fileset must produce no jobs.
        """
        subscription = self.createTestSubscription(nFiles=5)
        jobFactory = SplitterFactory()(package="WMCore.WMBS",
                                       subscription=subscription)

        groups = jobFactory(files_per_job=10)
        self.assertEqual(len(groups), 0)
        return

    def testC_LessFilesClosed(self):
        """
        _LessFilesClosed_

        Fewer files than required with a closed fileset must produce a
        single job holding everything.
        """
        subscription = self.createTestSubscription(nFiles=5, closeFileset=True)
        jobFactory = SplitterFactory()(package="WMCore.WMBS",
                                       subscription=subscription)

        groups = jobFactory(files_per_job=10)
        self.assertEqual(len(groups), 1)
        self.assertEqual(len(groups[0].jobs), 1)
        self.assertEqual(len(groups[0].jobs[0]['input_files']), 5)
        return

    def testD_MoreFilesOpen(self):
        """
        _MoreFilesOpen_

        With an open fileset and more files than files_per_job, only full
        jobs are produced; the remainder stays unsplit.
        """
        subscription = self.createTestSubscription(nFiles=10)
        jobFactory = SplitterFactory()(package="WMCore.WMBS",
                                       subscription=subscription)

        groups = jobFactory(files_per_job=3)
        self.assertEqual(len(groups), 1)
        self.assertEqual(len(groups[0].jobs), 3)
        for createdJob in groups[0].jobs:
            self.assertEqual(len(createdJob['input_files']), 3)
        return

    def testE_MoreFilesClosed(self):
        """
        _MoreFilesClosed_

        With a closed fileset every file must end up in a job, including a
        final partial job for the remainder.
        """
        subscription = self.createTestSubscription(nFiles=10, closeFileset=True)
        jobFactory = SplitterFactory()(package="WMCore.WMBS",
                                       subscription=subscription)

        groups = jobFactory(files_per_job=3)
        self.assertEqual(len(groups), 1)
        self.assertEqual(len(groups[0].jobs), 4)
        for fullJob in groups[0].jobs[:3]:
            self.assertEqual(len(fullJob['input_files']), 3)
        self.assertEqual(len(groups[0].jobs[3]['input_files']), 1)
        return
class DBSUploadTest(unittest.TestCase):
    """
    TestCase for DBSUpload module

    Fixes over the original:
      * the bare ``except:`` in testCloseSettingsPerWorkflow no longer
        swallows the real failure (and no longer traps SystemExit /
        KeyboardInterrupt) -- it now reports the actual exception;
      * the final size-limit section re-queries ``findOpenBlocks()`` before
        asserting, instead of asserting on a stale ``openBlocks`` value;
      * repeated fake-DBS file reads are factored into ``_loadFakeDBS``.
    """

    def setUp(self):
        """
        _setUp_

        setUp function for unittest
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)
        self.testDir = self.testInit.generateWorkDir(deleteOnDestruction = False)
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                        logger = myThread.logger,
                                        dbinterface = myThread.dbi)

        locationAction = self.bufferFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")
        locationAction.execute(siteName = "malpaquet")
        # NOTE(review): the DBS writer URL (and its credentials) was redacted
        # in the source this was recovered from -- restore the real
        # DBS3 writer endpoint here before running the integration tests.
        self.dbsUrl = "https://*****:*****"

    def _loadFakeDBS(self):
        """Return the JSON content the fake DbsApi wrote to self.dbsUrl."""
        with open(self.dbsUrl, 'r') as fakeDBS:
            return json.load(fakeDBS)

    @attr("integration")
    def testBasicUpload(self):
        """
        _testBasicUpload_

        Verify that we can successfully upload to DBS3.  Also verify that
        the uploader correctly handles files parentage when uploading.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config = config)

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)

        # The algorithm needs to be run twice.  On the first iteration it will
        # create all the blocks and upload one.  On the second iteration it
        # will timeout and upload the second block.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        # Verify the files made it into DBS3.
        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Inject some more parent files and some child files into DBSBuffer.
        # Run the uploader twice, only the parent files should be added to DBS3.
        (moreParentFiles, childFiles) = \
            self.createFilesWithChildren(parentFiles, acqEra)
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"],
                        parentFiles + moreParentFiles)

        # Run the uploader another two times to upload the child files.
        # Verify that the child files were uploaded.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    @attr("integration")
    def testDualUpload(self):
        """
        _testDualUpload_

        Verify that the dual upload mode works correctly.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config = config)
        dbsUtil = DBSBufferUtil()

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)
        (moreParentFiles, childFiles) = \
            self.createFilesWithChildren(parentFiles, acqEra)

        allFiles = parentFiles + moreParentFiles
        allBlocks = []
        # Pre-create four parent blocks of five files each; the first two are
        # marked as already uploaded ("InDBS") to simulate the dual agent.
        # NOTE(review): block scopes below were reconstructed from a
        # flattened source -- verify against upstream before relying on them.
        for i in range(4):
            DBSBufferDataset(parentFiles[0]["datasetPath"]).create()
            blockName = parentFiles[0]["datasetPath"] + "#" + makeUUID()
            dbsBlock = DBSBufferBlock(blockName,
                                      location = "malpaquet",
                                      datasetpath = None)
            dbsBlock.status = "Open"
            dbsBlock.setDataset(parentFiles[0]["datasetPath"], 'data', 'VALID')
            dbsUtil.createBlocks([dbsBlock])
            for dbsFile in allFiles[i * 5 : (i * 5) + 5]:
                dbsBlock.addFile(dbsFile, 'data', 'VALID')
                dbsUtil.setBlockFiles({"block": blockName,
                                       "filelfn": dbsFile["lfn"]})
            if i < 2:
                dbsBlock.status = "InDBS"
            dbsUtil.updateBlocks([dbsBlock])
            dbsUtil.updateFileStatus([dbsBlock], "InDBS")
            allBlocks.append(dbsBlock)

        # One child block, already marked as uploaded.
        DBSBufferDataset(childFiles[0]["datasetPath"]).create()
        blockName = childFiles[0]["datasetPath"] + "#" + makeUUID()
        dbsBlock = DBSBufferBlock(blockName,
                                  location = "malpaquet",
                                  datasetpath = None)
        dbsBlock.status = "InDBS"
        dbsBlock.setDataset(childFiles[0]["datasetPath"], 'data', 'VALID')
        dbsUtil.createBlocks([dbsBlock])
        for dbsFile in childFiles:
            dbsBlock.addFile(dbsFile, 'data', 'VALID')
            dbsUtil.setBlockFiles({"block": blockName,
                                   "filelfn": dbsFile["lfn"]})
        dbsUtil.updateFileStatus([dbsBlock], "InDBS")

        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Change the status of the rest of the parent blocks so we can upload
        # them and the children.
        for dbsBlock in allBlocks:
            dbsBlock.status = "InDBS"
            dbsUtil.updateBlocks([dbsBlock])

        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"],
                        parentFiles + moreParentFiles)

        # Run the uploader one more time to upload the children.
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    def testCloseSettingsPerWorkflow(self):
        """
        _testCloseSettingsPerWorkflow_

        Test the block closing mechanics in the DBS3 uploader, this uses a
        fake dbs api to avoid reliance on external services.
        """
        # Signal trapExit that we are a friend
        os.environ["DONT_TRAP_EXIT"] = "True"
        try:
            # Monkey patch the imports of DbsApi
            from WMComponent.DBS3Buffer import DBSUploadPoller as MockDBSUploadPoller
            MockDBSUploadPoller.DbsApi = MockDbsApi

            # Set the poller and the dbsUtil for verification
            myThread = threading.currentThread()
            (_, dbsFilePath) = mkstemp(dir = self.testDir)
            self.dbsUrl = dbsFilePath
            config = self.getConfig()
            dbsUploader = MockDBSUploadPoller.DBSUploadPoller(config = config)
            dbsUtil = DBSBufferUtil()

            # First test is event based limits and timeout with no new files.
            # Set the files and workflow
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 100,
                                MaxEvents = 150)
            self.createParentFiles(acqEra, nFiles = 20,
                                   workflowName = workflowName,
                                   taskPath = taskPath)

            # The algorithm needs to be run twice. On the first iteration it
            # will create all the blocks and upload one with less than 150
            # events. On the second iteration the second block is uploaded.
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            globalFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'InDBS'")[0].fetchall()
            notUploadedFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'NOTUPLOADED'")[0].fetchall()
            self.assertEqual(len(globalFiles), 14)
            self.assertEqual(len(notUploadedFiles), 6)

            # Check the fake DBS for data
            fakeDBSInfo = self._loadFakeDBS()
            self.assertEqual(len(fakeDBSInfo), 2)
            for block in fakeDBSInfo:
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['file_count'], 7)
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            time.sleep(3)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBSInfo = self._loadFakeDBS()
            self.assertEqual(len(fakeDBSInfo), 3)
            for block in fakeDBSInfo:
                if block['block']['file_count'] != 6:
                    self.assertEqual(block['block']['file_count'], 7)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Now check the limit by size and timeout with new files
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 5,
                                MaxEvents = 200000000)
            self.createParentFiles(acqEra, nFiles = 16,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            fakeDBSInfo = self._loadFakeDBS()
            self.assertEqual(len(fakeDBSInfo), 6)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertTrue('close_settings' not in block)
                self.assertEqual(block['block']['open_for_writing'], 0)

            # Put more files, they will go into the same block and then it
            # will be closed after timeout
            time.sleep(3)
            self.createParentFiles(acqEra, nFiles = 3,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBSInfo = self._loadFakeDBS()
            self.assertEqual(len(fakeDBSInfo), 7)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] < 5:
                        self.assertEqual(block['block']['file_count'], 4)
                    else:
                        self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Finally test size limits
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 1, MaxFiles = 500,
                                MaxEvents = 200000000, MaxSize = 2048)
            self.createParentFiles(acqEra, nFiles = 7,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            time.sleep(2)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            # Fix: re-query instead of asserting on the stale value from the
            # previous section.
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBSInfo = self._loadFakeDBS()
            self.assertEqual(len(fakeDBSInfo), 11)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] != 1:
                        self.assertEqual(block['block']['block_size'], 2048)
                        self.assertEqual(block['block']['file_count'], 2)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
        except Exception as ex:
            # Fix: was a bare "except:" that hid the real error (and caught
            # SystemExit/KeyboardInterrupt).  Report what actually failed.
            self.fail("We failed at some point in the test: %s" % str(ex))
        finally:
            # We don't trust anyone else with _exit
            del os.environ["DONT_TRAP_EXIT"]
        return
class RucioInjectorPollerTest(EmulatedUnitTestCase):
    """
    Tests for the RucioInjectorPoller component
    """

    def setUp(self):
        """
        Install the DBSBuffer schema into the database and connect to Rucio.
        """
        super(RucioInjectorPollerTest, self).setUp()
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)
        locationAction = daofactory(classname="DBSBufferFiles.AddLocation")
        self.locations = ["T2_CH_CERN", "T1_US_FNAL_Disk"]
        for rse in self.locations:
            locationAction.execute(siteName=rse)

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/SampleA/PromptReco-v1/RECO"
        self.testDatasetB = "/SampleB/CRUZET11-v1/RAW"

        if PY3:
            # assertItemsEqual was renamed to assertCountEqual in Python 3
            self.assertItemsEqual = self.assertCountEqual
        return

    def tearDown(self):
        """
        Delete the database.
        """
        self.testInit.clearDatabase()

    def createConfig(self):
        """
        Create a basic configuration for the component
        """
        config = self.testInit.getConfiguration()
        config.component_("RucioInjector")
        config.RucioInjector.pollInterval = 300
        config.RucioInjector.pollIntervalRules = 43200
        config.RucioInjector.cacheExpiration = 2 * 24 * 60 * 60  # two days
        config.RucioInjector.createBlockRules = True
        config.RucioInjector.RSEPostfix = False  # enable it to append _Test to the RSE names
        config.RucioInjector.metaDIDProject = "Production"
        config.RucioInjector.containerDiskRuleParams = {"weight": "ddm_quota", "copies": 2, "grouping": "DATASET"}
        config.RucioInjector.containerDiskRuleRSEExpr = "(tier=2|tier=1)&cms_type=real&rse_type=DISK"
        config.RucioInjector.rucioAccount = "wma_test"
        config.RucioInjector.rucioUrl = "http://cmsrucio-int.cern.ch"
        config.RucioInjector.rucioAuthUrl = "https://cmsrucio-auth-int.cern.ch"
        return config

    def _createBufferFile(self, datasetPath, location, checksums):
        """
        Create, persist and return a single DBSBufferFile at *location*,
        attached to *datasetPath*, with the standard test algorithm info.
        """
        testFile = DBSBufferFile(lfn=makeUUID(), size=1024, events=10,
                                 checksums=checksums,
                                 locations=set([location]))
        testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8",
                              appFam="RECO", psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath(datasetPath)
        testFile.addRun(Run(2, *[45]))
        testFile.create()
        return testFile

    def stuffDatabase(self):
        """
        Fill the dbsbuffer tables with some files and blocks. We'll insert a total
        of 5 files spanning two blocks. There will be a total of two datasets
        inserted into the database.

        We'll inject files with the location set as an SE name as well as a
        PhEDEx node name as well.
        """
        myThread = threading.currentThread()

        # Create the DAOs factory and the relevant instances
        buffer3Factory = DAOFactory(package="WMComponent.DBS3Buffer",
                                    logger=myThread.logger,
                                    dbinterface=myThread.dbi)
        setBlock = buffer3Factory(classname="DBSBufferFiles.SetBlock")
        fileStatus = buffer3Factory(classname="DBSBufferFiles.SetStatus")
        associateWorkflow = buffer3Factory(classname="DBSBufferFiles.AssociateWorkflowToFile")
        insertWorkflow = buffer3Factory(classname="InsertWorkflow")
        datasetAction = buffer3Factory(classname="NewDataset")
        createAction = buffer3Factory(classname="CreateBlocks")

        # Create workflow in the database
        insertWorkflow.execute("BogusRequest", "BogusTask", 0, 0, 0, 0)

        checksums = {"adler32": "1234", "cksum": "5678"}

        # Three files on the first block (dataset A at T2_CH_CERN)
        for _ in range(3):
            self.testFilesA.append(
                self._createBufferFile(self.testDatasetA, "T2_CH_CERN", checksums))
        # Two files on the second block (dataset B at T1_US_FNAL_Disk)
        for _ in range(2):
            self.testFilesB.append(
                self._createBufferFile(self.testDatasetB, "T1_US_FNAL_Disk", checksums))

        # insert datasets in the dbsbuffer table
        datasetAction.execute(datasetPath=self.testDatasetA)
        datasetAction.execute(datasetPath=self.testDatasetB)

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()

        # create and insert blocks into dbsbuffer table
        newBlockA = DBSBufferBlock(name=self.blockAName,
                                   location="T2_CH_CERN",
                                   datasetpath=None)
        newBlockA.setDataset(self.testDatasetA, 'data', 'VALID')
        newBlockA.status = 'Closed'
        newBlockB = DBSBufferBlock(name=self.blockBName,
                                   location="T1_US_FNAL_Disk",
                                   datasetpath=None)
        newBlockB.setDataset(self.testDatasetB, 'data', 'VALID')
        newBlockB.status = 'Closed'
        createAction.execute(blocks=[newBlockA, newBlockB])

        # associate files to their correspondent block id
        for testFile in self.testFilesA:
            setBlock.execute(testFile["lfn"], self.blockAName)
        for testFile in self.testFilesB:
            setBlock.execute(testFile["lfn"], self.blockBName)

        # set file status to LOCAL
        for testFile in self.testFilesA + self.testFilesB:
            fileStatus.execute(testFile["lfn"], "LOCAL")

        # associate files to a given workflow
        for testFile in self.testFilesA + self.testFilesB:
            associateWorkflow.execute(testFile["lfn"], "BogusRequest", "BogusTask")
        return

    def testBadConfig(self):
        """
        Test wrong component configuration
        """
        config = self.createConfig()
        config.RucioInjector.metaDIDProject = "Very invalid project name"
        with self.assertRaises(RucioInjectorException):
            RucioInjectorPoller(config)

    def testActivityMap(self):
        """
        Initialize a RucioInjectorPoller object and test `_activityMap` method
        """
        # NOTE: uses assertEqual, not the deprecated assertEquals alias
        # (the alias was removed in Python 3.12)
        poller = RucioInjectorPoller(self.createConfig())
        # test production agent and non-Tape endpoint
        activity = poller._activityMap("T1_US_FNAL_Disk")
        self.assertEqual(activity, "Production Output")
        activity = poller._activityMap("T1_US_FNAL_Test")
        self.assertEqual(activity, "Production Output")
        # test production agent and Tape endpoint (which is forbidden at the moment)
        with self.assertRaises(WMRucioException):
            poller._activityMap("T1_US_FNAL_Tape")

        # now pretend it to be a T0 agent/component
        poller.isT0agent = True
        # test T0 agent and non-Tape endpoint
        activity = poller._activityMap("T1_US_FNAL_Disk")
        self.assertEqual(activity, "T0 Export")
        activity = poller._activityMap("T1_US_FNAL_Test")
        self.assertEqual(activity, "T0 Export")
        # test T0 agent and Tape endpoint
        activity = poller._activityMap("T1_US_FNAL_Tape")
        self.assertEqual(activity, "T0 Tape")

    def testLoadingFiles(self):
        """
        Initialize a RucioInjectorPoller object and load uninjected files
        """
        self.stuffDatabase()
        poller = RucioInjectorPoller(self.createConfig())
        poller.setup(parameters=None)
        uninjectedFiles = poller.getUninjected.execute()
        self.assertItemsEqual(list(uninjectedFiles), self.locations)
        self.assertEqual(list(uninjectedFiles["T2_CH_CERN"]), [self.testDatasetA])
        self.assertEqual(list(uninjectedFiles["T1_US_FNAL_Disk"]), [self.testDatasetB])
class ProcessPoolTest(unittest.TestCase):
    """
    Unit tests for the ProcessPool worker machinery.

    NOTE: all three tests start with `raise nose.SkipTest`, so the bodies
    below are currently dead code kept for future re-enablement.
    """

    def setUp(self):
        """
        _setUp_
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setSchema(customModules=["WMCore.Agent.Database"],
                                useDefault=False)
        return

    def tearDown(self):
        """
        _tearDown_
        """
        self.testInit.clearDatabase()
        return

    def _makeCommands(self, count):
        """
        Build the command batch ["COMMAND<count>", ..., "COMMAND1"] without
        shadowing the `input` builtin as the previous hand-rolled loop did.
        """
        return ["COMMAND%s" % num for num in range(count, 0, -1)]

    def testA_ProcessPool(self):
        """
        _testProcessPool_
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.Agent.useHeartbeat = False
        self.testInit.generateWorkDir(config)

        processPool = ProcessPool("ProcessPool_t.ProcessPoolTestWorker",
                                  totalSlaves=1,
                                  componentDir=config.General.workDir,
                                  config=config,
                                  namespace="WMCore_t")

        processPool.enqueue(["One", "Two", "Three"])
        result = processPool.dequeue(3)

        self.assertEqual(len(result), 3, "Error: Expected three items back.")
        self.assertTrue("One" in result)
        self.assertTrue("Two" in result)
        self.assertTrue("Three" in result)
        return

    def testB_ProcessPoolStress(self):
        """
        _testProcessPoolStress_
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.Agent.useHeartbeat = False
        self.testInit.generateWorkDir(config)

        processPool = ProcessPool("ProcessPool_t.ProcessPoolTestWorker",
                                  totalSlaves=1,
                                  componentDir=config.General.workDir,
                                  namespace="WMCore_t",
                                  config=config)

        for batchSize in range(1000):
            commands = self._makeCommands(batchSize)
            processPool.enqueue(commands)
            result = processPool.dequeue(len(commands))
            self.assertEqual(len(result), len(commands),
                             "Error: Wrong number of results returned.")
            for item in result:
                self.assertTrue(item in commands)
        return

    def testC_MultiPool(self):
        """
        _testMultiPool_

        Run a test with multiple workers
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.Agent.useHeartbeat = False
        self.testInit.generateWorkDir(config)

        processPool = ProcessPool("ProcessPool_t.ProcessPoolTestWorker",
                                  totalSlaves=3,
                                  componentDir=config.General.workDir,
                                  namespace="WMCore_t",
                                  config=config)

        for batchSize in range(100):
            commands = self._makeCommands(batchSize)
            processPool.enqueue(commands)
            result = processPool.dequeue(len(commands))
            self.assertEqual(len(result), len(commands),
                             "Error: Wrong number of results returned.")
class DBSBufferFileTest(unittest.TestCase): def setUp(self): """ _setUp_ Setup the database and logging connection. Try to create all of the DBSBuffer tables. Also add some dummy locations. """ self.testInit = TestInit(__file__) self.testInit.setLogging() self.testInit.setDatabaseConnection() self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"], useDefault=False) myThread = threading.currentThread() self.daoFactory = DAOFactory(package="WMComponent.DBS3Buffer", logger=myThread.logger, dbinterface=myThread.dbi) locationAction = self.daoFactory( classname="DBSBufferFiles.AddLocation") locationAction.execute(siteName="se1.cern.ch") locationAction.execute(siteName="se1.fnal.gov") def tearDown(self): """ _tearDown_ Drop all the DBSBuffer tables. """ self.testInit.clearDatabase() def testCreateDeleteExists(self): """ _testCreateDeleteExists_ Test the create(), delete() and exists() methods of the file class by creating and deleting a file. The exists() method will be called before and after creation and after deletion. """ testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") assert testFile.exists() == False, \ "ERROR: File exists before it was created" testFile.addRun(Run(1, *[45])) testFile.create() assert testFile.exists() > 0, \ "ERROR: File does not exist after it was created" testFile.delete() assert testFile.exists() == False, \ "ERROR: File exists after it has been deleted" return def testCreateTransaction(self): """ _testCreateTransaction_ Begin a transaction and then create a file in the database. Afterwards, rollback the transaction. Use the File class's exists() method to to verify that the file doesn't exist before it was created, exists after it was created and doesn't exist after the transaction was rolled back. 
""" myThread = threading.currentThread() myThread.transaction.begin() testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") assert testFile.exists() == False, \ "ERROR: File exists before it was created" testFile.addRun(Run(1, *[45])) testFile.create() assert testFile.exists() > 0, \ "ERROR: File does not exist after it was created" myThread.transaction.rollback() assert testFile.exists() == False, \ "ERROR: File exists after transaction was rolled back." return def testDeleteTransaction(self): """ _testDeleteTransaction_ Create a file and commit it to the database. Start a new transaction and delete the file. Rollback the transaction after the file has been deleted. Use the file class's exists() method to verify that the file does not exist after it has been deleted but does exist after the transaction is rolled back. """ testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") assert testFile.exists() == False, \ "ERROR: File exists before it was created" testFile.addRun(Run(1, *[45])) testFile.create() assert testFile.exists() > 0, \ "ERROR: File does not exist after it was created" myThread = threading.currentThread() myThread.transaction.begin() testFile.delete() assert testFile.exists() == False, \ "ERROR: File exists after it has been deleted" myThread.transaction.rollback() assert testFile.exists() > 0, \ "ERROR: File does not exist after transaction was rolled back." return def testGetParentLFNs(self): """ _testGetParentLFNs_ Create three files and set them to be parents of a fourth file. 
Check to make sure that getParentLFNs() on the child file returns the correct LFNs. """ testFileParentA = DBSBufferFile(lfn="/this/is/a/parent/lfnA", size=1024, events=20) testFileParentA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentA.addRun(Run(1, *[45])) testFileParentB = DBSBufferFile(lfn="/this/is/a/parent/lfnB", size=1024, events=20) testFileParentB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentB.addRun(Run(1, *[45])) testFileParentC = DBSBufferFile(lfn="/this/is/a/parent/lfnC", size=1024, events=20) testFileParentC.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentC.addRun(Run(1, *[45])) testFileParentA.create() testFileParentB.create() testFileParentC.create() testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFile.addRun(Run(1, *[45])) testFile.create() testFile.addParents([ testFileParentA["lfn"], testFileParentB["lfn"], testFileParentC["lfn"] ]) parentLFNs = testFile.getParentLFNs() assert len(parentLFNs) == 3, \ "ERROR: Child does not have the right amount of parents" goldenLFNs = [ "/this/is/a/parent/lfnA", "/this/is/a/parent/lfnB", "/this/is/a/parent/lfnC" ] for parentLFN in parentLFNs: assert parentLFN in goldenLFNs, \ "ERROR: Unknown parent lfn" goldenLFNs.remove(parentLFN) testFile.delete() testFileParentA.delete() testFileParentB.delete() testFileParentC.delete() 
return def testLoad(self): """ _testLoad_ Test the loading of file meta data using the ID of a file and the LFN of a file. """ checksums = {"adler32": "adler32", "cksum": "cksum", "md5": "md5"} testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10, checksums=checksums) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.create() testFileB = DBSBufferFile(lfn=testFileA["lfn"]) testFileB.load() testFileC = DBSBufferFile(id=testFileA["id"]) testFileC.load() assert testFileA == testFileB, \ "ERROR: File load by LFN didn't work" assert testFileA == testFileC, \ "ERROR: File load by ID didn't work" assert isinstance(testFileB["id"], int) or isinstance(testFileB["id"], long), \ "ERROR: File id is not an integer type." assert isinstance(testFileB["size"], int) or isinstance(testFileB["size"], long), \ "ERROR: File size is not an integer type." assert isinstance(testFileB["events"], int) or isinstance(testFileB["events"], long), \ "ERROR: File events is not an integer type." assert isinstance(testFileC["id"], int) or isinstance(testFileC["id"], long), \ "ERROR: File id is not an integer type." assert isinstance(testFileC["size"], int) or isinstance(testFileC["size"], long), \ "ERROR: File size is not an integer type." assert isinstance(testFileC["events"], int) or isinstance(testFileC["events"], long), \ "ERROR: File events is not an integer type." testFileA.delete() return def testFilesize(self): """ _testFilesize_ Test storing and loading the file information from dbsbuffer_file. 
Make sure filesize can be bigger than 32 bits """ checksums = {"adler32": "adler32", "cksum": "cksum"} testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=3221225472, events=1500000, checksums=checksums) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_7_6_0", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.create() testFileB = DBSBufferFile(lfn=testFileA["lfn"]) testFileB.load() self.assertEqual(testFileB["size"], 3221225472, "Error: the filesize should be 3GB") self.assertEqual(testFileB["events"], 1500000, "Error: the number of events should be 1.5M") def testAddChild(self): """ _testAddChild_ Add a child to some parent files and make sure that all the parentage information is loaded/stored correctly from the database. """ testFileParentA = DBSBufferFile(lfn="/this/is/a/parent/lfnA", size=1024, events=20) testFileParentA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentA.addRun(Run(1, *[45])) testFileParentB = DBSBufferFile(lfn="/this/is/a/parent/lfnB", size=1024, events=20) testFileParentB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentB.addRun(Run(1, *[45])) testFileParentA.create() testFileParentB.create() testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.addRun(Run(1, *[45])) testFileA.create() testFileParentA.addChildren("/this/is/a/lfn") testFileParentB.addChildren("/this/is/a/lfn") testFileB = 
DBSBufferFile(id=testFileA["id"]) testFileB.load(parentage=1) goldenFiles = [testFileParentA, testFileParentB] for parentFile in testFileB["parents"]: assert parentFile in goldenFiles, \ "ERROR: Unknown parent file" goldenFiles.remove(parentFile) assert len(goldenFiles) == 0, \ "ERROR: Some parents are missing" return def testAddChildTransaction(self): """ _testAddChildTransaction_ Add a child to some parent files and make sure that all the parentage information is loaded/stored correctly from the database. Rollback the addition of one of the childs and then verify that it does in fact only have one parent. """ testFileParentA = DBSBufferFile(lfn="/this/is/a/parent/lfnA", size=1024, events=20) testFileParentA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentA.addRun(Run(1, *[45])) testFileParentB = DBSBufferFile(lfn="/this/is/a/parent/lfnB", size=1024, events=20) testFileParentB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileParentB.addRun(Run(1, *[45])) testFileParentA.create() testFileParentB.create() testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.addRun(Run(1, *[45])) testFileA.create() testFileParentA.addChildren("/this/is/a/lfn") myThread = threading.currentThread() myThread.transaction.begin() testFileParentB.addChildren("/this/is/a/lfn") testFileB = DBSBufferFile(id=testFileA["id"]) testFileB.load(parentage=1) goldenFiles = [testFileParentA, testFileParentB] for parentFile in testFileB["parents"]: assert parentFile in 
goldenFiles, \ "ERROR: Unknown parent file" goldenFiles.remove(parentFile) assert len(goldenFiles) == 0, \ "ERROR: Some parents are missing" myThread.transaction.rollback() testFileB.load(parentage=1) goldenFiles = [testFileParentA] for parentFile in testFileB["parents"]: assert parentFile in goldenFiles, \ "ERROR: Unknown parent file" goldenFiles.remove(parentFile) assert len(goldenFiles) == 0, \ "ERROR: Some parents are missing" return def testSetLocation(self): """ _testSetLocation_ Create a file and add a couple locations. Load the file from the database to make sure that the locations were set correctly. """ testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.addRun(Run(1, *[45])) testFileA.create() testFileA.setLocation(["se1.fnal.gov", "se1.cern.ch"]) testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"], immediateSave=False) testFileB = DBSBufferFile(id=testFileA["id"]) testFileB.load() goldenLocations = ["se1.fnal.gov", "se1.cern.ch"] for location in testFileB["locations"]: assert location in goldenLocations, \ "ERROR: Unknown file location" goldenLocations.remove(location) assert len(goldenLocations) == 0, \ "ERROR: Some locations are missing" return def testSetLocationTransaction(self): """ _testSetLocationTransaction_ Create a file at specific locations and commit everything to the database. Reload the file from the database and verify that the locations are correct. Rollback the database transaction and once again reload the file. Verify that the original locations are back. 
""" testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.addRun(Run(1, *[45])) testFileA.create() testFileA.setLocation(["se1.fnal.gov"]) myThread = threading.currentThread() myThread.transaction.begin() testFileA.setLocation(["se1.cern.ch"]) testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"], immediateSave=False) testFileB = DBSBufferFile(id=testFileA["id"]) testFileB.load() goldenLocations = ["se1.fnal.gov", "se1.cern.ch"] for location in testFileB["locations"]: assert location in goldenLocations, \ "ERROR: Unknown file location" goldenLocations.remove(location) assert len(goldenLocations) == 0, \ "ERROR: Some locations are missing" myThread.transaction.rollback() testFileB.load() goldenLocations = ["se1.fnal.gov"] for location in testFileB["locations"]: assert location in goldenLocations, \ "ERROR: Unknown file location" goldenLocations.remove(location) assert len(goldenLocations) == 0, \ "ERROR: Some locations are missing" return def testLocationsConstructor(self): """ _testLocationsConstructor_ Test to make sure that locations passed into the File() constructor are loaded from and save to the database correctly. Also test to make sure that the class behaves well when the location is passed in as a single string instead of a set. 
""" testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10, locations=set(["se1.fnal.gov"])) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.addRun(Run(1, *[45])) testFileA.create() testFileB = DBSBufferFile(lfn="/this/is/a/lfn2", size=1024, events=10, locations="se1.fnal.gov") testFileB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileB.addRun(Run(1, *[45])) testFileB.create() testFileC = DBSBufferFile(id=testFileA["id"]) testFileC.load() goldenLocations = ["se1.fnal.gov"] for location in testFileC["locations"]: assert location in goldenLocations, \ "ERROR: Unknown file location" goldenLocations.remove(location) assert len(goldenLocations) == 0, \ "ERROR: Some locations are missing" testFileC = DBSBufferFile(id=testFileB["id"]) testFileC.load() goldenLocations = ["se1.fnal.gov"] for location in testFileC["locations"]: assert location in goldenLocations, \ "ERROR: Unknown file location" goldenLocations.remove(location) assert len(goldenLocations) == 0, \ "ERROR: Some locations are missing" return def testAddRunSet(self): """ _testAddRunSet_ Test the ability to add run and lumi information to a file. 
""" testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10, locations="se1.fnal.gov") testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFile.create() runSet = set() runSet.add(Run(1, *[45])) runSet.add(Run(2, *[67, 68])) testFile.addRunSet(runSet) assert (runSet - testFile["runs"]) == set(), \ "Error: addRunSet is not updating set correctly" def testSetBlock(self): """ _testSetBlock_ Verify that the [Set|Get]Block DAOs work correctly. """ myThread = threading.currentThread() dataset = "/Cosmics/CRUZET09-PromptReco-v1/RECO" uploadFactory = DAOFactory(package="WMComponent.DBS3Buffer", logger=myThread.logger, dbinterface=myThread.dbi) datasetAction = uploadFactory(classname="NewDataset") createAction = uploadFactory(classname="CreateBlocks") datasetAction.execute(datasetPath=dataset) newBlock = DBSBufferBlock(name="someblockname", location="se1.cern.ch", datasetpath=None) newBlock.setDataset(dataset, 'data', 'VALID') createAction.execute(blocks=[newBlock]) setBlockAction = self.daoFactory(classname="DBSBufferFiles.SetBlock") getBlockAction = self.daoFactory(classname="DBSBufferFiles.GetBlock") testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10, locations="se1.fnal.gov") testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath(dataset) testFile.create() setBlockAction.execute(lfn=testFile["lfn"], blockName="someblockname") blockName = getBlockAction.execute(lfn=testFile["lfn"]) assert blockName[0][0] == "someblockname", \ "Error: Incorrect block returned: %s" % blockName[0][0] return def testCountFilesDAO(self): """ _testCountFilesDAO_ Verify that the CountFiles DAO object functions correctly. 
""" testFileA = DBSBufferFile(lfn="/this/is/a/lfnA", size=1024, events=10, locations="se1.fnal.gov") testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.create() testFileB = DBSBufferFile(lfn="/this/is/a/lfnB", size=1024, events=10, locations="se1.fnal.gov") testFileB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileB.create() testFileC = DBSBufferFile(lfn="/this/is/a/lfnC", size=1024, events=10, locations="se1.fnal.gov") testFileC.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileC.create() countAction = self.daoFactory(classname="CountFiles") assert countAction.execute() == 3, \ "Error: Wrong number of files counted in DBS Buffer." return def testAddParents(self): """ _testAddParents_ Verify that the addParents() method works correctly even if the parents do not already exist in the database. 
""" myThread = threading.currentThread() testFile = DBSBufferFile(lfn="/this/is/a/lfnA", size=1024, events=10, locations="se1.fnal.gov") testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFile.create() testParent = DBSBufferFile(lfn="/this/is/a/lfnB", size=1024, events=10, locations="se1.fnal.gov") testParent.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testParent.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RAW") testParent.create() goldenLFNs = ["lfn1", "lfn2", "lfn3", "/this/is/a/lfnB"] testFile.addParents(goldenLFNs) verifyFile = DBSBufferFile(id=testFile["id"]) verifyFile.load(parentage=1) parentLFNs = verifyFile.getParentLFNs() for parentLFN in parentLFNs: self.assertTrue(parentLFN in goldenLFNs, "Error: unknown lfn %s" % parentLFN) goldenLFNs.remove(parentLFN) self.assertEqual(len(goldenLFNs), 0, "Error: missing LFNs...") # Check that the bogus dataset is listed as inDBS sqlCommand = """SELECT in_dbs FROM dbsbuffer_algo_dataset_assoc das INNER JOIN dbsbuffer_dataset ds ON das.dataset_id = ds.id WHERE ds.path = 'bogus'""" status = myThread.dbi.processData(sqlCommand)[0].fetchall()[0][0] self.assertEqual(status, 1) # Now make sure the dummy files are listed as being in DBS sqlCommand = """SELECT status FROM dbsbuffer_file df INNER JOIN dbsbuffer_algo_dataset_assoc das ON das.id = df.dataset_algo INNER JOIN dbsbuffer_dataset ds ON das.dataset_id = ds.id WHERE ds.path = '/bogus/dataset/path' """ status = myThread.dbi.processData(sqlCommand)[0].fetchall() for entry in status: self.assertEqual(entry, ('AlreadyInDBS', )) return def testGetChildrenDAO(self): """ _testGetChildrenDAO_ Verify that the GetChildren DAO correctly returns the LFNs of a file's children. 
""" testFileChildA = DBSBufferFile(lfn="/this/is/a/child/lfnA", size=1024, events=20) testFileChildA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChildB = DBSBufferFile(lfn="/this/is/a/child/lfnB", size=1024, events=20) testFileChildB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChildC = DBSBufferFile(lfn="/this/is/a/child/lfnC", size=1024, events=20) testFileChildC.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChildA.create() testFileChildB.create() testFileChildC.create() testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFile.create() testFileChildA.addParents([testFile["lfn"]]) testFileChildB.addParents([testFile["lfn"]]) testFileChildC.addParents([testFile["lfn"]]) getChildrenAction = self.daoFactory( classname="DBSBufferFiles.GetChildren") childLFNs = getChildrenAction.execute(testFile["lfn"]) assert len(childLFNs) == 3, \ "ERROR: Parent does not have the right amount of children." goldenLFNs = [ "/this/is/a/child/lfnA", "/this/is/a/child/lfnB", "/this/is/a/child/lfnC" ] for childLFN in childLFNs: assert childLFN in goldenLFNs, \ "ERROR: Unknown child lfn" goldenLFNs.remove(childLFN) return def testGetParentStatusDAO(self): """ _testGetParentStatusDAO_ Verify that the GetParentStatus DAO correctly returns the status of a file's children. 
""" testFileChild = DBSBufferFile(lfn="/this/is/a/child/lfnA", size=1024, events=20) testFileChild.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChild.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChild.create() testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFile.create() testFileChild.addParents([testFile["lfn"]]) getStatusAction = self.daoFactory( classname="DBSBufferFiles.GetParentStatus") parentStatus = getStatusAction.execute(testFileChild["lfn"]) assert len(parentStatus) == 1, \ "ERROR: Wrong number of statuses returned." assert parentStatus[0] == "NOTUPLOADED", \ "ERROR: Wrong status returned." return def testSetLocationByLFN(self): """ _testSetLocationByLFN_ """ testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.addRun(Run(1, *[45])) testFileA.create() setLocationAction = self.daoFactory( classname="DBSBufferFiles.SetLocationByLFN") setLocationAction.execute(binds={ 'lfn': "/this/is/a/lfn", 'pnn': 'se1.cern.ch' }) testFileB = DBSBufferFile(id=testFileA["id"]) testFileB.load() self.assertEqual(testFileB['locations'], set(['se1.cern.ch'])) return def testAddCKSumByLFN(self): """ _testAddCKSumByLFN_ """ testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.create() setCksumAction = self.daoFactory( 
classname="DBSBufferFiles.AddChecksumByLFN") binds = [{ 'lfn': "/this/is/a/lfn", 'cktype': 'adler32', 'cksum': 201 }, { 'lfn': "/this/is/a/lfn", 'cktype': 'cksum', 'cksum': 101 }] setCksumAction.execute(bulkList=binds) testFileB = DBSBufferFile(id=testFileA["id"]) testFileB.load() self.assertEqual(testFileB['checksums'], { 'adler32': '201', 'cksum': '101' }) return def testBulkLoad(self): """ _testBulkLoad_ Can we load in bulk? """ addToBuffer = DBSBufferUtil() testFileChildA = DBSBufferFile(lfn="/this/is/a/child/lfnA", size=1024, events=20) testFileChildA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChildB = DBSBufferFile(lfn="/this/is/a/child/lfnB", size=1024, events=20) testFileChildB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChildC = DBSBufferFile(lfn="/this/is/a/child/lfnC", size=1024, events=20) testFileChildC.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileChildA.create() testFileChildB.create() testFileChildC.create() testFileChildA.setLocation(["se1.fnal.gov", "se1.cern.ch"]) testFileChildB.setLocation(["se1.fnal.gov", "se1.cern.ch"]) testFileChildC.setLocation(["se1.fnal.gov", "se1.cern.ch"]) runSet = set() runSet.add(Run(1, *[45])) runSet.add(Run(2, *[67, 68])) testFileChildA.addRunSet(runSet) testFileChildB.addRunSet(runSet) testFileChildC.addRunSet(runSet) testFileChildA.save() testFileChildB.save() testFileChildC.save() setCksumAction = self.daoFactory( classname="DBSBufferFiles.AddChecksumByLFN") binds = [{ 'lfn': "/this/is/a/child/lfnA", 'cktype': 'adler32', 'cksum': 201 }, { 'lfn': 
"/this/is/a/child/lfnA", 'cktype': 'cksum', 'cksum': 101 }, { 'lfn': "/this/is/a/child/lfnB", 'cktype': 'adler32', 'cksum': 201 }, { 'lfn': "/this/is/a/child/lfnB", 'cktype': 'cksum', 'cksum': 101 }, { 'lfn': "/this/is/a/child/lfnC", 'cktype': 'adler32', 'cksum': 201 }, { 'lfn': "/this/is/a/child/lfnC", 'cktype': 'cksum', 'cksum': 101 }] setCksumAction.execute(bulkList=binds) testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFile.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFile.create() testFileChildA.addParents([testFile["lfn"]]) testFileChildB.addParents([testFile["lfn"]]) testFileChildC.addParents([testFile["lfn"]]) binds = [{ 'id': testFileChildA.exists() }, { 'id': testFileChildB.exists() }, { 'id': testFileChildC.exists() }] listOfFiles = addToBuffer.loadDBSBufferFilesBulk(fileObjs=binds) # print listOfFiles compareList = [ 'locations', 'psetHash', 'configContent', 'appName', 'appVer', 'appFam', 'events', 'datasetPath', 'runs' ] for f in listOfFiles: self.assertTrue( f['lfn'] in [ "/this/is/a/child/lfnA", "/this/is/a/child/lfnB", "/this/is/a/child/lfnC" ], "Unknown file in loaded results") self.assertEqual(f['checksums'], { 'adler32': '201', 'cksum': '101' }) for parent in f['parents']: self.assertEqual(parent['lfn'], testFile['lfn']) for key in compareList: self.assertEqual(f[key], testFileChildA[key]) def testProperties(self): """ _testProperties_ Test added tags that use DBSBuffer to transfer from workload to DBS """ testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10) testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8", appFam="RECO", psetHash="GIBBERISH", configContent="MOREGIBBERISH") testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO") testFileA.setValidStatus(validStatus="VALID") testFileA.setProcessingVer(ver="ProcVer") 
testFileA.setAcquisitionEra(era="AcqEra") testFileA.setGlobalTag(globalTag="GlobalTag") testFileA.setDatasetParent(datasetParent="Parent") testFileA.create() return
class RepackTest(unittest.TestCase):
    """
    _RepackTest_

    Test for Repack job splitter
    """

    def setUp(self):
        """
        _setUp_

        Create the T0 schema, one site with two SE names, run 1 with four
        lumis, stream A and its fileset, and a Repack subscription on that
        fileset.  Also record default splitter thresholds in self.splitArgs.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        # Raw inserts: one location with two SE names attached.
        myThread.dbi.processData("""INSERT INTO wmbs_location (id, site_name, state) VALUES (1, 'SomeSite', 1) """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames (location, se_name) VALUES (1, 'SomeSE') """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames (location, se_name) VALUES (1, 'SomeSE2') """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={'RUN': 1,
                                    'TIME': int(time.time()),
                                    'HLTKEY': "someHLTKey"},
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in [1, 2, 3, 4]:
            insertLumiDAO.execute(binds={'RUN': 1,
                                         'LUMI': lumi},
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "A"},
                                transaction=False)

        insertStreamFilesetDAO = daoFactory(classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "A", "TestFileset1")

        self.fileset1 = Fileset(name="TestFileset1")
        self.fileset1.load()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset=self.fileset1,
                                          workflow=workflow1,
                                          split_algo="Repack",
                                          type="Repack")
        self.subscription1.create()

        # keep for later
        self.insertClosedLumiDAO = daoFactory(classname="RunLumiCloseout.InsertClosedLumi")
        self.currentTime = int(time.time())

        # default split parameters (deliberately high so nothing splits
        # until a test lowers one of them)
        self.splitArgs = {}
        self.splitArgs['maxSizeSingleLumi'] = 20 * 1024 * 1024 * 1024
        self.splitArgs['maxSizeMultiLumi'] = 10 * 1024 * 1024 * 1024
        self.splitArgs['maxInputEvents'] = 500000
        self.splitArgs['maxInputFiles'] = 1000

        return

    def tearDown(self):
        """
        _tearDown_
        """
        self.testInit.clearDatabase()

        return

    def getNumActiveSplitLumis(self):
        """
        _getNumActiveSplitLumis_

        helper function that counts the number of active split lumis
        """
        myThread = threading.currentThread()

        results = myThread.dbi.processData("""SELECT COUNT(*) FROM lumi_section_split_active """,
                                           transaction=False)[0].fetchall()

        return results[0][0]

    def test00(self):
        """
        _test00_

        Test that the job name prefix feature works
        Test multi lumi size threshold
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()

        # Two 1000-byte / 100-event files in each of four lumis.
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)

        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        # With the default (huge) size threshold no jobs should be created.
        mySplitArgs['maxSizeMultiLumi'] = self.splitArgs['maxSizeMultiLumi']
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # 8 files * 1000 bytes exceeds 5000, so the splitter fires.
        mySplitArgs['maxSizeMultiLumi'] = 5000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        # Closing the fileset flushes the remaining files into a final job.
        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test01(self):
        """
        _test01_

        Test multi lumi event threshold
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append({'RUN': 1,
                                          'LUMI': lumi,
                                          'STREAM': "A",
                                          'FILECOUNT': filecount,
                                          'INSERT_TIME': self.currentTime,
                                          'CLOSE_TIME': self.currentTime})
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        # Default event threshold is too high to trigger a split.
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # 8 files * 100 events exceeds 500, so the splitter fires.
        mySplitArgs['maxInputEvents'] = 500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test02(self):
        """
        _test02_

        Test single lumi size threshold
        Single lumi input
        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append({'RUN': 1,
                                          'LUMI': lumi,
                                          'STREAM': "A",
                                          'FILECOUNT': filecount,
                                          'INSERT_TIME': self.currentTime,
                                          'CLOSE_TIME': self.currentTime})
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # 8000 bytes in one lumi exceeds 6500, so the lumi is split across
        # two jobs (6 files fit under the threshold, 2 spill over).
        mySplitArgs['maxSizeSingleLumi'] = 6500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        # Splitting one lumi across jobs must record an active split lumi.
        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")

        return

    def test03(self):
        """
        _test03_

        Test single lumi event threshold
        Single lumi input
        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append({'RUN': 1,
                                          'LUMI': lumi,
                                          'STREAM': "A",
                                          'FILECOUNT': filecount,
                                          'INSERT_TIME': self.currentTime,
                                          'CLOSE_TIME': self.currentTime})
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # 800 events in one lumi exceeds 650: same 6/2 split as test02,
        # driven by events instead of size.
        mySplitArgs['maxInputEvents'] = 650
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")

        return

    def test04(self):
        """
        _test04_

        Test streamer count threshold (only multi lumi)
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append({'RUN': 1,
                                          'LUMI': lumi,
                                          'STREAM': "A",
                                          'FILECOUNT': filecount,
                                          'INSERT_TIME': self.currentTime,
                                          'CLOSE_TIME': self.currentTime})
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # 8 files exceeds a 5-file limit, so the splitter fires.
        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test05(self):
        """
        _test05_

        Test repacking of multiple lumis with holes in the lumi sequence
        Multi lumi input
        """
        mySplitArgs = self.splitArgs.copy()

        # Lumi 3 is deliberately missing to create a hole in the sequence.
        insertClosedLumiBinds = []
        for lumi in [1, 2, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append({'RUN': 1,
                                          'LUMI': lumi,
                                          'STREAM': "A",
                                          'FILECOUNT': filecount,
                                          'INSERT_TIME': self.currentTime,
                                          'CLOSE_TIME': self.currentTime})
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        # The hole at lumi 3 blocks splitting even though the file limit
        # is exceeded.
        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        # Closing lumi 3 with zero files fills the hole and unblocks the
        # splitter.
        self.insertClosedLumiDAO.execute(binds={'RUN': 1,
                                                'LUMI': 3,
                                                'STREAM': "A",
                                                'FILECOUNT': 0,
                                                'INSERT_TIME': self.currentTime,
                                                'CLOSE_TIME': self.currentTime},
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")

        return

    def test06(self):
        """
        _test06_

        Test repacking of 3 lumis
        2 small lumis (single job), followed by a big one (multiple jobs)

        files for lumi 1 and 2 are below multi-lumi thresholds
        files for lumi 3 are above single-lumi threshold
        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3]:
            filecount = 2
            for i in range(filecount):
                # Lumi 3 files are large enough to breach the single-lumi
                # event threshold on their own.
                if lumi == 3:
                    nevents = 500
                else:
                    nevents = 100
                newFile = File(makeUUID(), size=1000, events=nevents)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
            insertClosedLumiBinds.append({'RUN': 1,
                                          'LUMI': lumi,
                                          'STREAM': "A",
                                          'FILECOUNT': filecount,
                                          'INSERT_TIME': self.currentTime,
                                          'CLOSE_TIME': self.currentTime})
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        mySplitArgs['maxInputEvents'] = 900
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        # One multi-lumi job for lumis 1+2, plus one job per big lumi-3 file.
        self.assertEqual(len(jobGroups[0].jobs), 3,
                         "ERROR: JobFactory didn't create three jobs")

        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")

        self.assertEqual(len(jobGroups[0].jobs[1].getFiles()), 1,
                         "ERROR: second job does not process 1 file")

        self.assertEqual(len(jobGroups[0].jobs[2].getFiles()), 1,
                         "ERROR: third job does not process 1 file")

        return
class ReportIntegrationTest(unittest.TestCase):
    """
    _ReportIntegrationTest_

    End-to-end check that a framework job report parsed from XML can be
    persisted and then accounted into WMBS/DBSBuffer by the JobAccountant.
    """
    def setUp(self):
        """
        _setUp_

        Setup the database and WMBS for the test.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer",
                                                 "WMCore.WMBS"],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)
        self.dbsfactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        locationAction = self.daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "site1", pnn = "T1_US_FNAL_Disk")

        inputFile = File(lfn = "/path/to/some/lfn", size = 10, events = 10,
                         locations = "T1_US_FNAL_Disk")
        inputFile.create()

        inputFileset = Fileset(name = "InputFileset")
        inputFileset.create()
        inputFileset.addFile(inputFile)
        inputFileset.commit()

        unmergedFileset = Fileset(name = "UnmergedFileset")
        unmergedFileset.create()

        mergedFileset = Fileset(name = "MergedFileset")
        mergedFileset.create()

        # Processing workflow feeds the unmerged fileset; the merge
        # workflow consumes it and fills the merged fileset.
        procWorkflow = Workflow(spec = "wf001.xml", owner = "Steve",
                                name = "TestWF", task = "/TestWF/None")
        procWorkflow.create()
        procWorkflow.addOutput("outputRECORECO", unmergedFileset)

        mergeWorkflow = Workflow(spec = "wf002.xml", owner = "Steve",
                                 name = "MergeWF", task = "/MergeWF/None")
        mergeWorkflow.create()
        mergeWorkflow.addOutput("Merged", mergedFileset)

        insertWorkflow = self.dbsfactory(classname = "InsertWorkflow")
        insertWorkflow.execute("TestWF", "/TestWF/None", 0, 0, 0, 0)
        insertWorkflow.execute("MergeWF", "/MergeWF/None", 0, 0, 0, 0)

        self.procSubscription = Subscription(fileset = inputFileset,
                                             workflow = procWorkflow,
                                             split_algo = "FileBased",
                                             type = "Processing")
        self.procSubscription.create()
        self.procSubscription.acquireFiles()

        self.mergeSubscription = Subscription(fileset = unmergedFileset,
                                              workflow = mergeWorkflow,
                                              split_algo = "WMBSMergeBySize",
                                              type = "Merge")
        self.mergeSubscription.create()

        self.procJobGroup = JobGroup(subscription = self.procSubscription)
        self.procJobGroup.create()
        self.mergeJobGroup = JobGroup(subscription = self.mergeSubscription)
        self.mergeJobGroup.create()

        self.testJob = Job(name = "testJob", files = [inputFile])
        self.testJob.create(group = self.procJobGroup)
        self.testJob["state"] = "complete"

        myThread = threading.currentThread()
        self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)
        self.stateChangeAction = self.daofactory(classname = "Jobs.ChangeState")
        self.setFWJRAction = self.daofactory(classname = "Jobs.SetFWJRPath")
        self.getJobTypeAction = self.daofactory(classname = "Jobs.GetType")
        locationAction = self.daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "cmssrm.fnal.gov")

        self.stateChangeAction.execute(jobs = [self.testJob])

        # Scratch directory for the pickled framework job reports.
        self.tempDir = tempfile.mkdtemp()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database and the pickled report file.
        """
        self.testInit.clearDatabase()

        # Best-effort cleanup: the reports may not exist if the test
        # failed before writing them.
        try:
            os.remove(os.path.join(self.tempDir, "ProcReport.pkl"))
            os.remove(os.path.join(self.tempDir, "MergeReport.pkl"))
        except Exception as ex:
            pass

        try:
            os.rmdir(self.tempDir)
        except Exception as ex:
            pass

        return

    def createConfig(self, workerThreads):
        """
        _createConfig_

        Create a config for the JobAccountant with the given number of worker
        threads.  This config needs to include information for connecting to
        the database as the component will create it's own database
        connections.  These parameters are still pulled from the environment.
        """
        config = self.testInit.getConfiguration()
        self.testInit.generateWorkDir(config)

        config.section_("JobStateMachine")
        config.JobStateMachine.couchurl = os.getenv("COUCHURL")
        config.JobStateMachine.couchDBName = "report_integration_t"
        config.JobStateMachine.jobSummaryDBName = "report_integration_wmagent_summary_t"

        config.component_("JobAccountant")
        config.JobAccountant.pollInterval = 60
        config.JobAccountant.workerThreads = workerThreads
        config.JobAccountant.componentDir = os.getcwd()
        config.JobAccountant.logLevel = 'SQLDEBUG'

        config.component_("TaskArchiver")
        config.TaskArchiver.localWMStatsURL = "%s/%s" % (config.JobStateMachine.couchurl, config.JobStateMachine.jobSummaryDBName)
        return config

    def verifyJobSuccess(self, jobID):
        """
        _verifyJobSuccess_

        Verify that the metadata for a successful job is correct.  This will
        check the outcome, retry count and state.
        """
        testJob = Job(id = jobID)
        testJob.load()

        assert testJob["state"] == "success", \
               "Error: test job in wrong state: %s" % testJob["state"]
        assert testJob["retry_count"] == 0, \
               "Error: test job has wrong retry count: %s" % testJob["retry_count"]
        assert testJob["outcome"] == "success", \
               "Error: test job has wrong outcome: %s" % testJob["outcome"]

        return

    def verifyFileMetaData(self, jobID, fwkJobReportFiles):
        """
        _verifyFileMetaData_

        Verify that all the files that were output by a job made it into WMBS
        correctly.  Compare the contents of WMBS to the files in the
        frameworks job report.

        Note that fwkJobReportFiles is a list of DataStructs File objects.
        """
        testJob = Job(id = jobID)
        testJob.loadData()

        inputLFNs = []
        for inputFile in testJob["input_files"]:
            inputLFNs.append(inputFile["lfn"])

        for fwkJobReportFile in fwkJobReportFiles:
            outputFile = File(lfn = fwkJobReportFile["lfn"])
            outputFile.loadData(parentage = 1)

            assert outputFile["events"] == int(fwkJobReportFile["events"]), \
                   "Error: Output file has wrong events: %s, %s" % \
                   (outputFile["events"], fwkJobReportFile["events"])
            assert outputFile["size"] == int(fwkJobReportFile["size"]), \
                   "Error: Output file has wrong size: %s, %s" % \
                   (outputFile["size"], fwkJobReportFile["size"])

            # Checksums must match in both directions (same keys, same values).
            for ckType in fwkJobReportFile["checksums"]:
                assert ckType in outputFile["checksums"], \
                       "Error: Output file is missing checksums: %s" % ckType
                assert outputFile["checksums"][ckType] == fwkJobReportFile["checksums"][ckType], \
                       "Error: Checksums don't match."

            assert len(fwkJobReportFile["checksums"]) == \
                   len(outputFile["checksums"]), \
                   "Error: Wrong number of checksums."

            jobType = self.getJobTypeAction.execute(jobID = jobID)
            if jobType == "Merge":
                assert str(outputFile["merged"]) == "True", \
                       "Error: Merge jobs should output merged files."
            else:
                assert outputFile["merged"] == fwkJobReportFile["merged"], \
                       "Error: Output file merged output is wrong: %s, %s" % \
                       (outputFile["merged"], fwkJobReportFile["merged"])

            assert len(outputFile["locations"]) == 1, \
                   "Error: outputfile should have one location: %s" % outputFile["locations"]
            assert list(outputFile["locations"])[0] == list(fwkJobReportFile["locations"])[0], \
                   "Error: wrong location for file."

            assert len(outputFile["parents"]) == len(inputLFNs), \
                   "Error: Output file has wrong number of parents."
            for outputParent in outputFile["parents"]:
                assert outputParent["lfn"] in inputLFNs, \
                       "Error: Unknown parent file: %s" % outputParent["lfn"]

            # Cross off every run/lumi in the output against the report;
            # anything left over in fwjrRuns afterwards is missing from WMBS.
            fwjrRuns = {}
            for run in fwkJobReportFile["runs"]:
                fwjrRuns[run.run] = run.lumis

            for run in outputFile["runs"]:
                assert run.run in fwjrRuns, \
                       "Error: Extra run in output: %s" % run.run

                for lumi in run:
                    assert lumi in fwjrRuns[run.run], \
                           "Error: Extra lumi: %s" % lumi
                    fwjrRuns[run.run].remove(lumi)

                if len(fwjrRuns[run.run]) == 0:
                    del fwjrRuns[run.run]

            assert len(fwjrRuns) == 0, \
                   "Error: Missing runs, lumis: %s" % fwjrRuns

            testJobGroup = JobGroup(id = testJob["jobgroup"])
            testJobGroup.loadData()
            jobGroupFileset = testJobGroup.output
            jobGroupFileset.loadData()

            assert outputFile["id"] in jobGroupFileset.getFiles(type = "id"), \
                   "Error: output file not in jobgroup fileset."

            if testJob["mask"]["FirstEvent"] == None:
                assert outputFile["first_event"] == 0, \
                       "Error: first event not set correctly: 0, %s" % \
                       outputFile["first_event"]
            else:
                assert testJob["mask"]["FirstEvent"] == outputFile["first_event"], \
                       "Error: last event not set correctly: %s, %s" % \
                       (testJob["mask"]["FirstEvent"], outputFile["first_event"])

        return

    def testReportHandling(self):
        """
        _testReportHandling_

        Verify that we're able to parse a CMSSW report, convert it to a
        Report() style report, pickle it and then have the accountant process
        it.
        """
        self.procPath = os.path.join(WMCore.WMBase.getTestBase(),
                                     "WMCore_t/FwkJobReport_t/CMSSWProcessingReport.xml")

        myReport = Report("cmsRun1")
        myReport.parse(self.procPath)

        # Fake some metadata that should be added by the stageout scripts.
        for fileRef in myReport.getAllFileRefsFromStep("cmsRun1"):
            fileRef.size = 1024
            fileRef.location = "cmssrm.fnal.gov"

        fwjrPath = os.path.join(self.tempDir, "ProcReport.pkl")
        cmsRunStep = myReport.retrieveStep("cmsRun1")
        cmsRunStep.status = 0
        myReport.setTaskName('/TestWF/None')
        myReport.persist(fwjrPath)

        self.setFWJRAction.execute(jobID = self.testJob["id"], fwjrPath = fwjrPath)

        # Pre-seed a DBSBuffer file so the accountant has a dataset to
        # associate the processed output with.
        pFile = DBSBufferFile(lfn = "/path/to/some/lfn", size = 600000, events = 60000)
        pFile.setAlgorithm(appName = "cmsRun", appVer = "UNKNOWN",
                           appFam = "RECO", psetHash = "GIBBERISH",
                           configContent = "MOREGIBBERISH")
        pFile.setDatasetPath("/bogus/dataset/path")
        #pFile.addRun(Run(1, *[45]))
        pFile.create()

        config = self.createConfig(workerThreads = 1)
        accountant = JobAccountantPoller(config)
        accountant.setup()
        accountant.algorithm()

        self.verifyJobSuccess(self.testJob["id"])
        self.verifyFileMetaData(self.testJob["id"], myReport.getAllFilesFromStep("cmsRun1"))

        # Second pass: feed the processing output into a merge job and run
        # the accountant again on the merge report.
        inputFile = File(lfn = "/store/backfill/2/unmerged/WMAgentCommissioining10/MinimumBias/RECO/rereco_GR09_R_34X_V5_All_v1/0000/outputRECORECO.root")
        inputFile.load()
        self.testMergeJob = Job(name = "testMergeJob", files = [inputFile])
        self.testMergeJob.create(group = self.mergeJobGroup)
        self.testMergeJob["state"] = "complete"
        self.stateChangeAction.execute(jobs = [self.testMergeJob])

        self.mergePath = os.path.join(WMCore.WMBase.getTestBase(),
                                      "WMCore_t/FwkJobReport_t/CMSSWMergeReport.xml")

        myReport = Report("mergeReco")
        myReport.parse(self.mergePath)

        # Fake some metadata that should be added by the stageout scripts.
        for fileRef in myReport.getAllFileRefsFromStep("mergeReco"):
            fileRef.size = 1024
            fileRef.location = "cmssrm.fnal.gov"
            fileRef.dataset = {"applicationName": "cmsRun",
                               "applicationVersion": "CMSSW_3_4_2_patch1",
                               "primaryDataset": "MinimumBias",
                               "processedDataset": "Rereco-v1",
                               "dataTier": "RECO"}

        fwjrPath = os.path.join(self.tempDir, "MergeReport.pkl")
        myReport.setTaskName('/MergeWF/None')
        cmsRunStep = myReport.retrieveStep("mergeReco")
        cmsRunStep.status = 0
        myReport.persist(fwjrPath)

        self.setFWJRAction.execute(jobID = self.testMergeJob["id"], fwjrPath = fwjrPath)
        accountant.algorithm()

        self.verifyJobSuccess(self.testMergeJob["id"])
        self.verifyFileMetaData(self.testMergeJob["id"], myReport.getAllFilesFromStep("mergeReco"))
        return