def injectTaskIntoWMBS(specUrl, workflowName, task, inputFileset, indent=0):
    """
    _injectTaskIntoWMBS_

    Inject a task into WMBS: create its workflow and subscription, create a
    fileset for each of its output modules and recurse into any child tasks
    that run over those outputs.
    """
    print "%sinjecting %s" % (doIndent(indent), task.getPathName())
    print "%s input fileset: %s" % (doIndent(indent), inputFileset.name)

    myWorkflow = Workflow(spec=specUrl, owner=arguments['Requestor'],
                          name=workflowName, task=task.getPathName())
    myWorkflow.create()

    mySubscription = Subscription(fileset=inputFileset, workflow=myWorkflow,
                                  split_algo=task.jobSplittingAlgorithm(),
                                  type=task.taskType())
    mySubscription.create()

    outputModules = task.getOutputModulesForStep(task.getTopStepName())
    for outputModuleName in outputModules.listSections_():
        print "%s configuring output module: %s" % (doIndent(indent),
                                                    outputModuleName)
        if task.taskType() == "Merge":
            outputFilesetName = "%s/merged-%s" % (task.getPathName(),
                                                  outputModuleName)
        else:
            outputFilesetName = "%s/unmerged-%s" % (task.getPathName(),
                                                    outputModuleName)

        print "%s output fileset: %s" % (doIndent(indent), outputFilesetName)
        outputFileset = Fileset(name=outputFilesetName)
        outputFileset.create()

        myWorkflow.addOutput(outputModuleName, outputFileset)

        # See if any other steps run over this output.
        print "%s searching for child tasks..." % (doIndent(indent))
        for childTask in task.childTaskIterator():
            if childTask.data.input.outputModule == outputModuleName:
                injectTaskIntoWMBS(specUrl, workflowName, childTask,
                                   outputFileset, indent + 4)
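# A minimal sketch of how injectTaskIntoWMBS might be driven for a whole
# spec, assuming a WMWorkloadHelper-style `workload` object that exposes
# taskIterator() and name(); the driver loop and the input fileset naming
# are illustrative assumptions, not the actual injection script.
#
# for topLevelTask in workload.taskIterator():
#     topLevelFileset = Fileset(name="%s-input" % topLevelTask.getPathName())
#     topLevelFileset.create()
#     injectTaskIntoWMBS(specUrl, workload.name(), topLevelTask,
#                        topLevelFileset)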
def testGetOutputMapDAO(self):
    """
    _testGetOutputMapDAO_

    Verify the proper behavior of the GetOutputMapDAO for a variety of
    different processing chains.
    """
    recoOutputFileset = Fileset(name="RECO")
    recoOutputFileset.create()
    mergedRecoOutputFileset = Fileset(name="MergedRECO")
    mergedRecoOutputFileset.create()
    alcaOutputFileset = Fileset(name="ALCA")
    alcaOutputFileset.create()
    mergedAlcaOutputFileset = Fileset(name="MergedALCA")
    mergedAlcaOutputFileset.create()
    dqmOutputFileset = Fileset(name="DQM")
    dqmOutputFileset.create()
    mergedDqmOutputFileset = Fileset(name="MergedDQM")
    mergedDqmOutputFileset.create()
    cleanupFileset = Fileset(name="Cleanup")
    cleanupFileset.create()

    testWorkflow = Workflow(spec="wf001.xml", owner="Steve",
                            name="TestWF", task="None")
    testWorkflow.create()
    testWorkflow.addOutput("output", recoOutputFileset,
                           mergedRecoOutputFileset)
    testWorkflow.addOutput("ALCARECOStreamCombined", alcaOutputFileset,
                           mergedAlcaOutputFileset)
    testWorkflow.addOutput("DQM", dqmOutputFileset, mergedDqmOutputFileset)
    testWorkflow.addOutput("output", cleanupFileset)
    testWorkflow.addOutput("ALCARECOStreamCombined", cleanupFileset)
    testWorkflow.addOutput("DQM", cleanupFileset)

    testRecoMergeWorkflow = Workflow(spec="wf002.xml", owner="Steve",
                                     name="TestRecoMergeWF", task="None")
    testRecoMergeWorkflow.create()
    testRecoMergeWorkflow.addOutput("anything", mergedRecoOutputFileset,
                                    mergedRecoOutputFileset)

    testRecoProcWorkflow = Workflow(spec="wf004.xml", owner="Steve",
                                    name="TestRecoProcWF", task="None")
    testRecoProcWorkflow.create()

    testAlcaChildWorkflow = Workflow(spec="wf003.xml", owner="Steve",
                                     name="TestAlcaChildWF", task="None")
    testAlcaChildWorkflow.create()

    inputFile = File(lfn="/path/to/some/lfn", size=600000, events=60000,
                     locations="cmssrm.fnal.gov")
    inputFile.create()

    testFileset = Fileset(name="TestFileset")
    testFileset.create()
    testFileset.addFile(inputFile)
    testFileset.commit()

    testSubscription = Subscription(fileset=testFileset,
                                    workflow=testWorkflow,
                                    split_algo="EventBased",
                                    type="Processing")
    testMergeRecoSubscription = Subscription(fileset=recoOutputFileset,
                                             workflow=testRecoMergeWorkflow,
                                             split_algo="WMBSMergeBySize",
                                             type="Merge")
    testProcRecoSubscription = Subscription(fileset=recoOutputFileset,
                                            workflow=testRecoProcWorkflow,
                                            split_algo="FileBased",
                                            type="Processing")
    testChildAlcaSubscription = Subscription(fileset=alcaOutputFileset,
                                             workflow=testAlcaChildWorkflow,
                                             split_algo="FileBased",
                                             type="Processing")
    testSubscription.create()
    testMergeRecoSubscription.create()
    testProcRecoSubscription.create()
    testChildAlcaSubscription.create()
    testSubscription.acquireFiles()

    testJobGroup = JobGroup(subscription=testSubscription)
    testJobGroup.create()

    testJob = Job(name="SplitJobA", files=[inputFile])
    testJob.create(group=testJobGroup)
    testJob["state"] = "complete"
    testJob.save()

    outputMapAction = self.daoFactory(classname="Jobs.GetOutputMap")
    outputMap = outputMapAction.execute(jobID=testJob["id"])

    assert len(outputMap.keys()) == 3, \
        "Error: Wrong number of outputs for primary workflow."
    goldenMap = {"output": (recoOutputFileset.id,
                            mergedRecoOutputFileset.id),
                 "ALCARECOStreamCombined": (alcaOutputFileset.id,
                                            mergedAlcaOutputFileset.id),
                 "DQM": (dqmOutputFileset.id,
                         mergedDqmOutputFileset.id)}

    for outputID in outputMap.keys():
        for outputFilesets in outputMap[outputID]:
            if outputFilesets["merged_output_fileset"] is None:
                self.assertEqual(outputFilesets["output_fileset"],
                                 cleanupFileset.id,
                                 "Error: Cleanup fileset is wrong.")
                continue

            self.assertTrue(outputID in goldenMap.keys(),
                            "Error: Output identifier is missing.")
            self.assertEqual(outputFilesets["output_fileset"],
                             goldenMap[outputID][0],
                             "Error: Output fileset is wrong.")
            self.assertEqual(outputFilesets["merged_output_fileset"],
                             goldenMap[outputID][1],
                             "Error: Merged output fileset is wrong.")
            del goldenMap[outputID]

    self.assertEqual(len(goldenMap.keys()), 0,
                     "Error: Missing output maps.")
    return
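# For reference, a sketch of the structure the assertions above expect
# Jobs.GetOutputMap to return; the fileset ids here are illustrative
# placeholders, not values the DAO actually produces:
#
# outputMap = {
#     "output": [{"output_fileset": 1, "merged_output_fileset": 2},
#                {"output_fileset": 7, "merged_output_fileset": None}],
#     "ALCARECOStreamCombined": [...],
#     "DQM": [...],
# }
#
# Each key is an output module name and each entry pairs an unmerged output
# fileset id with its merged counterpart; merged_output_fileset is None for
# cleanup-only outputs.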
def _createSubscriptionsInWMBS(self, task, fileset, alternativeFilesetClose = False):
    """
    _createSubscriptionsInWMBS_

    Create subscriptions in WMBS for all the tasks in the spec.  This
    includes filesets, workflows and the output map for each task.
    """
    # create runtime sandbox for workflow
    self.createSandbox()

    #FIXME: Let workflow put in values if spec is missing them
    workflow = Workflow(spec = self.wmSpec.specUrl(),
                        owner = self.wmSpec.getOwner()["name"],
                        dn = self.wmSpec.getOwner().get("dn", "unknown"),
                        group = self.wmSpec.getOwner().get("group", "unknown"),
                        owner_vogroup = self.wmSpec.getOwner().get("vogroup", "DEFAULT"),
                        owner_vorole = self.wmSpec.getOwner().get("vorole", "DEFAULT"),
                        name = self.wmSpec.name(),
                        task = task.getPathName(),
                        wfType = self.wmSpec.getDashboardActivity(),
                        alternativeFilesetClose = alternativeFilesetClose,
                        priority = self.wmSpec.priority())
    workflow.create()

    subscription = Subscription(fileset = fileset,
                                workflow = workflow,
                                split_algo = task.jobSplittingAlgorithm(),
                                type = task.getPrimarySubType())
    if subscription.exists():
        subscription.load()
        msg = "Subscription %s already exists for %s (you may ignore file insertion messages below, existing files won't be duplicated)"
        self.logger.info(msg % (subscription['id'], task.getPathName()))
    else:
        subscription.create()

    for site in task.siteWhitelist():
        subscription.addWhiteBlackList([{"site_name": site, "valid": True}])
    for site in task.siteBlacklist():
        subscription.addWhiteBlackList([{"site_name": site, "valid": False}])

    if self.topLevelSubscription is None:
        self.topLevelSubscription = subscription
        logging.info("Top level subscription created: %s" % subscription["id"])
    else:
        logging.info("Child subscription created: %s" % subscription["id"])

    outputModules = task.getOutputModulesForTask()
    for outputModule in outputModules:
        for outputModuleName in outputModule.listSections_():
            outputFileset = Fileset(self.outputFilesetName(task, outputModuleName))
            outputFileset.create()
            outputFileset.markOpen(True)

            mergedOutputFileset = None
            for childTask in task.childTaskIterator():
                if childTask.data.input.outputModule == outputModuleName:
                    if childTask.taskType() == "Merge":
                        mergedOutputFileset = Fileset(self.outputFilesetName(childTask, "Merged"))
                        mergedOutputFileset.create()
                        mergedOutputFileset.markOpen(True)

                        primaryDataset = getattr(getattr(outputModule, outputModuleName),
                                                 "primaryDataset", None)
                        if primaryDataset is not None:
                            self.mergeOutputMapping[mergedOutputFileset.id] = primaryDataset

                    self._createSubscriptionsInWMBS(childTask, outputFileset,
                                                    alternativeFilesetClose)

            if mergedOutputFileset is None:
                workflow.addOutput(outputModuleName, outputFileset, outputFileset)
            else:
                workflow.addOutput(outputModuleName, outputFileset,
                                   mergedOutputFileset)

    return self.topLevelSubscription
def createTestJobGroup(self, config, name = "TestWorkthrough", filesetName = "TestFileset", specLocation = "spec.xml", error = False, task = "/TestWorkload/ReReco", type = "Processing"): """ Creates a group of several jobs """ myThread = threading.currentThread() testWorkflow = Workflow(spec = specLocation, owner = self.OWNERDN, name = name, task = task, owner_vogroup="", owner_vorole="") testWorkflow.create() self.inject.execute(names = [name], injected = True) testWMBSFileset = Fileset(name = filesetName) testWMBSFileset.create() testFileA = File(lfn = "/this/is/a/lfnA" , size = 1024, events = 10) testFileA.addRun(Run(10, *[12312])) testFileA.setLocation('malpaquet') testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10) testFileB.addRun(Run(10, *[12314])) testFileB.setLocation('malpaquet') testFileA.create() testFileB.create() testWMBSFileset.addFile(testFileA) testWMBSFileset.addFile(testFileB) testWMBSFileset.commit() testWMBSFileset.markOpen(0) outputWMBSFileset = Fileset(name = '%sOutput' % filesetName) outputWMBSFileset.create() testFileC = File(lfn = "/this/is/a/lfnC" , size = 1024, events = 10) testFileC.addRun(Run(10, *[12312])) testFileC.setLocation('malpaquet') testFileC.create() outputWMBSFileset.addFile(testFileC) outputWMBSFileset.commit() outputWMBSFileset.markOpen(0) testWorkflow.addOutput('output', outputWMBSFileset) testSubscription = Subscription(fileset = testWMBSFileset, workflow = testWorkflow, type = type) testSubscription.create() testJobGroup = JobGroup(subscription = testSubscription) testJobGroup.create() for i in range(0,self.nJobs): testJob = Job(name = makeUUID()) testJob.addFile(testFileA) testJob.addFile(testFileB) testJob['retry_count'] = 1 testJob['retry_max'] = 10 testJob['mask'].addRunAndLumis(run = 10, lumis = [12312, 12313]) testJobGroup.add(testJob) testJobGroup.commit() changer = ChangeState(config) report1 = Report() report2 = Report() if error: path1 = os.path.join(WMCore.WMBase.getTestBase(), "WMComponent_t/JobAccountant_t/fwjrs", "badBackfillJobReport.pkl") path2 = os.path.join(WMCore.WMBase.getTestBase(), 'WMComponent_t/TaskArchiver_t/fwjrs', 'logCollectReport2.pkl') else: path1 = os.path.join(WMCore.WMBase.getTestBase(), 'WMComponent_t/TaskArchiver_t/fwjrs', 'mergeReport1.pkl') path2 = os.path.join(WMCore.WMBase.getTestBase(), 'WMComponent_t/TaskArchiver_t/fwjrs', 'logCollectReport2.pkl') report1.load(filename = path1) report2.load(filename = path2) changer.propagate(testJobGroup.jobs, 'created', 'new') changer.propagate(testJobGroup.jobs, 'executing', 'created') changer.propagate(testJobGroup.jobs, 'complete', 'executing') for i in range(self.nJobs): if i < self.nJobs/2: testJobGroup.jobs[i]['fwjr'] = report1 else: testJobGroup.jobs[i]['fwjr'] = report2 changer.propagate(testJobGroup.jobs, 'jobfailed', 'complete') changer.propagate(testJobGroup.jobs, 'jobcooloff', 'jobfailed') changer.propagate(testJobGroup.jobs, 'created', 'jobcooloff') changer.propagate(testJobGroup.jobs, 'executing', 'created') changer.propagate(testJobGroup.jobs, 'complete', 'executing') changer.propagate(testJobGroup.jobs, 'jobfailed', 'complete') changer.propagate(testJobGroup.jobs, 'retrydone', 'jobfailed') changer.propagate(testJobGroup.jobs, 'exhausted', 'retrydone') changer.propagate(testJobGroup.jobs, 'cleanout', 'exhausted') testSubscription.completeFiles([testFileA, testFileB]) return testJobGroup
def setUp(self):
    """
    _setUp_

    Setup the database and WMBS for the test.
    """
    self.testInit = TestInit(__file__)
    self.testInit.setLogging()
    self.testInit.setDatabaseConnection()
    self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer",
                                             "WMCore.WMBS"],
                            useDefault = False)

    myThread = threading.currentThread()
    self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)
    self.dbsfactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)

    locationAction = self.daofactory(classname = "Locations.New")
    locationAction.execute(siteName = "site1", pnn = "T1_US_FNAL_Disk")

    inputFile = File(lfn = "/path/to/some/lfn", size = 10, events = 10,
                     locations = "T1_US_FNAL_Disk")
    inputFile.create()

    inputFileset = Fileset(name = "InputFileset")
    inputFileset.create()
    inputFileset.addFile(inputFile)
    inputFileset.commit()

    unmergedFileset = Fileset(name = "UnmergedFileset")
    unmergedFileset.create()

    mergedFileset = Fileset(name = "MergedFileset")
    mergedFileset.create()

    procWorkflow = Workflow(spec = "wf001.xml", owner = "Steve",
                            name = "TestWF", task = "/TestWF/None")
    procWorkflow.create()
    procWorkflow.addOutput("outputRECORECO", unmergedFileset)

    mergeWorkflow = Workflow(spec = "wf002.xml", owner = "Steve",
                             name = "MergeWF", task = "/MergeWF/None")
    mergeWorkflow.create()
    mergeWorkflow.addOutput("Merged", mergedFileset)

    insertWorkflow = self.dbsfactory(classname = "InsertWorkflow")
    insertWorkflow.execute("TestWF", "/TestWF/None", 0, 0, 0, 0)
    insertWorkflow.execute("MergeWF", "/MergeWF/None", 0, 0, 0, 0)

    self.procSubscription = Subscription(fileset = inputFileset,
                                         workflow = procWorkflow,
                                         split_algo = "FileBased",
                                         type = "Processing")
    self.procSubscription.create()
    self.procSubscription.acquireFiles()

    self.mergeSubscription = Subscription(fileset = unmergedFileset,
                                          workflow = mergeWorkflow,
                                          split_algo = "WMBSMergeBySize",
                                          type = "Merge")
    self.mergeSubscription.create()

    self.procJobGroup = JobGroup(subscription = self.procSubscription)
    self.procJobGroup.create()
    self.mergeJobGroup = JobGroup(subscription = self.mergeSubscription)
    self.mergeJobGroup.create()

    self.testJob = Job(name = "testJob", files = [inputFile])
    self.testJob.create(group = self.procJobGroup)
    self.testJob["state"] = "complete"

    myThread = threading.currentThread()
    self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)

    self.stateChangeAction = self.daofactory(classname = "Jobs.ChangeState")
    self.setFWJRAction = self.daofactory(classname = "Jobs.SetFWJRPath")
    self.getJobTypeAction = self.daofactory(classname = "Jobs.GetType")
    locationAction = self.daofactory(classname = "Locations.New")
    locationAction.execute(siteName = "cmssrm.fnal.gov")

    self.stateChangeAction.execute(jobs = [self.testJob])

    self.tempDir = tempfile.mkdtemp()
    return
def createTestJobGroup(self, config, name="TestWorkthrough", filesetName="TestFileset", specLocation="spec.xml", error=False, task="/TestWorkload/ReReco", multicore=False): """ Creates a group of several jobs """ myThread = threading.currentThread() testWorkflow = Workflow(spec=specLocation, owner=self.OWNERDN, name=name, task=task, owner_vogroup="", owner_vorole="") testWorkflow.create() self.inject.execute(names=[name], injected=True) testWMBSFileset = Fileset(name=filesetName) testWMBSFileset.create() testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10) testFileA.addRun(Run(10, *[12312])) testFileA.setLocation('malpaquet') testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10) testFileB.addRun(Run(10, *[12314])) testFileB.setLocation('malpaquet') testFileA.create() testFileB.create() testWMBSFileset.addFile(testFileA) testWMBSFileset.addFile(testFileB) testWMBSFileset.commit() testWMBSFileset.markOpen(0) outputWMBSFileset = Fileset(name='%sOutput' % filesetName) outputWMBSFileset.create() testFileC = File(lfn="/this/is/a/lfnC", size=1024, events=10) testFileC.addRun(Run(10, *[12312])) testFileC.setLocation('malpaquet') testFileC.create() outputWMBSFileset.addFile(testFileC) outputWMBSFileset.commit() outputWMBSFileset.markOpen(0) testWorkflow.addOutput('output', outputWMBSFileset) testSubscription = Subscription(fileset=testWMBSFileset, workflow=testWorkflow) testSubscription.create() testJobGroup = JobGroup(subscription=testSubscription) testJobGroup.create() for i in range(0, self.nJobs): testJob = Job(name=makeUUID()) testJob.addFile(testFileA) testJob.addFile(testFileB) testJob['retry_count'] = 1 testJob['retry_max'] = 10 testJob['mask'].addRunAndLumis(run=10, lumis=[12312, 12313]) testJobGroup.add(testJob) testJobGroup.commit() changer = ChangeState(config) report1 = Report() report2 = Report() if error: path1 = os.path.join(WMCore.WMBase.getTestBase(), "WMComponent_t/JobAccountant_t/fwjrs", "badBackfillJobReport.pkl") path2 = os.path.join(WMCore.WMBase.getTestBase(), 'WMComponent_t/TaskArchiver_t/fwjrs', 'logCollectReport2.pkl') elif multicore: path1 = os.path.join( WMCore.WMBase.getTestBase(), "WMCore_t/FwkJobReport_t/MulticoreReport.pkl") path2 = path1 else: path1 = os.path.join(WMCore.WMBase.getTestBase(), 'WMComponent_t/TaskArchiver_t/fwjrs', 'mergeReport1.pkl') path2 = os.path.join(WMCore.WMBase.getTestBase(), 'WMComponent_t/TaskArchiver_t/fwjrs', 'logCollectReport2.pkl') report1.load(filename=path1) report2.load(filename=path2) changer.propagate(testJobGroup.jobs, 'created', 'new') changer.propagate(testJobGroup.jobs, 'executing', 'created') changer.propagate(testJobGroup.jobs, 'complete', 'executing') for i in range(self.nJobs): if i < self.nJobs / 2: testJobGroup.jobs[i]['fwjr'] = report1 else: testJobGroup.jobs[i]['fwjr'] = report2 changer.propagate(testJobGroup.jobs, 'jobfailed', 'complete') changer.propagate(testJobGroup.jobs, 'jobcooloff', 'jobfailed') changer.propagate(testJobGroup.jobs, 'created', 'jobcooloff') changer.propagate(testJobGroup.jobs, 'executing', 'created') changer.propagate(testJobGroup.jobs, 'complete', 'executing') changer.propagate(testJobGroup.jobs, 'jobfailed', 'complete') changer.propagate(testJobGroup.jobs, 'exhausted', 'jobfailed') changer.propagate(testJobGroup.jobs, 'cleanout', 'exhausted') testSubscription.completeFiles([testFileA, testFileB]) return testJobGroup
def _createSubscriptionsInWMBS(self, task, fileset,
                               alternativeFilesetClose=False):
    """
    _createSubscriptionsInWMBS_

    Create subscriptions in WMBS for all the tasks in the spec.  This
    includes filesets, workflows and the output map for each task.
    """
    # create runtime sandbox for workflow
    self.createSandbox()

    # FIXME: Let workflow put in values if spec is missing them
    workflow = Workflow(spec=self.wmSpec.specUrl(),
                        owner=self.wmSpec.getOwner()["name"],
                        dn=self.wmSpec.getOwner().get("dn", "unknown"),
                        group=self.wmSpec.getOwner().get("group", "unknown"),
                        owner_vogroup=self.wmSpec.getOwner().get("vogroup", "DEFAULT"),
                        owner_vorole=self.wmSpec.getOwner().get("vorole", "DEFAULT"),
                        name=self.wmSpec.name(),
                        task=task.getPathName(),
                        wfType=self.wmSpec.getDashboardActivity(),
                        alternativeFilesetClose=alternativeFilesetClose,
                        priority=self.wmSpec.priority())
    workflow.create()

    subscription = Subscription(fileset=fileset,
                                workflow=workflow,
                                split_algo=task.jobSplittingAlgorithm(),
                                type=task.getPrimarySubType())
    if subscription.exists():
        subscription.load()
        msg = "Subscription %s already exists for %s (you may ignore file insertion messages below, existing files won't be duplicated)"
        self.logger.info(msg % (subscription['id'], task.getPathName()))
    else:
        subscription.create()

    for site in task.siteWhitelist():
        subscription.addWhiteBlackList([{"site_name": site, "valid": True}])
    for site in task.siteBlacklist():
        subscription.addWhiteBlackList([{"site_name": site, "valid": False}])

    if self.topLevelSubscription is None:
        self.topLevelSubscription = subscription
        logging.info("Top level subscription created: %s", subscription["id"])
    else:
        logging.info("Child subscription created: %s", subscription["id"])

    outputModules = task.getOutputModulesForTask()
    ignoredOutputModules = task.getIgnoredOutputModulesForTask()
    for outputModule in outputModules:
        for outputModuleName in outputModule.listSections_():
            if outputModuleName in ignoredOutputModules:
                logging.info("IgnoredOutputModule set for %s, skipping fileset creation.",
                             outputModuleName)
                continue
            outputFileset = Fileset(self.outputFilesetName(task, outputModuleName))
            outputFileset.create()
            outputFileset.markOpen(True)

            mergedOutputFileset = None
            for childTask in task.childTaskIterator():
                if childTask.data.input.outputModule == outputModuleName:
                    if childTask.taskType() == "Merge":
                        mergedOutputFileset = Fileset(self.outputFilesetName(childTask, "Merged"))
                        mergedOutputFileset.create()
                        mergedOutputFileset.markOpen(True)

                        primaryDataset = getattr(getattr(outputModule, outputModuleName),
                                                 "primaryDataset", None)
                        if primaryDataset is not None:
                            self.mergeOutputMapping[mergedOutputFileset.id] = primaryDataset

                    self._createSubscriptionsInWMBS(childTask, outputFileset,
                                                    alternativeFilesetClose)

            if mergedOutputFileset is None:
                workflow.addOutput(outputModuleName, outputFileset, outputFileset)
            else:
                workflow.addOutput(outputModuleName, outputFileset,
                                   mergedOutputFileset)

    return self.topLevelSubscription
def _createSubscriptionsInWMBS(self, task, fileset,
                               alternativeFilesetClose=False):
    """
    _createSubscriptionsInWMBS_

    Create subscriptions in WMBS for all the tasks in the spec.  This
    includes filesets, workflows and the output map for each task.
    """
    # create runtime sandbox for workflow
    self.createSandbox()

    # FIXME: Let workflow put in values if spec is missing them
    workflow = Workflow(spec=self.wmSpec.specUrl(),
                        owner=self.wmSpec.getOwner()["name"],
                        dn=self.wmSpec.getOwner().get("dn", "unknown"),
                        group=self.wmSpec.getOwner().get("group", "unknown"),
                        owner_vogroup=self.wmSpec.getOwner().get("vogroup", "DEFAULT"),
                        owner_vorole=self.wmSpec.getOwner().get("vorole", "DEFAULT"),
                        name=self.wmSpec.name(),
                        task=task.getPathName(),
                        wfType=self.wmSpec.getDashboardActivity(),
                        alternativeFilesetClose=alternativeFilesetClose,
                        priority=self.wmSpec.priority())
    workflow.create()

    subscription = Subscription(fileset=fileset,
                                workflow=workflow,
                                split_algo=task.jobSplittingAlgorithm(),
                                type=task.getPrimarySubType())
    subscription.create()

    ### FIXME: I'm pretty sure we can improve how we handle this site white/black list
    for site in task.siteWhitelist():
        subscription.addWhiteBlackList([{"site_name": site, "valid": True}])
    for site in task.siteBlacklist():
        subscription.addWhiteBlackList([{"site_name": site, "valid": False}])

    if self.topLevelSubscription is None:
        self.topLevelSubscription = subscription
        logging.info("Top level subscription %s created for %s",
                     subscription["id"], self.wmSpec.name())
    else:
        logging.info("Child subscription %s created for %s",
                     subscription["id"], self.wmSpec.name())

    outputModules = task.getOutputModulesForTask()
    ignoredOutputModules = task.getIgnoredOutputModulesForTask()
    for outputModule in outputModules:
        for outputModuleName in outputModule.listSections_():
            if outputModuleName in ignoredOutputModules:
                msg = "%s has %s as IgnoredOutputModule, skipping fileset creation."
                logging.info(msg, task.getPathName(), outputModuleName)
                continue
            dataTier = getattr(getattr(outputModule, outputModuleName),
                               "dataTier", '')
            filesetName = self.outputFilesetName(task, outputModuleName, dataTier)
            outputFileset = Fileset(filesetName)
            outputFileset.create()
            outputFileset.markOpen(True)

            mergedOutputFileset = None
            for childTask in task.childTaskIterator():
                if childTask.data.input.outputModule == outputModuleName:
                    childDatatier = getattr(childTask.data.input, 'dataTier', '')
                    if childTask.taskType() in ["Cleanup", "Merge"] and childDatatier != dataTier:
                        continue
                    elif childTask.taskType() == "Merge" and childDatatier == dataTier:
                        filesetName = self.outputFilesetName(childTask, "Merged", dataTier)
                        mergedOutputFileset = Fileset(filesetName)
                        mergedOutputFileset.create()
                        mergedOutputFileset.markOpen(True)

                        primaryDataset = getattr(getattr(outputModule, outputModuleName),
                                                 "primaryDataset", None)
                        if primaryDataset is not None:
                            self.mergeOutputMapping[mergedOutputFileset.id] = primaryDataset

                    self._createSubscriptionsInWMBS(childTask, outputFileset,
                                                    alternativeFilesetClose)

            if mergedOutputFileset is None:
                workflow.addOutput(outputModuleName + dataTier, outputFileset,
                                   outputFileset)
            else:
                workflow.addOutput(outputModuleName + dataTier, outputFileset,
                                   mergedOutputFileset)

    return
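# A minimal sketch of how the recursive helper above might be invoked for a
# whole spec; the `wmbsHelper` object, the way the top-level task is
# obtained and the input fileset name are all illustrative assumptions
# rather than the actual call site in WMCore.
#
# topLevelFileset = Fileset(name="%s-Fileset" % wmbsHelper.wmSpec.name())
# topLevelFileset.create()
# for topLevelTask in wmbsHelper.wmSpec.taskIterator():
#     wmbsHelper._createSubscriptionsInWMBS(topLevelTask, topLevelFileset)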
def testOutput(self):
    """
    _testOutput_

    Create a workflow and add some outputs to it.  Verify that these are
    stored to and loaded from the database correctly.
    """
    testFilesetA = Fileset(name="testFilesetA")
    testMergedFilesetA = Fileset(name="testMergedFilesetA")
    testFilesetB = Fileset(name="testFilesetB")
    testMergedFilesetB = Fileset(name="testMergedFilesetB")
    testFilesetC = Fileset(name="testFilesetC")
    testMergedFilesetC = Fileset(name="testMergedFilesetC")
    testFilesetA.create()
    testFilesetB.create()
    testFilesetC.create()
    testMergedFilesetA.create()
    testMergedFilesetB.create()
    testMergedFilesetC.create()

    testWorkflowA = Workflow(spec="spec.xml", owner="Simon",
                             name="wf001", task='Test')
    testWorkflowA.create()

    testWorkflowB = Workflow(name="wf001", task='Test')
    testWorkflowB.load()

    self.assertEqual(len(testWorkflowB.outputMap.keys()), 0,
                     "ERROR: Output map exists before output is assigned")

    testWorkflowA.addOutput("outModOne", testFilesetA, testMergedFilesetA)
    testWorkflowA.addOutput("outModOne", testFilesetC, testMergedFilesetC)
    testWorkflowA.addOutput("outModTwo", testFilesetB, testMergedFilesetB)

    testWorkflowC = Workflow(name="wf001", task='Test')
    testWorkflowC.load()

    self.assertEqual(len(testWorkflowC.outputMap.keys()), 2,
                     "ERROR: Incorrect number of outputs in output map")
    self.assertTrue("outModOne" in testWorkflowC.outputMap.keys(),
                    "ERROR: Output modules missing from workflow output map")
    self.assertTrue("outModTwo" in testWorkflowC.outputMap.keys(),
                    "ERROR: Output modules missing from workflow output map")

    for outputMap in testWorkflowC.outputMap["outModOne"]:
        if outputMap["output_fileset"].id == testFilesetA.id:
            self.assertEqual(outputMap["merged_output_fileset"].id,
                             testMergedFilesetA.id,
                             "Error: Output map incorrectly maps filesets.")
        else:
            self.assertEqual(outputMap["merged_output_fileset"].id,
                             testMergedFilesetC.id,
                             "Error: Output map incorrectly maps filesets.")
            self.assertEqual(outputMap["output_fileset"].id,
                             testFilesetC.id,
                             "Error: Output map incorrectly maps filesets.")

    self.assertEqual(testWorkflowC.outputMap["outModTwo"][0]["merged_output_fileset"].id,
                     testMergedFilesetB.id,
                     "Error: Output map incorrectly maps filesets.")
    self.assertEqual(testWorkflowC.outputMap["outModTwo"][0]["output_fileset"].id,
                     testFilesetB.id,
                     "Error: Output map incorrectly maps filesets.")
    return
def setUp(self):
    """
    _setUp_

    Setup the database and WMBS for the test.
    """
    self.testInit = TestInit(__file__)
    self.testInit.setLogging()
    self.testInit.setDatabaseConnection()
    self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer",
                                             "WMCore.WMBS"],
                            useDefault = False)

    myThread = threading.currentThread()
    self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)
    self.dbsfactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)

    locationAction = self.daofactory(classname = "Locations.New")
    locationAction.execute(siteName = "site1", seName = "cmssrm.fnal.gov")

    inputFile = File(lfn = "/path/to/some/lfn", size = 10, events = 10,
                     locations = "cmssrm.fnal.gov")
    inputFile.create()

    inputFileset = Fileset(name = "InputFileset")
    inputFileset.create()
    inputFileset.addFile(inputFile)
    inputFileset.commit()

    unmergedFileset = Fileset(name = "UnmergedFileset")
    unmergedFileset.create()

    mergedFileset = Fileset(name = "MergedFileset")
    mergedFileset.create()

    procWorkflow = Workflow(spec = "wf001.xml", owner = "Steve",
                            name = "TestWF", task = "/TestWF/None")
    procWorkflow.create()
    procWorkflow.addOutput("outputRECORECO", unmergedFileset)

    mergeWorkflow = Workflow(spec = "wf002.xml", owner = "Steve",
                             name = "MergeWF", task = "/MergeWF/None")
    mergeWorkflow.create()
    mergeWorkflow.addOutput("Merged", mergedFileset)

    insertWorkflow = self.dbsfactory(classname = "InsertWorkflow")
    insertWorkflow.execute("TestWF", "/TestWF/None", 0, 0, 0, 0)
    insertWorkflow.execute("MergeWF", "/MergeWF/None", 0, 0, 0, 0)

    self.procSubscription = Subscription(fileset = inputFileset,
                                         workflow = procWorkflow,
                                         split_algo = "FileBased",
                                         type = "Processing")
    self.procSubscription.create()
    self.procSubscription.acquireFiles()

    self.mergeSubscription = Subscription(fileset = unmergedFileset,
                                          workflow = mergeWorkflow,
                                          split_algo = "WMBSMergeBySize",
                                          type = "Merge")
    self.mergeSubscription.create()

    self.procJobGroup = JobGroup(subscription = self.procSubscription)
    self.procJobGroup.create()
    self.mergeJobGroup = JobGroup(subscription = self.mergeSubscription)
    self.mergeJobGroup.create()

    self.testJob = Job(name = "testJob", files = [inputFile])
    self.testJob.create(group = self.procJobGroup)
    self.testJob["state"] = "complete"

    myThread = threading.currentThread()
    self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)

    self.stateChangeAction = self.daofactory(classname = "Jobs.ChangeState")
    self.setFWJRAction = self.daofactory(classname = "Jobs.SetFWJRPath")
    self.getJobTypeAction = self.daofactory(classname = "Jobs.GetType")
    locationAction = self.daofactory(classname = "Locations.New")
    locationAction.execute(siteName = "cmssrm.fnal.gov")

    self.stateChangeAction.execute(jobs = [self.testJob])

    self.tempDir = tempfile.mkdtemp()
    return
def testParallelProcessing(self):
    """
    _testParallelProcessing_

    Verify that merging works correctly when multiple processing
    subscriptions are run over the same input files.  The merging
    algorithm should ignore processing jobs that feed into different
    merge subscriptions.
    """
    locationAction = self.daoFactory(classname="Locations.New")
    locationAction.execute(siteName="T2_CH_CERN", pnn="T2_CH_CERN")
    locationAction.execute(siteName="T1_US_FNAL", pnn="T2_CH_CERN")

    mergeFilesetA = Fileset(name="mergeFilesetA")
    mergeFilesetB = Fileset(name="mergeFilesetB")
    mergeFilesetA.create()
    mergeFilesetB.create()

    mergeMergedFilesetA = Fileset(name="mergeMergedFilesetA")
    mergeMergedFilesetB = Fileset(name="mergeMergedFilesetB")
    mergeMergedFilesetA.create()
    mergeMergedFilesetB.create()

    mergeWorkflow = Workflow(name="mergeWorkflow", spec="bogus",
                             owner="Steve", task="Test")
    mergeWorkflow.create()

    mergeSubscriptionA = Subscription(fileset=mergeFilesetA,
                                      workflow=mergeWorkflow,
                                      split_algo="WMBSMergeBySize")
    mergeSubscriptionB = Subscription(fileset=mergeFilesetB,
                                      workflow=mergeWorkflow,
                                      split_algo="WMBSMergeBySize")
    mergeSubscriptionA.create()
    mergeSubscriptionB.create()

    inputFileset = Fileset(name="inputFileset")
    inputFileset.create()

    inputFileA = File(lfn="inputLFNA")
    inputFileB = File(lfn="inputLFNB")
    inputFileA.create()
    inputFileB.create()

    procWorkflowA = Workflow(name="procWorkflowA", spec="bunk2",
                             owner="Steve", task="Test")
    procWorkflowA.create()
    procWorkflowA.addOutput("output", mergeFilesetA, mergeMergedFilesetA)

    procWorkflowB = Workflow(name="procWorkflowB", spec="bunk3",
                             owner="Steve", task="Test2")
    procWorkflowB.create()
    procWorkflowB.addOutput("output", mergeFilesetB, mergeMergedFilesetB)

    procSubscriptionA = Subscription(fileset=inputFileset,
                                     workflow=procWorkflowA,
                                     split_algo="EventBased")
    procSubscriptionA.create()
    procSubscriptionB = Subscription(fileset=inputFileset,
                                     workflow=procWorkflowB,
                                     split_algo="EventBased")
    procSubscriptionB.create()

    jobGroupA = JobGroup(subscription=procSubscriptionA)
    jobGroupA.create()
    jobGroupB = JobGroup(subscription=procSubscriptionB)
    jobGroupB.create()

    changeStateDAO = self.daoFactory(classname="Jobs.ChangeState")

    testJobA = Job()
    testJobA.addFile(inputFileA)
    testJobA.create(jobGroupA)
    testJobA["state"] = "cleanout"
    testJobA["oldstate"] = "new"
    testJobA["couch_record"] = "somejive"
    testJobA["retry_count"] = 0
    testJobA["outcome"] = "success"
    testJobA.save()

    testJobB = Job()
    testJobB.addFile(inputFileB)
    testJobB.create(jobGroupA)
    testJobB["state"] = "cleanout"
    testJobB["oldstate"] = "new"
    testJobB["couch_record"] = "somejive"
    testJobB["retry_count"] = 0
    testJobB["outcome"] = "success"
    testJobB.save()

    testJobC = Job()
    testJobC.addFile(inputFileA)
    testJobC.create(jobGroupB)
    testJobC["state"] = "cleanout"
    testJobC["oldstate"] = "new"
    testJobC["couch_record"] = "somejive"
    testJobC["retry_count"] = 0
    testJobC["outcome"] = "success"
    testJobC.save()

    testJobD = Job()
    testJobD.addFile(inputFileA)
    testJobD.create(jobGroupB)
    testJobD["state"] = "cleanout"
    testJobD["oldstate"] = "new"
    testJobD["couch_record"] = "somejive"
    testJobD["retry_count"] = 0
    testJobD["outcome"] = "failure"
    testJobD.save()

    testJobE = Job()
    testJobE.addFile(inputFileB)
    testJobE.create(jobGroupB)
    testJobE["state"] = "cleanout"
    testJobE["oldstate"] = "new"
    testJobE["couch_record"] = "somejive"
    testJobE["retry_count"] = 0
    testJobE["outcome"] = "success"
    testJobE.save()

    testJobF = Job()
    testJobF.addFile(inputFileB)
    testJobF.create(jobGroupB)
    testJobF["state"] = "cleanout"
    testJobF["oldstate"] = "new"
    testJobF["couch_record"] = "somejive"
    testJobF["retry_count"] = 0
    testJobF["outcome"] = "failure"
    testJobF.save()

    changeStateDAO.execute([testJobA, testJobB, testJobC, testJobD,
                            testJobE, testJobF])

    fileA = File(lfn="fileA", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileA.addRun(Run(1, *[45]))
    fileA.create()
    fileA.addParent(inputFileA["lfn"])

    fileB = File(lfn="fileB", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileB.addRun(Run(1, *[45]))
    fileB.create()
    fileB.addParent(inputFileB["lfn"])

    jobGroupA.output.addFile(fileA)
    jobGroupA.output.addFile(fileB)
    jobGroupA.output.commit()

    mergeFilesetA.addFile(fileA)
    mergeFilesetA.addFile(fileB)
    mergeFilesetA.commit()

    fileC = File(lfn="fileC", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileC.addRun(Run(1, *[45]))
    fileC.create()
    fileC.addParent(inputFileA["lfn"])

    fileD = File(lfn="fileD", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileD.addRun(Run(1, *[45]))
    fileD.create()
    fileD.addParent(inputFileB["lfn"])

    jobGroupB.output.addFile(fileC)
    jobGroupB.output.addFile(fileD)

    mergeFilesetB.addFile(fileC)
    mergeFilesetB.addFile(fileD)
    mergeFilesetB.commit()

    splitter = SplitterFactory()
    jobFactory = splitter(package="WMCore.WMBS",
                          subscription=mergeSubscriptionB)

    result = jobFactory(min_merge_size=1, max_merge_size=20000,
                        max_merge_events=7169)

    assert len(result) == 0, \
        "Error: No merge jobs should have been created."

    fileE = File(lfn="fileE", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileE.addRun(Run(1, *[45]))
    fileE.create()
    fileE.addParent(inputFileA["lfn"])

    fileF = File(lfn="fileF", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileF.addRun(Run(1, *[45]))
    fileF.create()
    fileF.addParent(inputFileB["lfn"])

    jobGroupB.output.addFile(fileE)
    jobGroupB.output.addFile(fileF)

    mergeFilesetB.addFile(fileE)
    mergeFilesetB.addFile(fileF)
    mergeFilesetB.commit()

    testJobD["outcome"] = "success"
    testJobD.save()
    testJobF["outcome"] = "success"
    testJobF.save()

    changeStateDAO.execute([testJobD, testJobF])

    result = jobFactory(min_merge_size=1, max_merge_size=20000,
                        max_merge_events=7169)

    assert len(result) == 1, \
        "Error: One merge job should have been created: %s" % len(result)

    return
def stuffWMBS(self, injected=True):
    """
    _stuffWMBS_

    Insert some dummy jobs, jobgroups, filesets, files and subscriptions
    into WMBS to test job creation.  Three completed job groups each
    containing several files are injected.  Another incomplete job group
    is also injected.  Also files are added to the "Mergeable"
    subscription as well as to the output fileset for their jobgroups.
    """
    locationAction = self.daoFactory(classname="Locations.New")
    locationAction.execute(siteName="T2_CH_CERN", pnn="T2_CH_CERN")
    locationAction.execute(siteName="T1_US_FNAL", pnn="T2_CH_CERN")

    changeStateDAO = self.daoFactory(classname="Jobs.ChangeState")

    self.mergeFileset = Fileset(name="mergeFileset")
    self.mergeFileset.create()
    self.bogusFileset = Fileset(name="bogusFileset")
    self.bogusFileset.create()

    self.mergeMergedFileset = Fileset(name="mergeMergedFileset")
    self.mergeMergedFileset.create()
    self.bogusMergedFileset = Fileset(name="bogusMergedFileset")
    self.bogusMergedFileset.create()

    mergeWorkflow = Workflow(name="mergeWorkflow", spec="bunk2",
                             owner="Steve", task="Test")
    mergeWorkflow.create()
    markWorkflow = self.daoFactory(classname="Workflow.MarkInjectedWorkflows")
    markWorkflow.execute(names=[mergeWorkflow.name], injected=injected)

    self.mergeSubscription = Subscription(fileset=self.mergeFileset,
                                          workflow=mergeWorkflow,
                                          split_algo="WMBSMergeBySize")
    self.mergeSubscription.create()
    self.bogusSubscription = Subscription(fileset=self.bogusFileset,
                                          workflow=mergeWorkflow,
                                          split_algo="WMBSMergeBySize")

    inputFileset = Fileset(name="inputFileset")
    inputFileset.create()

    inputWorkflow = Workflow(name="inputWorkflow", spec="input",
                             owner="Steve", task="Test")
    inputWorkflow.create()
    inputWorkflow.addOutput("output", self.mergeFileset,
                            self.mergeMergedFileset)
    inputWorkflow.addOutput("output2", self.bogusFileset,
                            self.bogusMergedFileset)
    bogusInputWorkflow = Workflow(name="bogusInputWorkflow", spec="input",
                                  owner="Steve", task="Test")
    bogusInputWorkflow.create()

    inputSubscription = Subscription(fileset=inputFileset,
                                     workflow=inputWorkflow)
    inputSubscription.create()
    bogusInputSubscription = Subscription(fileset=inputFileset,
                                          workflow=bogusInputWorkflow)
    bogusInputSubscription.create()

    parentFile1 = File(lfn="parentFile1")
    parentFile1.create()
    parentFile2 = File(lfn="parentFile2")
    parentFile2.create()
    parentFile3 = File(lfn="parentFile3")
    parentFile3.create()
    parentFile4 = File(lfn="parentFile4")
    parentFile4.create()
    self.parentFileSite2 = File(lfn="parentFileSite2")
    self.parentFileSite2.create()

    jobGroup1 = JobGroup(subscription=inputSubscription)
    jobGroup1.create()
    jobGroup2 = JobGroup(subscription=inputSubscription)
    jobGroup2.create()
    jobGroup3 = JobGroup(subscription=bogusInputSubscription)
    jobGroup3.create()

    testJob1 = Job()
    testJob1.addFile(parentFile1)
    testJob1.create(jobGroup1)
    testJob1["state"] = "cleanout"
    testJob1["oldstate"] = "new"
    testJob1["couch_record"] = "somejive"
    testJob1["retry_count"] = 0
    testJob1["outcome"] = "success"
    testJob1.save()
    changeStateDAO.execute([testJob1])

    testJob1A = Job()
    testJob1A.addFile(parentFile1)
    testJob1A.create(jobGroup3)
    testJob1A["state"] = "cleanout"
    testJob1A["oldstate"] = "new"
    testJob1A["couch_record"] = "somejive"
    testJob1A["retry_count"] = 0
    testJob1A["outcome"] = "failure"
    testJob1A.save()
    changeStateDAO.execute([testJob1A])

    testJob2 = Job()
    testJob2.addFile(parentFile2)
    testJob2.create(jobGroup1)
    testJob2["state"] = "cleanout"
    testJob2["oldstate"] = "new"
    testJob2["couch_record"] = "somejive"
    testJob2["retry_count"] = 0
    testJob2["outcome"] = "success"
    testJob2.save()
    changeStateDAO.execute([testJob2])

    testJob3 = Job()
    testJob3.addFile(parentFile3)
    testJob3.create(jobGroup2)
    testJob3["state"] = "cleanout"
    testJob3["oldstate"] = "new"
    testJob3["couch_record"] = "somejive"
    testJob3["retry_count"] = 0
    testJob3["outcome"] = "success"
    testJob3.save()
    changeStateDAO.execute([testJob3])

    testJob4 = Job()
    testJob4.addFile(parentFile4)
    testJob4.create(jobGroup2)
    testJob4["state"] = "cleanout"
    testJob4["oldstate"] = "new"
    testJob4["couch_record"] = "somejive"
    testJob4["retry_count"] = 0
    testJob4["outcome"] = "failure"
    testJob4.save()
    changeStateDAO.execute([testJob4])

    # We'll simulate a failed split by event job that the merger should
    # ignore.
    parentFile5 = File(lfn="parentFile5")
    parentFile5.create()

    testJob5 = Job()
    testJob5.addFile(parentFile5)
    testJob5.create(jobGroup2)
    testJob5["state"] = "cleanout"
    testJob5["oldstate"] = "new"
    testJob5["couch_record"] = "somejive"
    testJob5["retry_count"] = 0
    testJob5["outcome"] = "success"
    testJob5.save()
    changeStateDAO.execute([testJob5])

    testJob6 = Job()
    testJob6.addFile(parentFile5)
    testJob6.create(jobGroup2)
    testJob6["state"] = "cleanout"
    testJob6["oldstate"] = "new"
    testJob6["couch_record"] = "somejive"
    testJob6["retry_count"] = 0
    testJob6["outcome"] = "failure"
    testJob6.save()
    changeStateDAO.execute([testJob6])

    testJob7 = Job()
    testJob7.addFile(self.parentFileSite2)
    testJob7.create(jobGroup2)
    testJob7["state"] = "cleanout"
    testJob7["oldstate"] = "new"
    testJob7["couch_record"] = "somejive"
    testJob7["retry_count"] = 0
    testJob7["outcome"] = "success"
    testJob7.save()
    changeStateDAO.execute([testJob7])

    badFile1 = File(lfn="badFile1", size=10241024, events=10241024,
                    first_event=0, locations={"T2_CH_CERN"})
    badFile1.addRun(Run(1, *[45]))
    badFile1.create()
    badFile1.addParent(parentFile5["lfn"])

    file1 = File(lfn="file1", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    file1.addRun(Run(1, *[45]))
    file1.create()
    file1.addParent(parentFile1["lfn"])

    file2 = File(lfn="file2", size=1024, events=1024, first_event=1024,
                 locations={"T2_CH_CERN"})
    file2.addRun(Run(1, *[45]))
    file2.create()
    file2.addParent(parentFile1["lfn"])

    file3 = File(lfn="file3", size=1024, events=1024, first_event=2048,
                 locations={"T2_CH_CERN"})
    file3.addRun(Run(1, *[45]))
    file3.create()
    file3.addParent(parentFile1["lfn"])

    file4 = File(lfn="file4", size=1024, events=1024, first_event=3072,
                 locations={"T2_CH_CERN"})
    file4.addRun(Run(1, *[45]))
    file4.create()
    file4.addParent(parentFile1["lfn"])

    fileA = File(lfn="fileA", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileA.addRun(Run(1, *[46]))
    fileA.create()
    fileA.addParent(parentFile2["lfn"])

    fileB = File(lfn="fileB", size=1024, events=1024, first_event=1024,
                 locations={"T2_CH_CERN"})
    fileB.addRun(Run(1, *[46]))
    fileB.create()
    fileB.addParent(parentFile2["lfn"])

    fileC = File(lfn="fileC", size=1024, events=1024, first_event=2048,
                 locations={"T2_CH_CERN"})
    fileC.addRun(Run(1, *[46]))
    fileC.create()
    fileC.addParent(parentFile2["lfn"])

    fileI = File(lfn="fileI", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileI.addRun(Run(2, *[46]))
    fileI.create()
    fileI.addParent(parentFile3["lfn"])

    fileII = File(lfn="fileII", size=1024, events=1024, first_event=1024,
                  locations={"T2_CH_CERN"})
    fileII.addRun(Run(2, *[46]))
    fileII.create()
    fileII.addParent(parentFile3["lfn"])

    fileIII = File(lfn="fileIII", size=1024, events=1024, first_event=2048,
                   locations={"T2_CH_CERN"})
    fileIII.addRun(Run(2, *[46]))
    fileIII.create()
    fileIII.addParent(parentFile3["lfn"])

    fileIV = File(lfn="fileIV", size=1024, events=1024, first_event=3072,
                  locations={"T2_CH_CERN"})
    fileIV.addRun(Run(2, *[46]))
    fileIV.create()
    fileIV.addParent(parentFile3["lfn"])

    fileX = File(lfn="badFileA", size=1024, events=1024, first_event=0,
                 locations={"T2_CH_CERN"})
    fileX.addRun(Run(1, *[47]))
    fileX.create()
    fileX.addParent(parentFile4["lfn"])

    fileY = File(lfn="badFileB", size=1024, events=1024, first_event=1024,
                 locations={"T2_CH_CERN"})
    fileY.addRun(Run(1, *[47]))
    fileY.create()
    fileY.addParent(parentFile4["lfn"])

    fileZ = File(lfn="badFileC", size=1024, events=1024, first_event=2048,
                 locations={"T2_CH_CERN"})
    fileZ.addRun(Run(1, *[47]))
    fileZ.create()
    fileZ.addParent(parentFile4["lfn"])

    jobGroup1.output.addFile(file1)
    jobGroup1.output.addFile(file2)
    jobGroup1.output.addFile(file3)
    jobGroup1.output.addFile(file4)
    jobGroup1.output.addFile(fileA)
    jobGroup1.output.addFile(fileB)
    jobGroup1.output.addFile(fileC)
    jobGroup1.output.commit()

    jobGroup2.output.addFile(fileI)
    jobGroup2.output.addFile(fileII)
    jobGroup2.output.addFile(fileIII)
    jobGroup2.output.addFile(fileIV)
    jobGroup2.output.addFile(fileX)
    jobGroup2.output.addFile(fileY)
    jobGroup2.output.addFile(fileZ)
    jobGroup2.output.addFile(badFile1)
    jobGroup2.output.commit()

    for fileObj in [file1, file2, file3, file4, fileA, fileB, fileC,
                    fileI, fileII, fileIII, fileIV, fileX, fileY, fileZ,
                    badFile1]:
        self.mergeFileset.addFile(fileObj)
        self.bogusFileset.addFile(fileObj)

    self.mergeFileset.commit()
    self.bogusFileset.commit()

    return
    def testParallelProcessing(self):
        """
        _testParallelProcessing_

        Verify that merging works correctly when multiple processing
        subscriptions are run over the same input files.  The merging
        algorithm should ignore processing jobs that feed into different
        merge subscriptions.
        """
        locationAction = self.daoFactory(classname = "Locations.New")
        locationAction.execute(siteName = "s1", seName = "somese.cern.ch")

        mergeFilesetA = Fileset(name = "mergeFilesetA")
        mergeFilesetB = Fileset(name = "mergeFilesetB")
        mergeFilesetA.create()
        mergeFilesetB.create()

        mergeMergedFilesetA = Fileset(name = "mergeMergedFilesetA")
        mergeMergedFilesetB = Fileset(name = "mergeMergedFilesetB")
        mergeMergedFilesetA.create()
        mergeMergedFilesetB.create()

        mergeWorkflow = Workflow(name = "mergeWorkflow", spec = "bogus",
                                 owner = "Steve", task = "Test")
        mergeWorkflow.create()

        mergeSubscriptionA = Subscription(fileset = mergeFilesetA,
                                          workflow = mergeWorkflow,
                                          split_algo = "WMBSMergeBySize")
        mergeSubscriptionB = Subscription(fileset = mergeFilesetB,
                                          workflow = mergeWorkflow,
                                          split_algo = "WMBSMergeBySize")
        mergeSubscriptionA.create()
        mergeSubscriptionB.create()

        inputFileset = Fileset(name = "inputFileset")
        inputFileset.create()

        inputFileA = File(lfn = "inputLFNA")
        inputFileB = File(lfn = "inputLFNB")
        inputFileA.create()
        inputFileB.create()

        procWorkflowA = Workflow(name = "procWorkflowA", spec = "bunk2",
                                 owner = "Steve", task = "Test")
        procWorkflowA.create()
        procWorkflowA.addOutput("output", mergeFilesetA, mergeMergedFilesetA)
        procWorkflowB = Workflow(name = "procWorkflowB", spec = "bunk3",
                                 owner = "Steve", task = "Test2")
        procWorkflowB.create()
        procWorkflowB.addOutput("output", mergeFilesetB, mergeMergedFilesetB)

        procSubscriptionA = Subscription(fileset = inputFileset,
                                         workflow = procWorkflowA,
                                         split_algo = "EventBased")
        procSubscriptionA.create()
        procSubscriptionB = Subscription(fileset = inputFileset,
                                         workflow = procWorkflowB,
                                         split_algo = "EventBased")
        procSubscriptionB.create()

        jobGroupA = JobGroup(subscription = procSubscriptionA)
        jobGroupA.create()
        jobGroupB = JobGroup(subscription = procSubscriptionB)
        jobGroupB.create()

        changeStateDAO = self.daoFactory(classname = "Jobs.ChangeState")

        testJobA = Job()
        testJobA.addFile(inputFileA)
        testJobA.create(jobGroupA)
        testJobA["state"] = "cleanout"
        testJobA["oldstate"] = "new"
        testJobA["couch_record"] = "somejive"
        testJobA["retry_count"] = 0
        testJobA["outcome"] = "success"
        testJobA.save()

        testJobB = Job()
        testJobB.addFile(inputFileB)
        testJobB.create(jobGroupA)
        testJobB["state"] = "cleanout"
        testJobB["oldstate"] = "new"
        testJobB["couch_record"] = "somejive"
        testJobB["retry_count"] = 0
        testJobB["outcome"] = "success"
        testJobB.save()

        testJobC = Job()
        testJobC.addFile(inputFileA)
        testJobC.create(jobGroupB)
        testJobC["state"] = "cleanout"
        testJobC["oldstate"] = "new"
        testJobC["couch_record"] = "somejive"
        testJobC["retry_count"] = 0
        testJobC["outcome"] = "success"
        testJobC.save()

        testJobD = Job()
        testJobD.addFile(inputFileA)
        testJobD.create(jobGroupB)
        testJobD["state"] = "cleanout"
        testJobD["oldstate"] = "new"
        testJobD["couch_record"] = "somejive"
        testJobD["retry_count"] = 0
        testJobD["outcome"] = "failure"
        testJobD.save()

        testJobE = Job()
        testJobE.addFile(inputFileB)
        testJobE.create(jobGroupB)
        testJobE["state"] = "cleanout"
        testJobE["oldstate"] = "new"
        testJobE["couch_record"] = "somejive"
        testJobE["retry_count"] = 0
        testJobE["outcome"] = "success"
        testJobE.save()

        testJobF = Job()
        testJobF.addFile(inputFileB)
        testJobF.create(jobGroupB)
        testJobF["state"] = "cleanout"
        testJobF["oldstate"] = "new"
        testJobF["couch_record"] = "somejive"
        testJobF["retry_count"] = 0
        testJobF["outcome"] = "failure"
        testJobF.save()

        changeStateDAO.execute([testJobA, testJobB, testJobC, testJobD,
                                testJobE, testJobF])

        fileA = File(lfn = "fileA", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileA.addRun(Run(1, *[45]))
        fileA.create()
        fileA.addParent(inputFileA["lfn"])
        fileB = File(lfn = "fileB", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileB.addRun(Run(1, *[45]))
        fileB.create()
        fileB.addParent(inputFileB["lfn"])

        jobGroupA.output.addFile(fileA)
        jobGroupA.output.addFile(fileB)
        jobGroupA.output.commit()

        mergeFilesetA.addFile(fileA)
        mergeFilesetA.addFile(fileB)
        mergeFilesetA.commit()

        fileC = File(lfn = "fileC", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileC.addRun(Run(1, *[45]))
        fileC.create()
        fileC.addParent(inputFileA["lfn"])
        fileD = File(lfn = "fileD", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileD.addRun(Run(1, *[45]))
        fileD.create()
        fileD.addParent(inputFileB["lfn"])

        jobGroupB.output.addFile(fileC)
        jobGroupB.output.addFile(fileD)
        # Commit the group's output fileset, mirroring what is done for
        # jobGroupA above; the original omitted this commit.
        jobGroupB.output.commit()

        mergeFilesetB.addFile(fileC)
        mergeFilesetB.addFile(fileD)
        mergeFilesetB.commit()

        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = mergeSubscriptionB)
        result = jobFactory(min_merge_size = 1, max_merge_size = 20000,
                            max_merge_events = 7169)

        assert len(result) == 0, \
               "Error: No merge jobs should have been created."

        fileE = File(lfn = "fileE", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileE.addRun(Run(1, *[45]))
        fileE.create()
        fileE.addParent(inputFileA["lfn"])
        fileF = File(lfn = "fileF", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileF.addRun(Run(1, *[45]))
        fileF.create()
        fileF.addParent(inputFileB["lfn"])

        jobGroupB.output.addFile(fileE)
        jobGroupB.output.addFile(fileF)
        jobGroupB.output.commit()

        mergeFilesetB.addFile(fileE)
        mergeFilesetB.addFile(fileF)
        mergeFilesetB.commit()

        testJobD["outcome"] = "success"
        testJobD.save()
        testJobF["outcome"] = "success"
        testJobF.save()

        changeStateDAO.execute([testJobD, testJobF])

        result = jobFactory(min_merge_size = 1, max_merge_size = 20000,
                            max_merge_events = 7169)

        assert len(result) == 1, \
               "Error: One merge job should have been created: %s" % len(result)

        return
    def stuffWMBS(self, injected = True):
        """
        _stuffWMBS_

        Insert some dummy jobs, job groups, filesets, files and subscriptions
        into WMBS to test job creation.  Three completed job groups, each
        containing several files, are injected, along with one incomplete job
        group.  Files are added to the "Mergeable" subscription's fileset as
        well as to the output fileset of their job groups.
        """
        locationAction = self.daoFactory(classname = "Locations.New")
        locationAction.execute(siteName = "s1", seName = "somese.cern.ch")

        changeStateDAO = self.daoFactory(classname = "Jobs.ChangeState")

        self.mergeFileset = Fileset(name = "mergeFileset")
        self.mergeFileset.create()
        self.bogusFileset = Fileset(name = "bogusFileset")
        self.bogusFileset.create()

        self.mergeMergedFileset = Fileset(name = "mergeMergedFileset")
        self.mergeMergedFileset.create()
        self.bogusMergedFileset = Fileset(name = "bogusMergedFileset")
        self.bogusMergedFileset.create()

        mergeWorkflow = Workflow(name = "mergeWorkflow", spec = "bunk2",
                                 owner = "Steve", task = "Test")
        mergeWorkflow.create()
        markWorkflow = self.daoFactory(classname = "Workflow.MarkInjectedWorkflows")
        markWorkflow.execute(names = [mergeWorkflow.name], injected = injected)

        self.mergeSubscription = Subscription(fileset = self.mergeFileset,
                                              workflow = mergeWorkflow,
                                              split_algo = "WMBSMergeBySize")
        self.mergeSubscription.create()
        self.bogusSubscription = Subscription(fileset = self.bogusFileset,
                                              workflow = mergeWorkflow,
                                              split_algo = "WMBSMergeBySize")
        # Create the bogus subscription as well; the original omitted this
        # even though the merge subscription above is created.
        self.bogusSubscription.create()

        inputFileset = Fileset(name = "inputFileset")
        inputFileset.create()

        inputWorkflow = Workflow(name = "inputWorkflow", spec = "input",
                                 owner = "Steve", task = "Test")
        inputWorkflow.create()
        inputWorkflow.addOutput("output", self.mergeFileset,
                                self.mergeMergedFileset)
        inputWorkflow.addOutput("output2", self.bogusFileset,
                                self.bogusMergedFileset)
        bogusInputWorkflow = Workflow(name = "bogusInputWorkflow", spec = "input",
                                      owner = "Steve", task = "Test")
        bogusInputWorkflow.create()

        inputSubscription = Subscription(fileset = inputFileset,
                                         workflow = inputWorkflow)
        inputSubscription.create()
        bogusInputSubscription = Subscription(fileset = inputFileset,
                                              workflow = bogusInputWorkflow)
        bogusInputSubscription.create()

        parentFile1 = File(lfn = "parentFile1")
        parentFile1.create()
        parentFile2 = File(lfn = "parentFile2")
        parentFile2.create()
        parentFile3 = File(lfn = "parentFile3")
        parentFile3.create()
        parentFile4 = File(lfn = "parentFile4")
        parentFile4.create()
        self.parentFileSite2 = File(lfn = "parentFileSite2")
        self.parentFileSite2.create()

        jobGroup1 = JobGroup(subscription = inputSubscription)
        jobGroup1.create()
        jobGroup2 = JobGroup(subscription = inputSubscription)
        jobGroup2.create()
        jobGroup3 = JobGroup(subscription = bogusInputSubscription)
        jobGroup3.create()

        testJob1 = Job()
        testJob1.addFile(parentFile1)
        testJob1.create(jobGroup1)
        testJob1["state"] = "cleanout"
        testJob1["oldstate"] = "new"
        testJob1["couch_record"] = "somejive"
        testJob1["retry_count"] = 0
        testJob1["outcome"] = "success"
        testJob1.save()
        changeStateDAO.execute([testJob1])

        testJob1A = Job()
        testJob1A.addFile(parentFile1)
        testJob1A.create(jobGroup3)
        testJob1A["state"] = "cleanout"
        testJob1A["oldstate"] = "new"
        testJob1A["couch_record"] = "somejive"
        testJob1A["retry_count"] = 0
        testJob1A["outcome"] = "failure"
        testJob1A.save()
        changeStateDAO.execute([testJob1A])

        testJob2 = Job()
        testJob2.addFile(parentFile2)
        testJob2.create(jobGroup1)
        testJob2["state"] = "cleanout"
        testJob2["oldstate"] = "new"
        testJob2["couch_record"] = "somejive"
        testJob2["retry_count"] = 0
        testJob2["outcome"] = "success"
        testJob2.save()
        changeStateDAO.execute([testJob2])

        testJob3 = Job()
        testJob3.addFile(parentFile3)
        testJob3.create(jobGroup2)
        testJob3["state"] = "cleanout"
        testJob3["oldstate"] = "new"
        testJob3["couch_record"] = "somejive"
        testJob3["retry_count"] = 0
        testJob3["outcome"] = "success"
        testJob3.save()
        changeStateDAO.execute([testJob3])

        testJob4 = Job()
        testJob4.addFile(parentFile4)
        testJob4.create(jobGroup2)
        testJob4["state"] = "cleanout"
        testJob4["oldstate"] = "new"
        testJob4["couch_record"] = "somejive"
        testJob4["retry_count"] = 0
        testJob4["outcome"] = "failure"
        testJob4.save()
        changeStateDAO.execute([testJob4])

        # We'll simulate a failed split by event job that the merger should
        # ignore.
        parentFile5 = File(lfn = "parentFile5")
        parentFile5.create()

        testJob5 = Job()
        testJob5.addFile(parentFile5)
        testJob5.create(jobGroup2)
        testJob5["state"] = "cleanout"
        testJob5["oldstate"] = "new"
        testJob5["couch_record"] = "somejive"
        testJob5["retry_count"] = 0
        testJob5["outcome"] = "success"
        testJob5.save()
        changeStateDAO.execute([testJob5])

        testJob6 = Job()
        testJob6.addFile(parentFile5)
        testJob6.create(jobGroup2)
        testJob6["state"] = "cleanout"
        testJob6["oldstate"] = "new"
        testJob6["couch_record"] = "somejive"
        testJob6["retry_count"] = 0
        testJob6["outcome"] = "failure"
        testJob6.save()
        changeStateDAO.execute([testJob6])

        testJob7 = Job()
        testJob7.addFile(self.parentFileSite2)
        testJob7.create(jobGroup2)
        testJob7["state"] = "cleanout"
        testJob7["oldstate"] = "new"
        testJob7["couch_record"] = "somejive"
        testJob7["retry_count"] = 0
        testJob7["outcome"] = "success"
        testJob7.save()
        changeStateDAO.execute([testJob7])

        badFile1 = File(lfn = "badFile1", size = 10241024, events = 10241024,
                        first_event = 0, locations = set(["somese.cern.ch"]))
        badFile1.addRun(Run(1, *[45]))
        badFile1.create()
        badFile1.addParent(parentFile5["lfn"])

        file1 = File(lfn = "file1", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        file1.addRun(Run(1, *[45]))
        file1.create()
        file1.addParent(parentFile1["lfn"])
        file2 = File(lfn = "file2", size = 1024, events = 1024,
                     first_event = 1024, locations = set(["somese.cern.ch"]))
        file2.addRun(Run(1, *[45]))
        file2.create()
        file2.addParent(parentFile1["lfn"])
        file3 = File(lfn = "file3", size = 1024, events = 1024,
                     first_event = 2048, locations = set(["somese.cern.ch"]))
        file3.addRun(Run(1, *[45]))
        file3.create()
        file3.addParent(parentFile1["lfn"])
        file4 = File(lfn = "file4", size = 1024, events = 1024,
                     first_event = 3072, locations = set(["somese.cern.ch"]))
        file4.addRun(Run(1, *[45]))
        file4.create()
        file4.addParent(parentFile1["lfn"])

        fileA = File(lfn = "fileA", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileA.addRun(Run(1, *[46]))
        fileA.create()
        fileA.addParent(parentFile2["lfn"])
        fileB = File(lfn = "fileB", size = 1024, events = 1024,
                     first_event = 1024, locations = set(["somese.cern.ch"]))
        fileB.addRun(Run(1, *[46]))
        fileB.create()
        fileB.addParent(parentFile2["lfn"])
        fileC = File(lfn = "fileC", size = 1024, events = 1024,
                     first_event = 2048, locations = set(["somese.cern.ch"]))
        fileC.addRun(Run(1, *[46]))
        fileC.create()
        fileC.addParent(parentFile2["lfn"])

        fileI = File(lfn = "fileI", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileI.addRun(Run(2, *[46]))
        fileI.create()
        fileI.addParent(parentFile3["lfn"])
        fileII = File(lfn = "fileII", size = 1024, events = 1024,
                      first_event = 1024, locations = set(["somese.cern.ch"]))
        fileII.addRun(Run(2, *[46]))
        fileII.create()
        fileII.addParent(parentFile3["lfn"])
        fileIII = File(lfn = "fileIII", size = 1024, events = 1024,
                       first_event = 2048, locations = set(["somese.cern.ch"]))
        fileIII.addRun(Run(2, *[46]))
        fileIII.create()
        fileIII.addParent(parentFile3["lfn"])
        fileIV = File(lfn = "fileIV", size = 1024, events = 1024,
                      first_event = 3072, locations = set(["somese.cern.ch"]))
        fileIV.addRun(Run(2, *[46]))
        fileIV.create()
        fileIV.addParent(parentFile3["lfn"])

        fileX = File(lfn = "badFileA", size = 1024, events = 1024,
                     first_event = 0, locations = set(["somese.cern.ch"]))
        fileX.addRun(Run(1, *[47]))
        fileX.create()
        fileX.addParent(parentFile4["lfn"])
        fileY = File(lfn = "badFileB", size = 1024, events = 1024,
                     first_event = 1024, locations = set(["somese.cern.ch"]))
        fileY.addRun(Run(1, *[47]))
        fileY.create()
        fileY.addParent(parentFile4["lfn"])
        fileZ = File(lfn = "badFileC", size = 1024, events = 1024,
                     first_event = 2048, locations = set(["somese.cern.ch"]))
        fileZ.addRun(Run(1, *[47]))
        fileZ.create()
        fileZ.addParent(parentFile4["lfn"])

        jobGroup1.output.addFile(file1)
        jobGroup1.output.addFile(file2)
        jobGroup1.output.addFile(file3)
        jobGroup1.output.addFile(file4)
        jobGroup1.output.addFile(fileA)
        jobGroup1.output.addFile(fileB)
        jobGroup1.output.addFile(fileC)
        jobGroup1.output.commit()

        jobGroup2.output.addFile(fileI)
        jobGroup2.output.addFile(fileII)
        jobGroup2.output.addFile(fileIII)
        jobGroup2.output.addFile(fileIV)
        jobGroup2.output.addFile(fileX)
        jobGroup2.output.addFile(fileY)
        jobGroup2.output.addFile(fileZ)
        jobGroup2.output.addFile(badFile1)
        jobGroup2.output.commit()

        for file in [file1, file2, file3, file4, fileA, fileB, fileC,
                     fileI, fileII, fileIII, fileIV, fileX, fileY, fileZ,
                     badFile1]:
            self.mergeFileset.addFile(file)
            self.bogusFileset.addFile(file)

        self.mergeFileset.commit()
        self.bogusFileset.commit()

        return
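For context, here is a hedged sketch of how a test method might consume this fixture, following the SplitterFactory pattern shown in testParallelProcessing above; the method name, the thresholds, and the printed result are illustrative assumptions, not part of the fixture's contract.

# Hedged sketch of a consumer of the stuffWMBS() fixture.  It would live in
# the same TestCase class (it uses self.stuffWMBS and
# self.mergeSubscription); the name and thresholds are illustrative.
from WMCore.JobSplitting.SplitterFactory import SplitterFactory

def testMergeFixtureSketch(self):
    """Run the merge splitter once over the fixture's merge subscription."""
    self.stuffWMBS()

    splitter = SplitterFactory()
    jobFactory = splitter(package = "WMCore.WMBS",
                          subscription = self.mergeSubscription)

    # Thresholds small enough that the 1 kB files stuffed above are
    # mergeable; the exact job count depends on the algorithm, so it is
    # printed rather than asserted in this sketch.
    result = jobFactory(min_merge_size = 1, max_merge_size = 20000,
                        max_merge_events = 7169)
    print "merge job groups created: %s" % len(result)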
    def createSubscription(self, topLevelFilesetName = None, task = None,
                           fileset = None):
        """
        _createSubscription_

        Create subscriptions in WMBS for all the tasks in the spec.  This
        includes filesets, workflows and the output map for each task.
        """
        if task is None or fileset is None:
            self.createTopLevelFileset(topLevelFilesetName)
            sub = None
            for topLevelTask in self.wmSpec.getTopLevelTask():
                sub = self.createSubscription(topLevelFilesetName,
                                              topLevelTask,
                                              self.topLevelFileset)
            return sub

        # Create the runtime sandbox for the workflow.
        self.createSandbox()

        workflow = Workflow(spec = self.wmSpec.specUrl(),
                            owner = self.wmSpec.getOwner()["name"],
                            dn = self.wmSpec.getOwner().get("dn", None),
                            group = self.wmSpec.getOwner().get("group", None),
                            owner_vogroup = self.wmSpec.getOwner().get("vogroup", ''),
                            owner_vorole = self.wmSpec.getOwner().get("vorole", ''),
                            name = self.wmSpec.name(),
                            task = task.getPathName(),
                            wfType = self.wmSpec.getDashboardActivity())
        workflow.create()
        subscription = Subscription(fileset = fileset,
                                    workflow = workflow,
                                    split_algo = task.jobSplittingAlgorithm(),
                                    type = task.taskType())
        subscription.create()

        for site in task.siteWhitelist():
            subscription.addWhiteBlackList([{"site_name": site, "valid": True}])

        for site in task.siteBlacklist():
            subscription.addWhiteBlackList([{"site_name": site, "valid": False}])

        if self.topLevelSubscription is None:
            self.topLevelSubscription = subscription
            logging.info("Top level subscription created: %s" % subscription["id"])
        else:
            logging.info("Child subscription created: %s" % subscription["id"])

        outputModules = task.getOutputModulesForTask()
        for outputModule in outputModules:
            for outputModuleName in outputModule.listSections_():
                outputFileset = Fileset(self.outputFilesetName(task, outputModuleName))
                outputFileset.create()
                outputFileset.markOpen(True)
                mergedOutputFileset = None

                for childTask in task.childTaskIterator():
                    if childTask.data.input.outputModule == outputModuleName:
                        if childTask.taskType() == "Merge":
                            mergedOutputFileset = Fileset(self.outputFilesetName(childTask, "Merged"))
                            mergedOutputFileset.create()
                            mergedOutputFileset.markOpen(True)
                        self.createSubscription(topLevelFilesetName, childTask,
                                                outputFileset)

                if mergedOutputFileset is None:
                    workflow.addOutput(outputModuleName, outputFileset,
                                       outputFileset)
                else:
                    workflow.addOutput(outputModuleName, outputFileset,
                                       mergedOutputFileset)

        return self.topLevelSubscription
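Callers invoke createSubscription() once with no task or fileset; the base branch creates the top-level fileset and recurses over the spec's top-level tasks, and each recursive call wires up one task's workflow, subscription, site lists, and output filesets. A minimal driver might look like the sketch below, where the WMBSHelper class name and its constructor arguments are assumptions for illustration; only createSubscription() itself is taken from the code above.

# Hypothetical driver: the helper class name and constructor arguments are
# assumed for illustration.  Called with no task or fileset, the method
# creates the top level fileset itself and recurses over the whole spec.
helper = WMBSHelper(wmSpec, "SomeBlock")  # assumed constructor signature
topLevelSub = helper.createSubscription(topLevelFilesetName = "SomeBlock")
print "created top level subscription: %s" % topLevelSub["id"]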