def testMemCoresSettings(self):
    """
    _testMemCoresSettings_

    Verify that multicore, memory and event-stream settings are
    propagated to the proper steps and splitting parameters.
    """
    args = MonteCarloWorkloadFactory.getTestArguments()
    args["CouchURL"] = os.environ["COUCHURL"]
    args["CouchDBName"] = TEST_DB_NAME
    args["ConfigCacheID"] = self.injectMonteCarloConfig()

    factory = MonteCarloWorkloadFactory()
    workload = factory.factoryWorkloadConstruction("TestWorkload", args)

    # Without explicit settings every step runs single-core with no
    # extra event streams, and memory stays at the 2300 MB default.
    prodTask = workload.getTask('Production')
    for stepName in ('cmsRun1', 'stageOut1', 'logArch1'):
        helper = prodTask.getStepHelper(stepName)
        self.assertEqual(helper.getNumberOfCores(), 1)
        self.assertEqual(helper.getNumberOfStreams(), 0)
    perf = prodTask.jobSplittingParameters()['performance']
    self.assertEqual(perf['memoryRequirement'], 2300.0)

    # Explicit settings must land on the cmsRun step only; the
    # auxiliary steps keep the single-core defaults.
    args["Multicore"] = 6
    args["Memory"] = 4600.0
    args["EventStreams"] = 3
    workload = factory.factoryWorkloadConstruction("TestWorkload", args)
    prodTask = workload.getTask('Production')
    for stepName in ('cmsRun1', 'stageOut1', 'logArch1'):
        helper = prodTask.getStepHelper(stepName)
        expectedCores = args["Multicore"] if stepName == 'cmsRun1' else 1
        expectedStreams = args["EventStreams"] if stepName == 'cmsRun1' else 0
        self.assertEqual(helper.getNumberOfCores(), expectedCores)
        self.assertEqual(helper.getNumberOfStreams(), expectedStreams)
    perf = prodTask.jobSplittingParameters()['performance']
    self.assertEqual(perf['memoryRequirement'], args["Memory"])
    return
def testMCWithLHE(self):
    """
    _testMCWithLHE_

    Build a MonteCarlo workflow in its LHE variation (formerly
    LHEStepZero): the input consists of .lhe files and a job may
    span more than one lumi section.
    """
    args = MonteCarloWorkloadFactory.getTestArguments()
    args["CouchURL"] = os.environ["COUCHURL"]
    args["CouchDBName"] = TEST_DB_NAME
    args["ConfigCacheID"] = self.injectMonteCarloConfig()
    args["LheInputFiles"] = "True"
    args["EventsPerJob"] = 200
    args["EventsPerLumi"] = 50

    workload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload", args)

    wmbsHelper = WMBSHelper(workload, "Production", "SomeBlock",
                            cachepath=self.testInit.testDir)
    wmbsHelper.createTopLevelFileset()
    wmbsHelper._createSubscriptionsInWMBS(wmbsHelper.topLevelTask,
                                          wmbsHelper.topLevelFileset)

    self._commonMonteCarloTest()

    # The LHE-specific arguments must appear in the production task's
    # job-splitting parameters.
    splitParams = workload.getTaskByPath('/TestWorkload/Production').jobSplittingParameters()
    self.assertEqual(splitParams["events_per_job"], 200)
    self.assertEqual(splitParams["events_per_lumi"], 50)
    self.assertEqual(splitParams["lheInputFiles"], True)
    self.assertFalse(splitParams["deterministicPileup"])
    return
def testMCWithPileup(self):
    """
    _testMCWithPileup_

    Build a Monte Carlo workflow configured with data and MC pileup
    inputs, inject it into WMBS, and check that both pileup datasets
    and the deterministic-pileup flag are wired into the cmsRun step.
    """
    args = MonteCarloWorkloadFactory.getTestArguments()
    args["CouchURL"] = os.environ["COUCHURL"]
    args["CouchDBName"] = TEST_DB_NAME
    args["ConfigCacheID"] = self.injectMonteCarloConfig()
    # Pileup inputs: one MC dataset, one data dataset, deterministic mixing.
    args["MCPileup"] = COSMICS_PU
    args["DataPileup"] = DATA_PU
    args["DeterministicPileup"] = True

    workload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload", args)

    wmbsHelper = WMBSHelper(workload, "Production", "SomeBlock",
                            cachepath=self.testInit.testDir)
    wmbsHelper.createTopLevelFileset()
    wmbsHelper._createSubscriptionsInWMBS(wmbsHelper.topLevelTask,
                                          wmbsHelper.topLevelFileset)

    self._commonMonteCarloTest()

    prodTask = workload.getTaskByPath('/TestWorkload/Production')
    pileup = prodTask.getStep("cmsRun1").getTypeHelper().getPileup()
    self.assertEqual(pileup.data.dataset, [DATA_PU])
    self.assertEqual(pileup.mc.dataset, [COSMICS_PU])

    self.assertTrue(prodTask.jobSplittingParameters()["deterministicPileup"])
    return
def testPileupFetcherOnMC(self):
    """Exercise PileupFetcher on a MonteCarlo workload built with an explicit PileupConfig."""
    puDataset = "/Mu/PenguinsPenguinsEverywhere-SingleMu-HorriblyJaundicedYellowEyedPenginsSearchingForCarrots-v31/RECO"
    pileupMcArgs = MonteCarloWorkloadFactory.getTestArguments()
    pileupMcArgs["PileupConfig"] = {"cosmics": [puDataset],
                                    "minbias": [puDataset]}
    pileupMcArgs["CouchURL"] = os.environ["COUCHURL"]
    pileupMcArgs["CouchDBName"] = "pileupfetcher_t"
    pileupMcArgs["ConfigCacheID"] = self.injectGenerationConfig()

    workload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload", pileupMcArgs)

    # This tests the fetcher itself: fetching happens before the workflow
    # reaches WMBS, so the workload instance is fed straight to the fetcher
    # and no WMBS loading is required.
    fetcher = PileupFetcher()
    creator = SandboxCreator()
    pathBase = "%s/%s" % (self.testDir, workload.name())
    for topLevelTask in workload.taskIterator():
        for taskNode in topLevelTask.nodeIterator():
            # Mimic how SandboxCreator drives the PileupFetcher.
            task = WMTask.WMTaskHelper(taskNode)
            taskPath = "%s/WMSandbox/%s" % (pathBase, task.name())
            fetcher.setWorkingDirectory(taskPath)
            creator._makePathonPackage(taskPath)  # sandbox dir for the fetcher
            fetcher(task)
            self._queryPileUpConfigFile(pileupMcArgs, task, taskPath)
def testPileupFetcherOnMC(self):
    """Exercise PileupFetcher on a MC workload configured via MCPileup/DataPileup arguments."""
    args = MonteCarloWorkloadFactory.getTestArguments()
    args["MCPileup"] = "/Cosmics/ComissioningHI-PromptReco-v1/RECO"
    args["DataPileup"] = "/HighPileUp/Run2011A-v1/RAW"
    args["CouchURL"] = os.environ["COUCHURL"]
    args["CouchDBName"] = "pileupfetcher_t"
    args["ConfigCacheID"] = self.injectGenerationConfig()

    workload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload", args)

    # Only after the workload is built and the arguments validated do we
    # expand the pileup datasets into the PileupConfig the fetcher reads.
    args["PileupConfig"] = parsePileupConfig(args["MCPileup"], args["DataPileup"])

    # Fetching happens before the workflow is inserted into WMBS, so the
    # workload instance can be handed directly to the fetcher.
    fetcher = PileupFetcher()
    sandboxCreator = SandboxCreator()
    basePath = "%s/%s" % (self.testDir, workload.name())
    for topTask in workload.taskIterator():
        for node in topTask.nodeIterator():
            # Replicates the PileupFetcher invocation done by SandboxCreator.
            taskHelper = WMTask.WMTaskHelper(node)
            sandboxPath = "%s/WMSandbox/%s" % (basePath, taskHelper.name())
            fetcher.setWorkingDirectory(sandboxPath)
            sandboxCreator._makePathonPackage(sandboxPath)  # sandbox dir for the fetcher
            fetcher(taskHelper)
            self._queryPileUpConfigFile(args, taskHelper, sandboxPath)
def testPileupFetcherOnMC(self):
    """Run PileupFetcher over every task of a MonteCarlo workload carrying a PileupConfig."""
    pileupMcArgs = MonteCarloWorkloadFactory.getTestArguments()
    muDataset = ("/Mu/PenguinsPenguinsEverywhere-SingleMu-"
                 "HorriblyJaundicedYellowEyedPenginsSearchingForCarrots-v31/RECO")
    pileupMcArgs["PileupConfig"] = {"cosmics": [muDataset], "minbias": [muDataset]}
    pileupMcArgs["CouchURL"] = os.environ["COUCHURL"]
    pileupMcArgs["CouchDBName"] = "pileupfetcher_t"
    pileupMcArgs["ConfigCacheID"] = self.injectGenerationConfig()

    testWorkload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload",
                                                                           pileupMcArgs)

    # Fetcher test only: the fetch runs before the workflow is inserted
    # into WMBS, so the workload is fed directly into the fetcher and no
    # WMBS loading is performed here.
    fetcher = PileupFetcher()
    creator = SandboxCreator()
    pathBase = "%s/%s" % (self.testDir, testWorkload.name())
    for topLevelTask in testWorkload.taskIterator():
        for taskNode in topLevelTask.nodeIterator():
            # Same call sequence that SandboxCreator uses for the fetcher.
            task = WMTask.WMTaskHelper(taskNode)
            taskPath = "%s/WMSandbox/%s" % (pathBase, task.name())
            fetcher.setWorkingDirectory(taskPath)
            # Create the sandbox directory the fetcher writes into.
            creator._makePathonPackage(taskPath)
            fetcher(task)
            self._queryPileUpConfigFile(pileupMcArgs, task, taskPath)
def testMonteCarlo(self):
    """
    _testMonteCarlo_

    Build a plain Monte Carlo workflow, inject it into WMBS, and run
    the shared detailed verification.
    """
    args = MonteCarloWorkloadFactory.getTestArguments()
    args["CouchURL"] = os.environ["COUCHURL"]
    args["CouchDBName"] = TEST_DB_NAME
    args["ConfigCacheID"] = self.injectMonteCarloConfig()

    workload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload", args)

    helper = WMBSHelper(workload, "Production", "SomeBlock",
                        cachepath=self.testInit.testDir)
    helper.createTopLevelFileset()
    helper._createSubscriptionsInWMBS(helper.topLevelTask, helper.topLevelFileset)

    self._commonMonteCarloTest()
    return
def testPileupFetcherOnMC(self):
    """Drive PileupFetcher over a MC workload whose pileup comes from MCPileup/DataPileup."""
    pileupMcArgs = MonteCarloWorkloadFactory.getTestArguments()
    pileupMcArgs["MCPileup"] = "/Cosmics/ComissioningHI-PromptReco-v1/RECO"
    pileupMcArgs["DataPileup"] = "/HighPileUp/Run2011A-v1/RAW"
    pileupMcArgs["CouchURL"] = os.environ["COUCHURL"]
    pileupMcArgs["CouchDBName"] = "pileupfetcher_t"
    pileupMcArgs["ConfigCacheID"] = self.injectGenerationConfig()

    factory = MonteCarloWorkloadFactory()
    testWorkload = factory.factoryWorkloadConstruction("TestWorkload", pileupMcArgs)

    # The PileupConfig is derived only after the workload has been created
    # and the arguments validated.
    pileupMcArgs["PileupConfig"] = parsePileupConfig(pileupMcArgs["MCPileup"],
                                                     pileupMcArgs["DataPileup"])

    # Since this tests the fetcher, no WMBS loading is needed: fetching
    # happens before the workflow is inserted into WMBS, so the workload
    # instance goes straight into the fetcher.
    fetcher = PileupFetcher()
    creator = SandboxCreator()
    pathBase = "%s/%s" % (self.testDir, testWorkload.name())
    for topLevelTask in testWorkload.taskIterator():
        for taskNode in topLevelTask.nodeIterator():
            # This mirrors how SandboxCreator invokes PileupFetcher.
            task = WMTask.WMTaskHelper(taskNode)
            taskPath = "%s/WMSandbox/%s" % (pathBase, task.name())
            fetcher.setWorkingDirectory(taskPath)
            # Build the sandbox package directory first ...
            creator._makePathonPackage(taskPath)
            fetcher(task)
            self._queryPileUpConfigFile(pileupMcArgs, task, taskPath)
def testMemCoresSettings(self):
    """
    _testMemCoresSettings_

    Check that multicore, memory and streams settings end up on the
    right steps and in the job-splitting performance parameters.
    """
    arguments = MonteCarloWorkloadFactory.getTestArguments()
    arguments["CouchURL"] = os.environ["COUCHURL"]
    arguments["CouchDBName"] = TEST_DB_NAME
    arguments["ConfigCacheID"] = self.injectMonteCarloConfig()
    factory = MonteCarloWorkloadFactory()

    # Defaults: one core, no extra streams, 2300 MB memory requirement.
    workload = factory.factoryWorkloadConstruction("TestWorkload", arguments)
    prodTask = workload.getTask('Production')
    for stepName in ('cmsRun1', 'stageOut1', 'logArch1'):
        step = prodTask.getStepHelper(stepName)
        self.assertEqual(step.getNumberOfCores(), 1)
        self.assertEqual(step.getNumberOfStreams(), 0)
    self.assertEqual(prodTask.jobSplittingParameters()['performance']['memoryRequirement'],
                     2300.0)

    # Explicit resource arguments apply to cmsRun1 only; the stage-out
    # and log-archive steps keep the defaults.
    arguments["Multicore"] = 6
    arguments["Memory"] = 4600.0
    arguments["EventStreams"] = 3
    workload = factory.factoryWorkloadConstruction("TestWorkload", arguments)
    prodTask = workload.getTask('Production')
    for stepName in ('cmsRun1', 'stageOut1', 'logArch1'):
        step = prodTask.getStepHelper(stepName)
        if stepName == 'cmsRun1':
            self.assertEqual(step.getNumberOfCores(), arguments["Multicore"])
            self.assertEqual(step.getNumberOfStreams(), arguments["EventStreams"])
        else:
            self.assertEqual(step.getNumberOfCores(), 1)
            self.assertEqual(step.getNumberOfStreams(), 0)
    self.assertEqual(prodTask.jobSplittingParameters()['performance']['memoryRequirement'],
                     arguments["Memory"])
    return
def testMonteCarloExtension(self):
    """
    _testMonteCarloExtension_

    Build a Monte Carlo workflow that starts at a non-zero first lumi
    and first event, inject it into WMBS, and verify that the splitting
    arguments carry the correct initial LFN counter on the production
    task and on both merge tasks.
    """
    args = MonteCarloWorkloadFactory.getTestArguments()
    args["CouchURL"] = os.environ["COUCHURL"]
    args["CouchDBName"] = TEST_DB_NAME
    args["ConfigCacheID"] = self.injectMonteCarloConfig()
    args["FirstLumi"] = 10001
    args["EventsPerJob"] = 100
    args["FirstEvent"] = 10001
    # With EventsPerJob == EventsPerLumi, the number of previously
    # produced jobs equals the initial lumi number, hence a counter of 100.
    initialLfnCounter = 100

    workload = MonteCarloWorkloadFactory().factoryWorkloadConstruction("TestWorkload", args)

    wmbsHelper = WMBSHelper(workload, "Production", "SomeBlock",
                            cachepath=self.testInit.testDir)
    wmbsHelper.createTopLevelFileset()
    wmbsHelper._createSubscriptionsInWMBS(wmbsHelper.topLevelTask,
                                          wmbsHelper.topLevelFileset)

    self._commonMonteCarloTest()

    prodSplitting = workload.getTaskByPath('/TestWorkload/Production').jobSplittingParameters()
    self.assertTrue("initial_lfn_counter" in prodSplitting,
                    "No initial lfn counter was stored")
    self.assertEqual(prodSplitting["initial_lfn_counter"], initialLfnCounter,
                     "Wrong initial LFN counter")

    for outputMod in ["OutputA", "OutputB"]:
        mergeTask = workload.getTaskByPath(
            '/TestWorkload/Production/ProductionMerge%s' % outputMod)
        mergeSplitting = mergeTask.jobSplittingParameters()
        self.assertTrue("initial_lfn_counter" in mergeSplitting,
                        "No initial lfn counter was stored")
        self.assertEqual(mergeSplitting["initial_lfn_counter"], initialLfnCounter,
                         "Wrong initial LFN counter")
    return
def testMonteCarloExtension(self):
    """
    _testMonteCarloExtension_

    Monte Carlo workflow with a non-zero FirstLumi/FirstEvent: after
    injection into WMBS, the production task and each merge task must
    record the expected initial LFN counter in their splitting args.
    """
    mcArgs = MonteCarloWorkloadFactory.getTestArguments()
    mcArgs["CouchURL"] = os.environ["COUCHURL"]
    mcArgs["CouchDBName"] = TEST_DB_NAME
    mcArgs["ConfigCacheID"] = self.injectMonteCarloConfig()
    mcArgs["FirstLumi"] = 10001
    mcArgs["EventsPerJob"] = 100
    mcArgs["FirstEvent"] = 10001
    # EventsPerJob == EventsPerLumi, so the count of prior jobs equals
    # the initial lumi number: the counter starts at 100.
    expectedCounter = 100

    factory = MonteCarloWorkloadFactory()
    workload = factory.factoryWorkloadConstruction("TestWorkload", mcArgs)

    helper = WMBSHelper(workload, "Production", "SomeBlock",
                        cachepath=self.testInit.testDir)
    helper.createTopLevelFileset()
    helper._createSubscriptionsInWMBS(helper.topLevelTask, helper.topLevelFileset)

    self._commonMonteCarloTest()

    taskPaths = ["/TestWorkload/Production"]
    taskPaths += ["/TestWorkload/Production/ProductionMerge%s" % outputMod
                  for outputMod in ["OutputA", "OutputB"]]
    for taskPath in taskPaths:
        splitting = workload.getTaskByPath(taskPath).jobSplittingParameters()
        self.assertTrue("initial_lfn_counter" in splitting,
                        "No initial lfn counter was stored")
        self.assertEqual(splitting["initial_lfn_counter"], expectedCounter,
                         "Wrong initial LFN counter")
    return
def testMonteCarlo(self):
    """
    _testMonteCarlo_

    Create a Monte Carlo workflow and verify that it is injected correctly
    into WMBS and invoke its detailed test.
    """
    defaultArguments = MonteCarloWorkloadFactory.getTestArguments()
    defaultArguments["CouchURL"] = os.environ["COUCHURL"]
    # Fix: point the config cache at the test database, consistent with the
    # sibling tests in this suite; without it the factory would read the
    # config from whatever default CouchDBName the test arguments carry.
    defaultArguments["CouchDBName"] = TEST_DB_NAME
    defaultArguments["ConfigCacheID"] = self.injectMonteCarloConfig()

    factory = MonteCarloWorkloadFactory()
    testWorkload = factory.factoryWorkloadConstruction("TestWorkload", defaultArguments)

    testWMBSHelper = WMBSHelper(testWorkload, "Production", "SomeBlock",
                                cachepath=self.testInit.testDir)
    testWMBSHelper.createTopLevelFileset()
    testWMBSHelper._createSubscriptionsInWMBS(testWMBSHelper.topLevelTask,
                                              testWMBSHelper.topLevelFileset)

    self._commonMonteCarloTest()
    return
def testFilesets(self):
    """
    Test workflow tasks, filesets and subscriptions creation
    """
    # expected tasks, filesets, subscriptions, etc
    expOutTasks = ['/TestWorkload/Production',
                   '/TestWorkload/Production/ProductionMergeOutputB',
                   '/TestWorkload/Production/ProductionMergeOutputA']
    expWfTasks = ['/TestWorkload/Production',
                  '/TestWorkload/Production/LogCollect',
                  '/TestWorkload/Production/ProductionCleanupUnmergedOutputA',
                  '/TestWorkload/Production/ProductionCleanupUnmergedOutputB',
                  '/TestWorkload/Production/ProductionMergeOutputA',
                  '/TestWorkload/Production/ProductionMergeOutputA/ProductionOutputAMergeLogCollect',
                  '/TestWorkload/Production/ProductionMergeOutputB',
                  '/TestWorkload/Production/ProductionMergeOutputB/ProductionOutputBMergeLogCollect']
    # First entry is a placeholder: the top-level fileset name embeds an
    # md5 of the run/lumi mask, which is only known once the mask exists.
    expFsets = ['FILESET_DEFINED_DURING_RUNTIME',
                '/TestWorkload/Production/unmerged-OutputBUSER',
                '/TestWorkload/Production/ProductionMergeOutputA/merged-logArchive',
                '/TestWorkload/Production/ProductionMergeOutputA/merged-MergedRECO',
                '/TestWorkload/Production/ProductionMergeOutputB/merged-logArchive',
                '/TestWorkload/Production/ProductionMergeOutputB/merged-MergedUSER',
                '/TestWorkload/Production/unmerged-logArchive',
                '/TestWorkload/Production/unmerged-OutputARECO']
    # Each tuple: (subscription id, input fileset, workflow task path,
    # splitting algorithm, subscription type).
    # NOTE(review): the hard-coded ids appear to follow WMBS insertion
    # order — confirm against the schema if these assertions break.
    subMaps = ['FILESET_DEFINED_DURING_RUNTIME',
               (6, '/TestWorkload/Production/ProductionMergeOutputA/merged-logArchive',
                '/TestWorkload/Production/ProductionMergeOutputA/ProductionOutputAMergeLogCollect',
                'MinFileBased', 'LogCollect'),
               (3, '/TestWorkload/Production/ProductionMergeOutputB/merged-logArchive',
                '/TestWorkload/Production/ProductionMergeOutputB/ProductionOutputBMergeLogCollect',
                'MinFileBased', 'LogCollect'),
               (8, '/TestWorkload/Production/unmerged-logArchive',
                '/TestWorkload/Production/LogCollect',
                'MinFileBased', 'LogCollect'),
               (7, '/TestWorkload/Production/unmerged-OutputARECO',
                '/TestWorkload/Production/ProductionCleanupUnmergedOutputA',
                'SiblingProcessingBased', 'Cleanup'),
               (5, '/TestWorkload/Production/unmerged-OutputARECO',
                '/TestWorkload/Production/ProductionMergeOutputA',
                'ParentlessMergeBySize', 'Merge'),
               (4, '/TestWorkload/Production/unmerged-OutputBUSER',
                '/TestWorkload/Production/ProductionCleanupUnmergedOutputB',
                'SiblingProcessingBased', 'Cleanup'),
               (2, '/TestWorkload/Production/unmerged-OutputBUSER',
                '/TestWorkload/Production/ProductionMergeOutputB',
                'ParentlessMergeBySize', 'Merge')]

    testArguments = MonteCarloWorkloadFactory.getTestArguments()
    testArguments["CouchURL"] = os.environ["COUCHURL"]
    testArguments["CouchDBName"] = TEST_DB_NAME
    testArguments["ConfigCacheID"] = self.injectMonteCarloConfig()

    factory = MonteCarloWorkloadFactory()
    testWorkload = factory.factoryWorkloadConstruction("TestWorkload", testArguments)

    # First top-level subscription: lumis 1-10, events 1-1000 of run 1.
    myMask = Mask(FirstRun=1, FirstLumi=1, FirstEvent=1,
                  LastRun=1, LastLumi=10, LastEvent=1000)
    testWMBSHelper = WMBSHelper(testWorkload, "Production", mask=myMask,
                                cachepath=self.testInit.testDir)
    testWMBSHelper.createTopLevelFileset()
    testWMBSHelper._createSubscriptionsInWMBS(testWMBSHelper.topLevelTask,
                                              testWMBSHelper.topLevelFileset)

    self.assertItemsEqual(testWorkload.listOutputProducingTasks(), expOutTasks)

    workflows = self.listTasksByWorkflow.execute(workflow="TestWorkload")
    self.assertItemsEqual([item['task'] for item in workflows], expWfTasks)

    # same function as in WMBSHelper, otherwise we cannot know which fileset name is
    maskString = ",".join(["%s=%s" % (x, myMask[x]) for x in sorted(myMask)])
    topFilesetName = 'TestWorkload-Production-%s' % md5(maskString).hexdigest()
    # Replace the runtime placeholder with the mask-derived fileset name.
    expFsets[0] = topFilesetName
    # returns a tuple of id, name, open and last_update
    filesets = self.listFilesets.execute()
    self.assertItemsEqual([item[1] for item in filesets], expFsets)

    subMaps[0] = (1, topFilesetName, '/TestWorkload/Production', 'EventBased', 'Production')
    subscriptions = self.listSubsMapping.execute(workflow="TestWorkload", returnTuple=True)
    self.assertItemsEqual(subscriptions, subMaps)

    ### create another top level subscription
    # Second mask (lumis 11-20, events 1001-2000) yields a new top-level
    # fileset and one additional subscription on the same workflow.
    myMask = Mask(FirstRun=1, FirstLumi=11, FirstEvent=1001,
                  LastRun=1, LastLumi=20, LastEvent=2000)
    testWMBSHelper = WMBSHelper(testWorkload, "Production", mask=myMask,
                                cachepath=self.testInit.testDir)
    testWMBSHelper.createTopLevelFileset()
    testWMBSHelper._createSubscriptionsInWMBS(testWMBSHelper.topLevelTask,
                                              testWMBSHelper.topLevelFileset)

    workflows = self.listTasksByWorkflow.execute(workflow="TestWorkload")
    self.assertItemsEqual([item['task'] for item in workflows], expWfTasks)

    # same function as in WMBSHelper, otherwise we cannot know which fileset name is
    maskString = ",".join(["%s=%s" % (x, myMask[x]) for x in sorted(myMask)])
    topFilesetName = 'TestWorkload-Production-%s' % md5(maskString).hexdigest()
    expFsets.append(topFilesetName)
    # returns a tuple of id, name, open and last_update
    filesets = self.listFilesets.execute()
    self.assertItemsEqual([item[1] for item in filesets], expFsets)

    subMaps.append((9, topFilesetName, '/TestWorkload/Production', 'EventBased', 'Production'))
    subscriptions = self.listSubsMapping.execute(workflow="TestWorkload", returnTuple=True)
    self.assertItemsEqual(subscriptions, subMaps)