Example #1
0
class AlertProcessorTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS",'WMCore.Agent.Database',
                                                 "WMCore.ResourceControl"],
                                 useDefault = False)
        self.testDir = self.testInit.generateWorkDir()

        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")

        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")

        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")

        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3

        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")


    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()


    def testAlertProcessorBasic(self):
        alertProcessor = AlertProcessor(self.config)
        try:
            # alertProcessor.startComponent() would block here: in Harness.py that
            # method just calls prepareToStart() and then waits forever.
            # alertProcessor.startDaemon() is no good for this either ... it puts
            # everything in the background.
            alertProcessor.prepareToStart()
        except Exception, ex:
            print ex
            self.fail(str(ex))

        logging.debug("AlertProcessor and its sub-components should be running now ...")
        logging.debug("Going to stop the component ...")

        # stop via component method
        try:
            alertProcessor.stopAlertProcessor()
        except Exception, ex:
            print ex
            self.fail(str(ex))
Example #2
0
class CouchTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        # mock generator instance to communicate some configuration values
        self.generator = utils.AlertGeneratorMock(self.config)        
        self.testProcesses = []
        self.testName = self.id().split('.')[-1]
         
        
    def tearDown(self):       
        self.testInit.delWorkDir()
        self.generator = None
        utils.terminateProcesses(self.testProcesses)


    def testAlertGeneratorCouchDbSizePollerBasic(self):
        config = getConfig("/tmp")
        try:
            poller = CouchDbSizePoller(config.AlertGenerator.couchDbSizePoller, self.generator)
        except Exception, ex:
            self.fail("%s: exception: %s" % (self.testName, ex))
        poller.check() # -> on a real system the db directory may result in permission denied
        poller._dbDirectory = "/dev"
        poller.check() # -> OK
        
        # test failing during set up
        poller = CouchDbSizePoller(config.AlertGenerator.couchDbSizePoller, self.generator)
        poller._query = "nonsense query"
        poller._dbDirectory = poller._getDbDir()
        poller.check()
        self.assertEquals(poller._dbDirectory, None)        
Example #3
0
class WorkQueueTestCase(unittest.TestCase):

    def setSchema(self):
        "this can be override if the schema setting is different"
        self.schema = ["WMCore.WMBS","WMComponent.DBS3Buffer","WMCore.BossAir"]
        self.couchApps = ["WorkQueue"]

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """
        self.queueDB = 'workqueue_t'
        self.queueInboxDB = 'workqueue_t_inbox'
        self.globalQDB = 'workqueue_t_global'
        self.globalQInboxDB = 'workqueue_t_global_inbox'
        self.localQDB = 'workqueue_t_local'
        self.localQInboxDB = 'workqueue_t_local_inbox'
        self.localQDB2 = 'workqueue_t_local2'
        self.localQInboxDB2 = 'workqueue_t_local2_inbox'
        self.configCacheDB = 'workqueue_t_config_cache'

        self.setSchema()
        self.testInit = TestInit('WorkQueueTest')
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = self.schema,
                                useDefault = False)
        self.testInit.setupCouch(self.queueDB, *self.couchApps)
        self.testInit.setupCouch(self.queueInboxDB, *self.couchApps)
        self.testInit.setupCouch(self.globalQDB, *self.couchApps)
        self.testInit.setupCouch(self.globalQInboxDB , *self.couchApps)
        self.testInit.setupCouch(self.localQDB, *self.couchApps)
        self.testInit.setupCouch(self.localQInboxDB, *self.couchApps)
        self.testInit.setupCouch(self.localQDB2, *self.couchApps)
        self.testInit.setupCouch(self.localQInboxDB2, *self.couchApps)
        self.testInit.setupCouch(self.configCacheDB, 'ConfigCache')

        couchServer = CouchServer(os.environ.get("COUCHURL"))
        self.configCacheDBInstance = couchServer.connectDatabase(self.configCacheDB)

        self.workDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        #self.testInit.tearDownCouch()
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
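
# A minimal sketch of the setSchema() override hook described in WorkQueueTestCase;
# the subclass name and module list below are illustrative only, not taken from the
# original test suite.
class BossAirOnlyWorkQueueTest(WorkQueueTestCase):

    def setSchema(self):
        # install a different set of schema modules / couch apps before setUp() runs
        self.schema = ["WMCore.WMBS", "WMCore.BossAir"]
        self.couchApps = ["WorkQueue"]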
Example #4
0
class DaemonTest(unittest.TestCase):
    """
    _Daemon_t_
    
    Unit tests for message services: subscription, priority subscription, buffers,
    etc.
    
    """

    # minimum number of messages that need to be in queue
    _minMsg = 20
    # number of publish and gets from queue
    _publishAndGet = 10

    def setUp(self):
        "make a logger instance and create tables"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.tempDir = tempfile.mkdtemp()

    def tearDown(self):
        """
        Deletion of the databases 
        """
        self.testInit.clearDatabase()
        shutil.rmtree( self.tempDir, True )
               
    def testA(self):
        """
        __testSubscribe__
        
        Test daemon creation
        """
        # keep the parent alive
        self.pid = createDaemon(self.tempDir, True)
        try:
            try:
                if self.pid != 0 :
                    time.sleep(2)
                    details = Details(os.path.join(self.tempDir,"Daemon.xml"))
                    time.sleep(10)
                    details.killWithPrejudice()
                else:
                    while True:
                        time.sleep(1)
            except:
                pass
        finally:
            if self.pid == 0:
                os._exit(-1)
            else:
                os.system('kill -9 %s' % self.pid)
Example #5
0
class SetupCMSSWPsetTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testDir = self.testInit.generateWorkDir()
        sys.path.insert(0, os.path.join(WMCore.WMBase.getTestBase(),
                                        "WMCore_t/WMRuntime_t/Scripts_t"))

    def tearDown(self):
        sys.path.remove(os.path.join(WMCore.WMBase.getTestBase(),
                                     "WMCore_t/WMRuntime_t/Scripts_t"))
        del sys.modules["WMTaskSpace"]
        self.testInit.delWorkDir()

    def createTestStep(self):
        """
        _createTestStep_

        Create a test step that can be passed to the setup script.

        """
        newStep = WMStep("cmsRun1")
        newStepHelper = CMSSWStepHelper(newStep)
        newStepHelper.setStepType("CMSSW")
        newStepHelper.setGlobalTag("SomeGlobalTag")
        stepTemplate = StepFactory.getStepTemplate("CMSSW")
        stepTemplate(newStep)
        newStep.application.command.configuration = "PSet.py"
        newStep.application.multicore.numberOfCores = "auto"
        return newStepHelper

    def loadProcessFromPSet(self):
        """
        _loadProcessFromPSet_

        This requires changing the working directory,
        do so in a safe manner to encapsulate the change to this method only
        """

        currentPath = os.getcwd()
        loadedProcess = None
        try:
            if not os.path.isdir(self.testDir):
                raise RuntimeError("test directory %s does not exist" % self.testDir)
            os.chdir(self.testDir)
            testFile = "PSet.py"
            pset = imp.load_source('process', testFile)
            loadedProcess = pset.process
        except Exception, ex:
            self.fail("An exception was caught while trying to load the PSet, %s" % str(ex))
        finally:
            os.chdir(currentPath)

        return loadedProcess
Example #6
0
def main():
    options = simpleObject()
    options.configFilePath = createConfigFile()
    options.inputDataFilePath = createInputData()

    testInit = TestInit(__file__)
    testInit.setLogging()
    testInit.setDatabaseConnection(destroyAllDatabase=True)
    testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                       useDefault = False)

    injector = Injector(options)
    injector.prepareDBSFiles()
    injector.createFilesInDBSBuffer()
Example #7
0
class AlertGeneratorTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testInit.clearDatabase()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS",'WMCore.Agent.Database',
                                                 "WMCore.ResourceControl"],
                                useDefault = False)
        self.testDir = self.testInit.generateWorkDir()
        # AlertGenerator instance
        self.generator = None
        
        self.config = getConfig(self.testDir)
         
        self.config.section_("CoreDatabase")
        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")
        
        self.testProcesses = []
        self.testComponentDaemonXml = "/tmp/TestComponent/Daemon.xml" 
                

    def tearDown(self):
        self.testInit.clearDatabase()       
        self.testInit.delWorkDir()
        self.generator = None
        utils.terminateProcesses(self.testProcesses)
        # if the directory holding "/tmp/TestComponent/Daemon.xml" still exists
        # after the ComponentsPoller test, delete it
        d = os.path.dirname(self.testComponentDaemonXml)
        if os.path.exists(d):
            shutil.rmtree(d)

        
    def _startComponent(self):
        self.generator = AlertGenerator(self.config)
        try:
            # self.proc.startComponent() would block here: in Harness.py that
            # method just calls prepareToStart() and then waits forever.
            # self.proc.startDaemon() is no good for this either ... it puts
            # everything in the background.
            self.generator.prepareToStart()
        except Exception, ex:
            print ex
            self.fail(str(ex))
        print "AlertGenerator and its sub-components should be running now ..."
Example #8
0
class DBSBufferDatasetTest(unittest.TestCase):
    """
    _DBSBufferDatasetTest_

    Collection of tests for the DBSBuffer object
    """
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

    def tearDown(self):
        self.testInit.clearDatabase()

    def testBasic(self):
        """
        _testBasic_

        Test the basic functions of the DBSBufferDataset,
        create, load, exists and also the ability
        to add subscriptions.
        """
        originalDataset = DBSBufferDataset(path = '/bogus/bogus/go')
        originalDataset.create()
        myThread = threading.currentThread()
        result = myThread.dbi.processData("SELECT id FROM dbsbuffer_dataset")[0].fetchall()
        self.assertEqual(originalDataset.exists(), result[0][0])
        duplicateDataset = DBSBufferDataset(path = '/bogus/bogus/go')
        duplicateDataset.create()
        self.assertEqual(originalDataset.exists(), duplicateDataset.exists())
        result = myThread.dbi.processData("SELECT COUNT(id) FROM dbsbuffer_dataset")[0].fetchall()
        self.assertEqual(result[0][0], 1)
        loadedDataset = DBSBufferDataset(path = '/bogus/bogus/go')
        loadedDataset.load()
        self.assertEqual(loadedDataset.exists(), originalDataset.exists())
        secondDataset = DBSBufferDataset(path = '/BogusPrimary/Run2012Z-PromptReco-v1/RECO')
        secondDataset.create()
        workload = WMWorkloadHelper()
        workload.load(os.path.join(getTestBase(), 'WMComponent_t/PhEDExInjector_t/specs/TestWorkload.pkl'))
        secondDataset.addSubscription(workload.getSubscriptionInformation()['/BogusPrimary/Run2012Z-PromptReco-v1/RECO'])
        secondDataset.addSubscription(workload.getSubscriptionInformation()['/BogusPrimary/Run2012Z-PromptReco-v1/RECO'])
        self.assertEqual(len(secondDataset['subscriptions']), 3)
        result = myThread.dbi.processData("SELECT COUNT(id) FROM dbsbuffer_dataset_subscription")[0].fetchall()
        self.assertEqual(result[0][0], 3)
        return
Example #9
0
class RESTBaseUnitTest(unittest.TestCase):
    
    def setUp(self):
        # default set
        self.schemaModules = []
        self.initialize()
        if self.schemaModules:
            print "Initializing schema : %s" % self.schemaModules
            self.testInit = TestInit(__file__)
            self.testInit.setLogging() # logLevel = logging.SQLDEBUG
            print "Database URL: %s" % self.config.getDBUrl()
            self.testInit.setDatabaseConnection(self.config.getDBUrl())
            #self.testInit.setDatabaseConnection()
            self.testInit.setSchema(customModules = self.schemaModules,
                                    useDefault = False)
        
        print "Starting Cherrypy server ..."
        self.rt = configureServer(config = self.config)
        self.rt.start(blocking=False)
        cherrypy.log.error_log.setLevel(logging.WARNING)
        cherrypy.log.access_log.setLevel(logging.WARNING)
        
    def tearDown(self):
        print "Stopping Cherrypy server ..."
        self.rt.stop()
        
        if self.schemaModules:
            print "Cleaning up database ..."
            self.testInit.clearDatabase()
        self.config = None
        
    def initialize(self):
        """
        i.e.
        
        self.config = DefaultConfig('WMCore.WebTools.RESTModel')
        self.config.setDBUrl("sqlite://")
        self.schemaModules = ["WMCore.ThreadPool", WMCore.WMBS"]
        """
        
        message = "initialize method has to be implemented, self.restModel, self.schemaModules needs to be set"
        raise NotImplementedError, message
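
# A minimal sketch of the initialize() contract described in the docstring above;
# the subclass name is hypothetical and DefaultConfig is used exactly as in that
# docstring example, so treat this as an illustration rather than a real test case.
class DummyRESTModelTest(RESTBaseUnitTest):

    def initialize(self):
        # set the configuration and the schema modules before the base setUp()
        # connects to the database and starts the CherryPy server
        self.config = DefaultConfig('WMCore.WebTools.RESTModel')
        self.config.setDBUrl("sqlite://")
        self.schemaModules = ["WMCore.ThreadPool", "WMCore.WMBS"]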
Example #10
0
class MySQLTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        # mock generator instance to communicate some configuration values
        self.generator = utils.AlertGeneratorMock(self.config)        
        self.testProcesses = []
        self.testName = self.id().split('.')[-1]
         
        
    def tearDown(self):       
        self.testInit.delWorkDir()
        self.generator = None
        utils.terminateProcesses(self.testProcesses)
        

    def testMySQLPollerBasic(self):
        config = getConfig("/tmp")
        generator = utils.AlertGeneratorMock(config)
        # use, for instance, the mysqlCPUPoller configuration here; we just need
        # the appropriate attributes to be set
        try:
            poller = MySQLPoller(config.AlertGenerator.mysqlCPUPoller, generator)
        except Exception, ex:
            self.fail("%s: exception: %s" % (self.testName, ex))                                
        # this class does not define a polling sample function, so give it one
        poller.sample = lambda proc: float(12)
        self.assertEqual(len(poller._measurements), 0)
        poller.check()
        self.assertEqual(len(poller._measurements), 1)
        self.assertEqual(poller._measurements[0], 12)
        
        # test handling of a non-existing process
        MySQLPoller._getProcessPID = lambda inst: 1212121212
        poller = MySQLPoller(config.AlertGenerator.mysqlCPUPoller, generator)
        # polling should not even happen, so there is no need to define a sample function
        poller.check()
        self.assertEqual(poller._measurements, None)
        self.assertEqual(poller._dbProcessDetail, None)
Example #11
0
class FileSinkTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.testDir = self.testInit.generateWorkDir()
        self.config = ConfigSection("file")
        self.config.outputfile = os.path.join(self.testDir, "FileSinkTestNew.json")

    def tearDown(self):
        self.testInit.delWorkDir()

    def testFileSinkBasic(self):
        sink = FileSink(self.config)
        alerts = []
        nAlerts = 10
        for i in range(nAlerts):
            a = Alert(Source=__file__, Level=i, Timestamp=time.time(), Type="Test")
            alerts.append(a)
        sink.send(alerts)
        # test by reading back
        loadAlerts = sink.load()
        self.assertEqual(len(loadAlerts), nAlerts)

        # Since the FileSink implementation depends on line-separated JSON
        # representations of Alert instances, test the handling of newlines in the payload
        alerts = []
        testMsg = "addtional \n message"
        for i in range(10, 20):
            a = Alert(Source=__file__, Level=i, Timestamp=time.time(), Type="Test", Details={"message": testMsg})
            alerts.append(a)
        self.failUnless(os.path.exists(self.config.outputfile))
        sink.send(alerts)
        # test by reading back
        loadAlerts = sink.load()
        self.assertEqual(len(loadAlerts), 20)
        for a in loadAlerts[10:]:
            self.assertEqual(a["Details"]["message"], testMsg)
Example #12
0
class RucioInjectorPollerTest(EmulatedUnitTestCase):
    """
    Tests for the RucioInjectorPoller component
    """

    def setUp(self):
        """
        Install the DBSBuffer schema into the database and connect to Rucio.
        """
        super(RucioInjectorPollerTest, self).setUp()
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)

        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="DBSBufferFiles.AddLocation")
        self.locations = ["T2_CH_CERN", "T1_US_FNAL_Disk"]
        for rse in self.locations:
            locationAction.execute(siteName=rse)

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/SampleA/PromptReco-v1/RECO"
        self.testDatasetB = "/SampleB/CRUZET11-v1/RAW"

        if PY3:
            self.assertItemsEqual = self.assertCountEqual

        return

    def tearDown(self):
        """
        Delete the database.
        """
        self.testInit.clearDatabase()

    def createConfig(self):
        """
        Create a basic configuration for the component
        """
        config = self.testInit.getConfiguration()
        config.component_("RucioInjector")
        config.RucioInjector.pollInterval = 300
        config.RucioInjector.pollIntervalRules = 43200
        config.RucioInjector.cacheExpiration = 2 * 24 * 60 * 60  # two days
        config.RucioInjector.createBlockRules = True
        config.RucioInjector.RSEPostfix = False  # enable it to append _Test to the RSE names
        config.RucioInjector.metaDIDProject = "Production"
        config.RucioInjector.containerDiskRuleParams = {"weight": "ddm_quota", "copies": 2, "grouping": "DATASET"}
        config.RucioInjector.containerDiskRuleRSEExpr = "(tier=2|tier=1)&cms_type=real&rse_type=DISK"
        config.RucioInjector.rucioAccount = "wma_test"
        config.RucioInjector.rucioUrl = "http://cmsrucio-int.cern.ch"
        config.RucioInjector.rucioAuthUrl = "https://cmsrucio-auth-int.cern.ch"
        return config

    def stuffDatabase(self):
        """
        Fill the dbsbuffer tables with some files and blocks.  We'll insert a total
        of 5 files spanning two blocks.  There will be a total of two datasets
        inserted into the database.
        We'll inject files with the location set as an SE name as well as a
        PhEDEx node name.
        """
        myThread = threading.currentThread()

        # Create the DAOs factory and the relevant instances
        buffer3Factory = DAOFactory(package="WMComponent.DBS3Buffer",
                                    logger=myThread.logger,
                                    dbinterface=myThread.dbi)
        setBlock = buffer3Factory(classname="DBSBufferFiles.SetBlock")
        fileStatus = buffer3Factory(classname="DBSBufferFiles.SetStatus")
        associateWorkflow = buffer3Factory(classname="DBSBufferFiles.AssociateWorkflowToFile")
        insertWorkflow = buffer3Factory(classname="InsertWorkflow")
        datasetAction = buffer3Factory(classname="NewDataset")
        createAction = buffer3Factory(classname="CreateBlocks")

        # Create workflow in the database
        insertWorkflow.execute("BogusRequest", "BogusTask", 0, 0, 0, 0)

        # First file on first block
        checksums = {"adler32": "1234", "cksum": "5678"}
        testFileA = DBSBufferFile(lfn=makeUUID(), size=1024, events=10,
                                  checksums=checksums,
                                  locations=set(["T2_CH_CERN"]))
        testFileA.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8",
                               appFam="RECO", psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath(self.testDatasetA)
        testFileA.addRun(Run(2, *[45]))
        testFileA.create()

        # Second file on first block
        testFileB = DBSBufferFile(lfn=makeUUID(), size=1024, events=10,
                                  checksums=checksums,
                                  locations=set(["T2_CH_CERN"]))
        testFileB.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8",
                               appFam="RECO", psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileB.setDatasetPath(self.testDatasetA)
        testFileB.addRun(Run(2, *[45]))
        testFileB.create()

        # Third file on first block
        testFileC = DBSBufferFile(lfn=makeUUID(), size=1024, events=10,
                                  checksums=checksums,
                                  locations=set(["T2_CH_CERN"]))
        testFileC.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8",
                               appFam="RECO", psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileC.setDatasetPath(self.testDatasetA)
        testFileC.addRun(Run(2, *[45]))
        testFileC.create()

        self.testFilesA.append(testFileA)
        self.testFilesA.append(testFileB)
        self.testFilesA.append(testFileC)

        # First file on second block
        testFileD = DBSBufferFile(lfn=makeUUID(), size=1024, events=10,
                                  checksums=checksums,
                                  locations=set(["T1_US_FNAL_Disk"]))
        testFileD.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8",
                               appFam="RECO", psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileD.setDatasetPath(self.testDatasetB)
        testFileD.addRun(Run(2, *[45]))
        testFileD.create()

        # Second file on second block
        testFileE = DBSBufferFile(lfn=makeUUID(), size=1024, events=10,
                                  checksums=checksums,
                                  locations=set(["T1_US_FNAL_Disk"]))
        testFileE.setAlgorithm(appName="cmsRun", appVer="CMSSW_2_1_8",
                               appFam="RECO", psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileE.setDatasetPath(self.testDatasetB)
        testFileE.addRun(Run(2, *[45]))
        testFileE.create()

        self.testFilesB.append(testFileD)
        self.testFilesB.append(testFileE)

        # insert datasets in the dbsbuffer table
        datasetAction.execute(datasetPath=self.testDatasetA)
        datasetAction.execute(datasetPath=self.testDatasetB)

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()

        # create and insert blocks into dbsbuffer table
        newBlockA = DBSBufferBlock(name=self.blockAName,
                                   location="T2_CH_CERN",
                                   datasetpath=None)
        newBlockA.setDataset(self.testDatasetA, 'data', 'VALID')
        newBlockA.status = 'Closed'

        newBlockB = DBSBufferBlock(name=self.blockBName,
                                   location="T1_US_FNAL_Disk",
                                   datasetpath=None)
        newBlockB.setDataset(self.testDatasetB, 'data', 'VALID')
        newBlockB.status = 'Closed'

        createAction.execute(blocks=[newBlockA, newBlockB])

        # associate files to their correspondent block id
        setBlock.execute(testFileA["lfn"], self.blockAName)
        setBlock.execute(testFileB["lfn"], self.blockAName)
        setBlock.execute(testFileC["lfn"], self.blockAName)
        setBlock.execute(testFileD["lfn"], self.blockBName)
        setBlock.execute(testFileE["lfn"], self.blockBName)

        # set file status to LOCAL
        fileStatus.execute(testFileA["lfn"], "LOCAL")
        fileStatus.execute(testFileB["lfn"], "LOCAL")
        fileStatus.execute(testFileC["lfn"], "LOCAL")
        fileStatus.execute(testFileD["lfn"], "LOCAL")
        fileStatus.execute(testFileE["lfn"], "LOCAL")

        # associate files to a given workflow
        associateWorkflow.execute(testFileA["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileB["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileC["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileD["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileE["lfn"], "BogusRequest", "BogusTask")

        return

    def testBadConfig(self):
        """
        Test wrong component configuration
        """
        config = self.createConfig()
        config.RucioInjector.metaDIDProject = "Very invalid project name"

        with self.assertRaises(RucioInjectorException):
            RucioInjectorPoller(config)

    def testActivityMap(self):
        """
        Initialize a RucioInjectorPoller object and test `_activityMap` method
        """
        poller = RucioInjectorPoller(self.createConfig())
        # test production agent and non-Tape endpoint
        activity = poller._activityMap("T1_US_FNAL_Disk")
        self.assertEquals(activity, "Production Output")
        activity = poller._activityMap("T1_US_FNAL_Test")
        self.assertEquals(activity, "Production Output")
        # test production agent and Tape endpoint (which is forbidden at the moment)
        with self.assertRaises(WMRucioException):
            poller._activityMap("T1_US_FNAL_Tape")

        # now pretend it to be a T0 agent/component
        poller.isT0agent = True
        # test T0 agent and non-Tape endpoint
        activity = poller._activityMap("T1_US_FNAL_Disk")
        self.assertEquals(activity, "T0 Export")
        activity = poller._activityMap("T1_US_FNAL_Test")
        self.assertEquals(activity, "T0 Export")
        # test T0 agent and Tape endpoint
        activity = poller._activityMap("T1_US_FNAL_Tape")
        self.assertEquals(activity, "T0 Tape")

    def testLoadingFiles(self):
        """
        Initialize a RucioInjectorPoller object and load uninjected files
        """
        self.stuffDatabase()
        poller = RucioInjectorPoller(self.createConfig())
        poller.setup(parameters=None)
        uninjectedFiles = poller.getUninjected.execute()
        self.assertItemsEqual(list(uninjectedFiles), self.locations)
        self.assertEquals(list(uninjectedFiles["T2_CH_CERN"]), [self.testDatasetA])
        self.assertEquals(list(uninjectedFiles["T1_US_FNAL_Disk"]), [self.testDatasetB])
Example #13
0
class PhEDExInjectorPollerTest(unittest.TestCase):
    """
    _PhEDExInjectorPollerTest_

    Unit tests for the PhEDExInjector.  Create some database inside DBSBuffer
    and then have the PhEDExInjector upload the data to PhEDEx.  Pull the data
    back down and verify that everything is complete.
    """

    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """
        self.phedexURL = "https://cmsweb.cern.ch/phedex/datasvc/json/test"
        self.dbsURL = "http://vocms09.cern.ch:8880/cms_dbs_int_local_yy_writer/servlet/DBSServlet"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase = True)

        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationAction = daofactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "srm-cms.cern.ch")
        locationAction.execute(siteName = "se.fnal.gov")

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/%s/PromptReco-v1/RECO" % makeUUID()
        self.testDatasetB = "/%s/CRUZET11-v1/RAW" % makeUUID()
        self.phedex = PhEDEx({"endpoint": self.phedexURL}, "json")

        return

    def tearDown(self):
        """
        _tearDown_

        Delete the database.
        """
        self.testInit.clearDatabase()

    def stuffDatabase(self):
        """
        _stuffDatabase_

        Fill the dbsbuffer with some files and blocks.  We'll insert a total
        of 5 files spanning two blocks.  There will be a total of two datasets
        inserted into the database.

        We'll inject files with the location set as an SE name as well as a
        PhEDEx node name.
        """
        myThread = threading.currentThread()

        buffer3Factory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)
        insertWorkflow = buffer3Factory(classname = "InsertWorkflow")
        insertWorkflow.execute("BogusRequest", "BogusTask",
                               0, 0, 0, 0)

        checksums = {"adler32": "1234", "cksum": "5678"}
        testFileA = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath(self.testDatasetA)
        testFileA.addRun(Run(2, *[45]))
        testFileA.create()

        testFileB = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileB.setDatasetPath(self.testDatasetA)
        testFileB.addRun(Run(2, *[45]))
        testFileB.create()

        testFileC = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileC.setDatasetPath(self.testDatasetA)
        testFileC.addRun(Run(2, *[45]))
        testFileC.create()

        self.testFilesA.append(testFileA)
        self.testFilesA.append(testFileB)
        self.testFilesA.append(testFileC)

        testFileD = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileD.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileD.setDatasetPath(self.testDatasetB)
        testFileD.addRun(Run(2, *[45]))
        testFileD.create()

        testFileE = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileE.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileE.setDatasetPath(self.testDatasetB)
        testFileE.addRun(Run(2, *[45]))
        testFileE.create()

        self.testFilesB.append(testFileD)
        self.testFilesB.append(testFileE)

        uploadFactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)
        datasetAction = uploadFactory(classname = "NewDataset")
        createAction = uploadFactory(classname = "CreateBlocks")

        datasetAction.execute(datasetPath = self.testDatasetA)
        datasetAction.execute(datasetPath = self.testDatasetB)

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()

        newBlockA = DBSBufferBlock(name = self.blockAName,
                                   location = "srm-cms.cern.ch",
                                   datasetpath = None)
        newBlockA.setDataset(self.testDatasetA, 'data', 'VALID')
        newBlockA.status = 'Closed'

        newBlockB = DBSBufferBlock(name = self.blockBName,
                                   location = "srm-cms.cern.ch",
                                   datasetpath = None)
        newBlockB.setDataset(self.testDatasetB, 'data', 'VALID')
        newBlockB.status = 'Closed'

        createAction.execute(blocks = [newBlockA, newBlockB])

        bufferFactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)

        setBlock = bufferFactory(classname = "DBSBufferFiles.SetBlock")
        setBlock.execute(testFileA["lfn"], self.blockAName)
        setBlock.execute(testFileB["lfn"], self.blockAName)
        setBlock.execute(testFileC["lfn"], self.blockAName)
        setBlock.execute(testFileD["lfn"], self.blockBName)
        setBlock.execute(testFileE["lfn"], self.blockBName)

        fileStatus = bufferFactory(classname = "DBSBufferFiles.SetStatus")
        fileStatus.execute(testFileA["lfn"], "LOCAL")
        fileStatus.execute(testFileB["lfn"], "LOCAL")
        fileStatus.execute(testFileC["lfn"], "LOCAL")
        fileStatus.execute(testFileD["lfn"], "LOCAL")
        fileStatus.execute(testFileE["lfn"], "LOCAL")

        associateWorkflow = buffer3Factory(classname = "DBSBufferFiles.AssociateWorkflowToFile")
        associateWorkflow.execute(testFileA["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileB["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileC["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileD["lfn"], "BogusRequest", "BogusTask")
        associateWorkflow.execute(testFileE["lfn"], "BogusRequest", "BogusTask")

        return

    def createConfig(self):
        """
        _createConfig_

        Create a config for the PhEDExInjector with paths to the test DBS and
        PhEDEx instances.
        """
        config = self.testInit.getConfiguration()
        config.component_("DBSInterface")
        config.DBSInterface.globalDBSUrl = self.dbsURL

        config.component_("PhEDExInjector")
        config.PhEDExInjector.phedexurl = self.phedexURL
        config.PhEDExInjector.subscribeMSS = True
        config.PhEDExInjector.group = "Saturn"
        config.PhEDExInjector.pollInterval = 30
        config.PhEDExInjector.subscribeInterval = 60

        return config

    def retrieveReplicaInfoForBlock(self, blockName):
        """
        _retrieveReplicaInfoForBlock_

        Retrieve the replica information for a block.  It takes several minutes
        after a block is injected for the statistics to be calculated, so this
        will block until that information is available.
        """
        attempts = 0
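        # poll for up to 15 attempts, 20 seconds apart (roughly five minutes in total)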

        while attempts < 15:
            result = self.phedex.getReplicaInfoForFiles(block = blockName)

            if "phedex" in result:
                if "block" in result["phedex"]:
                    if len(result["phedex"]["block"]) != 0:
                        return result["phedex"]["block"][0]

            attempts += 1
            time.sleep(20)

        logging.info("Could not retrieve replica info for block: %s" % blockName)
        return None

    @attr("integration")
    def testPoller(self):
        """
        _testPoller_

        Stuff the database and have the poller upload files to PhEDEx.  Retrieve
        replica information for the uploaded blocks and verify that all files
        have been injected.
        """
        return
        self.stuffDatabase()

        poller = PhEDExInjectorPoller(self.createConfig())
        poller.setup(parameters = None)
        poller.algorithm(parameters = None)

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        goldenLFNs = []
        for file in self.testFilesA:
            goldenLFNs.append(file["lfn"])

        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])

        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        goldenLFNs = []
        for file in self.testFilesB:
            goldenLFNs.append(file["lfn"])

        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])

        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)
        setBlock = daofactory(classname = "SetBlockStatus")
        setBlock.execute(self.blockAName, locations = None,
                         open_status = "InGlobalDBS")

        poller.algorithm(parameters = None)
        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        assert replicaInfo["is_open"] == "n", \
               "Error: block should be closed."

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        assert replicaInfo["is_open"] == "y", \
               "Error: block should be open."
        return


    def test_CustodialSiteA(self):
        """
        _CustodialSiteA_

        Check the custodialSite handling via the DAO, since I don't have a cert.
        First make sure we properly handle having no custodialSite.
        """

        self.stuffDatabase()

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMComponent.PhEDExInjector.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)
        getUninjected = daofactory(classname = "GetUninjectedFiles")

        uninjectedFiles = getUninjected.execute()
        self.assertEqual(uninjectedFiles.keys(), ['srm-cms.cern.ch'])

        return
Example #14
0
class EventAwareLumiBasedTest(unittest.TestCase):
    """
    _EventAwareLumiBasedTest_

    Test event based job splitting.
    """
    def setUp(self):
        """
        _setUp_

        Create two subscriptions: One that contains a single file and one that
        contains multiple files.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="T1_US_FNAL", pnn="T1_US_FNAL_Disk")
        locationAction.execute(siteName="T2_CH_CERN", pnn="T2_CH_CERN")

        self.testWorkflow = Workflow(spec="spec.xml",
                                     owner="Steve",
                                     name="wf001",
                                     task="Test")
        self.testWorkflow.create()

        self.performanceParams = {
            'timePerEvent': 12,
            'memoryRequirement': 2300,
            'sizePerEvent': 400
        }

        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase()
        return

    def createSubscription(self,
                           nFiles,
                           lumisPerFile,
                           twoSites=False,
                           nEventsPerFile=100):
        """
        _createSubscription_

        Create a subscription for testing
        """

        baseName = makeUUID()

        testFileset = Fileset(name=baseName)
        testFileset.create()
        for i in range(nFiles):
            newFile = self.createFile('%s_%i' % (baseName, i), nEventsPerFile,
                                      i, lumisPerFile, 'T1_US_FNAL_Disk')
            newFile.create()
            testFileset.addFile(newFile)
        if twoSites:
            for i in range(nFiles):
                newFile = self.createFile('%s_%i_2' % (baseName, i),
                                          nEventsPerFile, i, lumisPerFile,
                                          'T2_CH_CERN')
                newFile.create()
                testFileset.addFile(newFile)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()

        return testSubscription

    def createFile(self,
                   lfn,
                   events,
                   run,
                   lumis,
                   location,
                   lumiMultiplier=None):
        """
        _createFile_

        Create a file for testing
        """
        if lumiMultiplier is None:
            lumiMultiplier = run

        newFile = File(lfn=lfn, size=1000, events=events)
        lumiList = []
        for lumi in range(lumis):
            lumiList.append((lumiMultiplier * lumis) + lumi)
        newFile.addRun(Run(run, *lumiList))
        newFile.setLocation(location)
        return newFile

    def testA_FileSplitNoHardLimit(self):
        """
        _testA_FileSplitNoHardLimit_

        Simplest use case: there is only a limit of events per job, which
        the algorithm must adapt to on a file-by-file basis. At most
        one file per job, so we don't have to pass information between files.
        """
        splitter = SplitterFactory()

        # Create 5 files with 7 lumi per file and 100 events per lumi on average.
        testSubscription = self.createSubscription(nFiles=5,
                                                   lumisPerFile=7,
                                                   twoSites=False,
                                                   nEventsPerFile=700)
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)

        # First test, the optimal settings are 360 events per job
        # As we have files with 100 events per lumi, this will configure the splitting to
        # 3.6 lumis per job, which rounds to 3, the algorithm always approximates to the lower integer.
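        # Worked arithmetic for the inputs created above: 7 lumis per file taken
        # 3 at a time gives jobs of 3 + 3 + 1 lumis per file, i.e. 3 jobs per file
        # and 5 * 3 = 15 jobs overall; every third job carries the single leftover
        # lumi (100 events), the other jobs carry 3 lumis (300 events) each.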
        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               splitOnRun=True,
                               events_per_job=360,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 15, "There should be 15 jobs")
        for idx, job in enumerate(jobs, start=1):
            # Jobs may have 3 lumis or just 1; check the performance figures accordingly
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            if idx % 3 == 0:
                self.assertEqual(job['estimatedDiskUsage'], 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 100 * 12)
            else:
                self.assertEqual(job['estimatedDiskUsage'], 3 * 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 3 * 100 * 12)

        testSubscription = self.createSubscription(nFiles=5,
                                                   lumisPerFile=7,
                                                   twoSites=False,
                                                   nEventsPerFile=700)
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        # Now set the average to 200 events per job
        # This results in the algorithm reducing the lumis per job to 2
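        # Worked arithmetic: 7 lumis per file taken 2 at a time gives jobs of
        # 2 + 2 + 2 + 1 lumis per file, i.e. 4 jobs per file and 5 * 4 = 20 jobs
        # overall; every fourth job carries the single leftover lumi (100 events).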
        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               splitOnRun=True,
                               events_per_job=200,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 20, "There should be 20 jobs")
        for idx, job in enumerate(jobs, start=1):
            # Jobs may have 2 lumis or just 1; check the performance figures accordingly
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            if idx % 4 == 0:
                self.assertEqual(job['estimatedDiskUsage'], 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 100 * 12)
            else:
                self.assertEqual(job['estimatedDiskUsage'], 2 * 100 * 400)
                self.assertEqual(job['estimatedJobTime'], 2 * 100 * 12)

        # Check extremes: process zero-event files that still have lumis. Each file must be processed in a single job
        testSubscription = self.createSubscription(nFiles=5,
                                                   lumisPerFile=100,
                                                   twoSites=False,
                                                   nEventsPerFile=0)
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               events_per_job=5000,
                               performance=self.performanceParams)
        self.assertEqual(
            len(jobGroups), 0,
            "There are not enough events, so it should be 0 instead")

        # we close this fileset to get it moving
        fileset = testSubscription.getFileset()
        fileset.markOpen(False)

        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               events_per_job=5000,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 5, "There should be 5 jobs")
        for job in jobs:
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            self.assertEqual(job['estimatedDiskUsage'], 0)
            self.assertEqual(job['estimatedJobTime'], 0)

        # Process files with 10k events per lumi, fallback to one lumi per job. We can't do better
        testSubscription = self.createSubscription(nFiles=5,
                                                   lumisPerFile=5,
                                                   twoSites=False,
                                                   nEventsPerFile=50000)
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               splitOnRun=True,
                               events_per_job=5000,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 25, "There should be 25 jobs")
        for job in jobs:
            self.assertEqual(job['estimatedMemoryUsage'], 2300)
            self.assertEqual(job['estimatedDiskUsage'], 10000 * 400)
            self.assertEqual(job['estimatedJobTime'], 10000 * 12)

    def testB_NoFileSplitNoHardLimit(self):
        """
        _testB_NoFileSplitNoHardLimit_

        In this case we don't split on file boundaries. Check different combinations of files
        and make sure we make the most of the splitting, e.g. include many zero-event files in
        a single job.
        """
        splitter = SplitterFactory()

        # Create 100 files with 7 lumi per file and 0 events per lumi on average.
        testSubscription = self.createSubscription(nFiles=100,
                                                   lumisPerFile=7,
                                                   twoSites=False,
                                                   nEventsPerFile=0)
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)

        # First test, the optimal settings are 360 events per job
        # As we have files with 0 events per lumi, this will configure the splitting to
        # a single job containing all files
        jobGroups = jobFactory(halt_job_on_file_boundaries=False,
                               splitOnRun=False,
                               events_per_job=360,
                               performance=self.performanceParams)
        self.assertEqual(
            len(jobGroups), 0,
            "There aren't enough events, so it should have 0 job groups")

        # we close this fileset to get it moving
        fileset = testSubscription.getFileset()
        fileset.markOpen(False)

        jobGroups = jobFactory(halt_job_on_file_boundaries=False,
                               splitOnRun=False,
                               events_per_job=360,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 1, "There should be 1 job")
        self.assertEqual(len(jobs[0]['input_files']), 100,
                         "All 100 files must be in the job")
        self.assertEqual(jobs[0]['estimatedMemoryUsage'], 2300)
        self.assertEqual(jobs[0]['estimatedDiskUsage'], 0)
        self.assertEqual(jobs[0]['estimatedJobTime'], 0)

        # Create 7 files, each one with different lumi/event distributions
        testFileset = Fileset(name="FilesetA")
        testFileset.create()
        testFileA = self.createFile("/this/is/file1", 250, 0, 5, "T2_CH_CERN")
        testFileB = self.createFile("/this/is/file2", 600, 1, 1, "T2_CH_CERN")
        testFileC = self.createFile("/this/is/file3", 1200, 2, 2, "T2_CH_CERN")
        testFileD = self.createFile("/this/is/file4", 100, 3, 1, "T2_CH_CERN")
        testFileE = self.createFile("/this/is/file5", 30, 4, 1, "T2_CH_CERN")
        testFileF = self.createFile("/this/is/file6", 10, 5, 1, "T2_CH_CERN")
        testFileG = self.createFile("/this/is/file7", 151, 6, 3, "T2_CH_CERN")
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.addFile(testFileD)
        testFileset.addFile(testFileE)
        testFileset.addFile(testFileF)
        testFileset.addFile(testFileG)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()

        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        # Optimal settings: 150 events per job.
        # This means the first file must be split into 3 lumis per job, which would leave room
        # for another lumi in the second job, but the second file has a lumi too big for that.
        # The 3rd job only contains the second file; the fourth and fifth jobs split the third file.
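        # Worked expectation for all seven jobs (derived from the files created above):
        #   file1: 250 events / 5 lumis -> 50 events per lumi -> lumis [0-2], then [3-4]
        #   file2: 600 events in 1 lumi  -> one whole-lumi job (a lumi is never split)
        #   file3: 1200 events / 2 lumis -> 600 events per lumi -> one job per lumi
        #   files 4-6: 100 + 30 + 10 events, one lumi each -> packed into a single 140-event job
        #   file7: 151 events / 3 lumis -> a single job with lumis [18-20]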
        jobGroups = jobFactory(halt_job_on_file_boundaries=False,
                               splitOnRun=False,
                               events_per_job=150,
                               performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 7, "7 jobs must be in the jobgroup")
        self.assertEqual(jobs[0]["mask"].getRunAndLumis(), {0: [[0, 2]]},
                         "Wrong mask for the first job")
        self.assertEqual(jobs[0]["estimatedJobTime"], 150 * 12)
        self.assertEqual(jobs[0]["estimatedDiskUsage"], 150 * 400)
        self.assertEqual(jobs[1]["mask"].getRunAndLumis(), {0: [[3, 4]]},
                         "Wrong mask for the second job")
        self.assertEqual(jobs[1]["estimatedJobTime"], 100 * 12)
        self.assertEqual(jobs[1]["estimatedDiskUsage"], 100 * 400)
        self.assertEqual(jobs[2]["mask"].getRunAndLumis(), {1: [[1, 1]]},
                         "Wrong mask for the third job")
        self.assertEqual(jobs[2]["estimatedJobTime"], 600 * 12)
        self.assertEqual(jobs[2]["estimatedDiskUsage"], 600 * 400)
        self.assertEqual(jobs[3]["mask"].getRunAndLumis(), {2: [[4, 4]]},
                         "Wrong mask for the fourth job")
        self.assertEqual(jobs[3]["estimatedJobTime"], 600 * 12)
        self.assertEqual(jobs[3]["estimatedDiskUsage"], 600 * 400)
        self.assertEqual(jobs[4]["mask"].getRunAndLumis(), {2: [[5, 5]]},
                         "Wrong mask for the fifth job")
        self.assertEqual(jobs[4]["estimatedJobTime"], 600 * 12)
        self.assertEqual(jobs[4]["estimatedDiskUsage"], 600 * 400)
        self.assertEqual(jobs[5]["mask"].getRunAndLumis(), {
            3: [[3, 3]],
            4: [[4, 4]],
            5: [[5, 5]]
        }, "Wrong mask for the sixth job")
        self.assertEqual(jobs[5]["estimatedJobTime"], 140 * 12)
        self.assertEqual(jobs[5]["estimatedDiskUsage"], 140 * 400)
        self.assertEqual(jobs[6]["mask"].getRunAndLumis(), {6: [[18, 20]]},
                         "Wrong mask for the seventh job")
        self.assertEqual(jobs[6]["estimatedJobTime"], 150 * 12)
        self.assertEqual(jobs[6]["estimatedDiskUsage"], 150 * 400)

        for job in jobs:
            self.assertEqual(job["estimatedMemoryUsage"], 2300)
        # Test interactions of this algorithm with splitOnRun = True
        # Make 2 files, one with 3 runs and a second one with the last run of the first
        fileA = File(lfn="/this/is/file1a", size=1000, events=2400)
        lumiListA = []
        lumiListB = []
        lumiListC = []
        for lumi in range(8):
            lumiListA.append(1 + lumi)
            lumiListB.append(1 + lumi)
            lumiListC.append(1 + lumi)
        fileA.addRun(Run(1, *lumiListA))
        fileA.addRun(Run(2, *lumiListA))
        fileA.addRun(Run(3, *lumiListA))
        fileA.setLocation("T1_US_FNAL_Disk")

        fileB = self.createFile('/this/is/file2a', 200, 3, 5,
                                "T1_US_FNAL_Disk")

        testFileset = Fileset(name='FilesetB')
        testFileset.create()
        testFileset.addFile(fileA)
        testFileset.addFile(fileB)
        testFileset.commit()
        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()

        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        # The settings for this splitting are 700 events per job
        jobGroups = jobFactory(splitOnRun=True,
                               halt_job_on_file_boundaries=False,
                               events_per_job=700,
                               performance=self.performanceParams)
        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 6, "Six jobs must be in the jobgroup")
        self.assertEqual(jobs[0]["estimatedJobTime"], 700 * 12)
        self.assertEqual(jobs[0]["estimatedDiskUsage"], 700 * 400)
        self.assertEqual(jobs[1]["estimatedJobTime"], 100 * 12)
        self.assertEqual(jobs[1]["estimatedDiskUsage"], 100 * 400)
        self.assertEqual(jobs[2]["estimatedJobTime"], 700 * 12)
        self.assertEqual(jobs[2]["estimatedDiskUsage"], 700 * 400)
        self.assertEqual(jobs[3]["estimatedJobTime"], 100 * 12)
        self.assertEqual(jobs[3]["estimatedDiskUsage"], 100 * 400)
        self.assertEqual(jobs[4]["estimatedJobTime"], 700 * 12)
        self.assertEqual(jobs[4]["estimatedDiskUsage"], 700 * 400)
        self.assertEqual(jobs[5]["estimatedJobTime"], 300 * 12)
        self.assertEqual(jobs[5]["estimatedDiskUsage"], 300 * 400)

    def testC_HardLimitSplitting(self):
        """
        _testC_HardLimitSplitting_

        Test that we can specify an event limit; the algorithm should take
        single-lumi files with more events than the limit and mark them
        for failure
        """
        splitter = SplitterFactory()

        # Create 3 files, the one in the middle is a "bad" file
        testFileset = Fileset(name="FilesetA")
        testFileset.create()
        testFileA = self.createFile("/this/is/file1", 1000, 0, 5,
                                    "T1_US_FNAL_Disk")
        testFileB = self.createFile("/this/is/file2", 1000, 1, 1,
                                    "T1_US_FNAL_Disk")
        testFileC = self.createFile("/this/is/file3", 1000, 2, 2,
                                    "T1_US_FNAL_Disk")
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        # Settings are to split on file and run boundaries, to fail single lumis with more
        # than 800 events and to put 550 events per job
        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               splitOnRun=True,
                               events_per_job=550,
                               max_events_per_lumi=800,
                               performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 6, "Six jobs must be in the jobgroup")
        self.assertTrue(
            jobs[3]['failedOnCreation'],
            "The job processing the second file should me marked for failure")
        self.assertEqual(
            jobs[3]['failedReason'],
            "File /this/is/file2 has too many events (1000) in 1 lumi(s)",
            "The reason for the failure is not accurate")

    def testD_HardLimitSplittingOnly(self):
        """
        _testD_HardLimitSplittingOnly_

        Checks that we can split a set of files where every file has a single
        lumi too big to fit in a runnable job
        """
        splitter = SplitterFactory()

        # Create 3 single-big-lumi files
        testFileset = Fileset(name="FilesetA")
        testFileset.create()
        testFileA = self.createFile("/this/is/file1", 1000, 0, 1,
                                    "T1_US_FNAL_Disk")
        testFileB = self.createFile("/this/is/file2", 1000, 1, 1,
                                    "T1_US_FNAL_Disk")
        testFileC = self.createFile("/this/is/file3", 1000, 2, 1,
                                    "T1_US_FNAL_Disk")
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        # Settings are to split on file and run boundaries, to fail single lumis with more
        # than 800 events and to put 550 events per job
        jobGroups = jobFactory(halt_job_on_file_boundaries=True,
                               splitOnRun=True,
                               events_per_job=550,
                               max_events_per_lumi=800,
                               performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1,
                         "There should be only one job group")
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 3, "Three jobs must be in the jobgroup")
        for i in range(1, 4):
            self.assertTrue(
                jobs[i - 1]['failedOnCreation'],
                "The job processing the second file should me marked for failure"
            )
            self.assertEqual(
                jobs[i - 1]['failedReason'],
                "File /this/is/file%d has too many events (1000) in 1 lumi(s)"
                % i, "The reason for the failure is not accurate")

        return

    def test_NotEnoughEvents(self):
        """
        _test_NotEnoughEvents_

        Checks that jobs are not created when there are not enough files (actually, events)
        according to the events_per_job requested from the splitter algorithm
        """
        splitter = SplitterFactory()

        # Very small fileset (single file) without enough events
        testSubscription = self.createSubscription(nFiles=1,
                                                   lumisPerFile=2,
                                                   nEventsPerFile=200)

        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroups = jobFactory(events_per_job=500,
                               performance=self.performanceParams,
                               splitOnRun=False)

        self.assertEqual(len(jobGroups), 0)

        # Still a small fileset (two files) without enough events
        testSubscription = self.createSubscription(nFiles=2,
                                                   lumisPerFile=2,
                                                   nEventsPerFile=200)

        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroups = jobFactory(events_per_job=500,
                               performance=self.performanceParams,
                               splitOnRun=False)

        self.assertEqual(len(jobGroups), 0)

        # Finally an acceptable fileset size (three files) with enough events
        testSubscription = self.createSubscription(nFiles=3,
                                                   lumisPerFile=2,
                                                   nEventsPerFile=200)

        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroups = jobFactory(events_per_job=500,
                               performance=self.performanceParams,
                               splitOnRun=False)

        self.assertEqual(len(jobGroups), 1)
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 2)
        self.assertEqual(len(jobs[0]['input_files']), 3)
        self.assertEqual(len(jobs[1]['input_files']), 1)
        self.assertEqual(jobs[0]['mask'].getRunAndLumis(), {
            0: [[0, 1]],
            1: [[2, 3]],
            2: [[4, 4]]
        })
        self.assertEqual(jobs[1]['mask'].getRunAndLumis(), {2: [[5, 5]]})

        # Test fileset with a single run and splitOnRun=True
        testFileset = Fileset(name="FilesetA")
        testFileA = self.createFile("/this/is/file1",
                                    200,
                                    1,
                                    2,
                                    "T1_US_FNAL_Disk",
                                    lumiMultiplier=0)
        testFileB = self.createFile("/this/is/file2",
                                    200,
                                    1,
                                    2,
                                    "T1_US_FNAL_Disk",
                                    lumiMultiplier=1)
        testFileC = self.createFile("/this/is/file3",
                                    200,
                                    1,
                                    2,
                                    "T1_US_FNAL_Disk",
                                    lumiMultiplier=2)
        testFileset.addFile(testFileA)
        testFileset.addFile(testFileB)
        testFileset.addFile(testFileC)
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=self.testWorkflow,
                                        split_algo="EventAwareLumiBased",
                                        type="Processing")
        testSubscription.create()

        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=testSubscription)
        jobGroups = jobFactory(events_per_job=500,
                               performance=self.performanceParams)

        self.assertEqual(len(jobGroups), 1)
        jobs = jobGroups[0].jobs
        self.assertEqual(len(jobs), 2)
        self.assertEqual(len(jobs[0]['input_files']), 3)
        self.assertEqual(len(jobs[1]['input_files']), 1)
        self.assertEqual(jobs[0]['mask'].getRunAndLumis(),
                         {1: [[0, 1], [2, 3], [4, 4]]})
        self.assertEqual(jobs[1]['mask'].getRunAndLumis(), {1: [[5, 5]]})

        return
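The resource assertions in the EventAwareLumiBased tests above follow a simple linear model: estimated job time and disk usage scale with the events assigned to a job, while memory is a flat requirement. A minimal sketch of that arithmetic, assuming the performanceParams used here carry timePerEvent=12, sizePerEvent=400 and memoryRequirement=2300 (values inferred from the assertions; the names are illustrative, not necessarily the WMCore attribute names):

# Illustrative only: mirrors the arithmetic the assertions above rely on,
# not the actual WMCore.JobSplitting implementation.
def estimateJobResources(nEvents, timePerEvent=12, sizePerEvent=400, memoryRequirement=2300):
    """Return (jobTime, diskUsage, memoryUsage) for a job holding nEvents events."""
    return nEvents * timePerEvent, nEvents * sizePerEvent, memoryRequirement

# The first 150-event job above expects 1800 seconds of CPU and 60000 units of disk.
assert estimateJobResources(150) == (1800, 60000, 2300)
# The 700-event jobs from the splitOnRun=True case expect 8400 and 280000.
assert estimateJobResources(700) == (8400, 280000, 2300)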
Example #15
class FeederManagerTest(unittest.TestCase):
    """
    TestCase for TestFeederManager module
    """

    _maxMessage = 10

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all needed
        tables.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.generateWorkDir()
        self.testInit.setSchema(customModules = \
                         ['WMCore.Agent.Database',
                          'WMComponent.FeederManager.Database',
                          'WMCore.ThreadPool',
                          'WMCore.WMBS'],
                                useDefault = False)

        return

    def tearDown(self):
        """
        _tearDown_

        Database deletion
        """
        self.testInit.clearDatabase()

        return

    def getConfig(self):
        """
        _getConfig_

        Create a config for the FeederManager.  This config needs to include
        information for connecting to the database, as the component will create
        its own database connections.  These parameters are still pulled from
        the environment.
        """
        config = self.testInit.getConfiguration()
        self.testInit.generateWorkDir(config)

        config.component_("FeederManager")
        config.FeederManager.logLevel = "INFO"
        config.FeederManager.componentName = "FeederManager"
        config.FeederManager.componentDir = \
            os.path.join(os.getenv("TESTDIR"), "FeederManager")
        config.FeederManager.addDatasetWatchHandler = \
            'WMComponent.FeederManager.Handler.DefaultAddDatasetWatch'

        # The maximum number of threads to process each message type
        config.FeederManager.maxThreads = 10

        # The poll interval at which to look for new fileset/feeder association
        config.FeederManager.pollInterval = 60

        return config

    def testA(self):
        """
        _testA_

        Handle AddDatasetWatch events
        """
        myThread = threading.currentThread()
        config = self.getConfig()
        testFeederManager = FeederManager(config)
        testFeederManager.prepareToStart()

        for i in xrange(0, FeederManagerTest._maxMessage):
            for j in xrange(0, 3):
                feederManagerdict = {'payload': {'FeederType': 'NO Feeder',
                                                 'dataset': 'NO DATASET',
                                                 'FileType': 'NO FILE TYPE',
                                                 'StartRun': 'NO START RUN'}}

                testFeederManager.handleMessage(type='AddDatasetWatch',
                                                payload=feederManagerdict)

        time.sleep(30)

        myThread.workerThreadManager.terminateWorkers()

        while threading.activeCount() > 1:
            print('Currently: '+str(threading.activeCount())+\
                ' Threads. Wait until all our threads have finished')
            time.sleep(1)
Example #16
class HarvestTest(unittest.TestCase):
    """
    _HarvestTest_

    Test for EndOfRun job splitter
    """

    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules=["WMCore.WMBS"])

        self.splitterFactory = SplitterFactory(package="WMCore.JobSplitting")

        myThread = threading.currentThread()
        self.myThread = myThread
        daoFactory = DAOFactory(package="WMCore.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)
        self.WMBSFactory = daoFactory

        config = self.getConfig()
        self.changer = ChangeState(config)

        myResourceControl = ResourceControl()
        myResourceControl.insertSite("T1_US_FNAL", 10, 20, "T1_US_FNAL_Disk", "T1_US_FNAL")
        myResourceControl.insertSite("T1_US_FNAL", 10, 20, "T3_US_FNALLPC", "T1_US_FNAL")
        myResourceControl.insertSite("T2_CH_CERN", 10, 20, "T2_CH_CERN", "T2_CH_CERN")

        self.fileset1 = Fileset(name="TestFileset1")
        for fileNum in range(11):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(1, *[1]))
            newFile.setLocation('T1_US_FNAL_Disk')
            self.fileset1.addFile(newFile)

        self.fileset1.create()

        workflow1 = Workflow(spec="spec.xml", owner="hufnagel", name="TestWorkflow1", task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset=self.fileset1,
                                          workflow=workflow1,
                                          split_algo="Harvest",
                                          type="Harvesting")

        self.subscription1.create()
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()
        EmulatorSetup.deleteConfig(self.configFile)

        return

    def getConfig(self):
        """
        _getConfig_

        """
        config = self.testInit.getConfiguration()
        self.testInit.generateWorkDir(config)

        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket = os.getenv("DBSOCK")

        # JobStateMachine
        config.component_('JobStateMachine')
        config.JobStateMachine.couchurl = os.getenv('COUCHURL', None)
        config.JobStateMachine.couchDBName = 'wmagent_jobdump'

        return config

    def finishJobs(self, jobGroups, subscription=None):
        """
        _finishJobs_

        """
        if not subscription:
            subscription = self.subscription1
        for f in subscription.acquiredFiles():
            subscription.completeFiles(f)

        for jobGroup in jobGroups:
            self.changer.propagate(jobGroup.jobs, 'executing', 'created')
            self.changer.propagate(jobGroup.jobs, 'complete', 'executing')
            self.changer.propagate(jobGroup.jobs, 'success', 'complete')
            self.changer.propagate(jobGroup.jobs, 'cleanout', 'success')

        return

    def testHarvestEndOfRunTrigger(self):
        """
        _testHarvestEndOfRunTrigger_

        Make sure that the basic splitting algorithm works: once ExpressMerge is
        all done, fire a job against that fileset.

        """
        self.assertEqual(self.fileset1.open, True, "Fileset is closed, but it should be open")

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)

        jobGroups = jobFactory()

        self.assertEqual(len(jobGroups), 0,
                         "We got 1 or more jobGroups with an open fileset and no periodic configuration")

        self.fileset1.markOpen(False)
        self.assertEqual(self.fileset1.open, False, "Fileset is still open, but it should be closed")

        # We should also check whether there are acquired files; if there are, there are jobs,
        # and we don't want to fire more jobs while previous ones are running (output integrates whatever input).
        # TODO: we can do the above once everything else is done. Not a priority.

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory()

        self.assertEqual(len(jobGroups), 1,
                         "Harvest jobsplitter didn't create a single jobGroup after the fileset was closed")

        return

    def testPeriodicTrigger(self):
        """
        _testPeriodicTrigger_

        """
        self.assertEqual(self.fileset1.open, True, "Fileset is not open, not testing periodic here")
        # Test timeout (3 seconds for this first test)
        # there should be no acquired files; if there are, there shouldn't be a job
        # self.subscription1.acquireFiles(self.subscription1.availableFiles().pop())

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=3)

        self.assertEqual(len(jobGroups), 1, "Didn't created the first periodic job when there were acquired files")

        # For the whole thing to work, fake the first job finishing and mark the files as complete
        self.finishJobs(jobGroups)

        # Adding more files, so we have new stuff to process
        for fileNum in range(12, 24):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(1, *[1]))
            newFile.setLocation('T1_US_FNAL_Disk')
            self.fileset1.addFile(newFile)
        self.fileset1.commit()

        # Testing that it doesn't create a job unless the delay is past
        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=2)

        self.assertEqual(len(jobGroups), 0,
                         "Created one or more jobs even though the periodic interval has not passed yet")

        time.sleep(2)

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=2)

        self.assertEqual(len(jobGroups), 1,
                         "Didn't create a job even though the periodic interval has passed")

        # Finishing out previous jobs
        self.finishJobs(jobGroups)

        # Adding more files, so we have new stuff to process
        for fileNum in range(26, 36):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(1, *[1]))
            newFile.setLocation('T1_US_FNAL_Disk')
            self.fileset1.addFile(newFile)
        self.fileset1.commit()

        # Trying to create another job right afterwards; it shouldn't succeed, because the configured delay must be respected
        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=2)

        self.assertEqual(len(jobGroups), 0, "Created one or more job, there are new files, but the delay is not past")

        time.sleep(2)

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=2)

        self.assertEqual(len(jobGroups), 1, "Didn't created one or more job, there are new files and the delay is past")

        # Last check is whether the job gets all the files or not

        filesInJob = jobGroups[0].jobs[0].getFiles()
        filesInFileset = self.fileset1.getFiles()
        self.assertEqual(filesInJob, filesInFileset, "Job didn't get all the files")

        # Finishing out previous jobs
        self.finishJobs(jobGroups)

        # Adding files for the first location
        for fileNum in range(38, 48):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(1, *[1]))
            newFile.setLocation('T1_US_FNAL_Disk')
            self.fileset1.addFile(newFile)
        self.fileset1.commit()
        # Then another location
        for fileNum in range(50, 56):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(1, *[1]))
            newFile.setLocation('T2_CH_CERN')
            self.fileset1.addFile(newFile)
        self.fileset1.commit()

        # We should have jobs in both locations
        time.sleep(2)

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=2)

        self.assertEqual(len(jobGroups[0].getJobs()), 2, "We didn't get 2 jobs for 2 locations")

        firstJobLocation = jobGroups[0].getJobs()[0].getFileLocations()[0]
        secondJobLocation = jobGroups[0].getJobs()[1].getFileLocations()[0]

        self.assertEqual(firstJobLocation, 'T2_CH_CERN')
        self.assertEqual(secondJobLocation, 'T1_US_FNAL')

        self.finishJobs(jobGroups)

        for fileNum in range(60, 65):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(2, *[2]))
            newFile.setLocation('T2_CH_CERN')
            self.fileset1.addFile(newFile)
        self.fileset1.commit()

        for fileNum in range(70, 75):
            newFile = File("/some/file/name%d" % fileNum, size=1000, events=100)
            newFile.addRun(Run(3, *[3]))
            newFile.setLocation('T2_CH_CERN')
            self.fileset1.addFile(newFile)
        self.fileset1.commit()

        time.sleep(2)

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=self.subscription1)
        jobGroups = jobFactory(periodic_harvest_interval=2)

        # This is one of the more involved tests, so it is worth a comment: 4 jobs should be created
        # 1 - all previous files from T1_US_FNAL_Disk with run = 1 (a lot, like ~45)
        # 2 - few files from T2_CH_CERN, run = 1
        # 3 - few files from T2_CH_CERN, run = 2
        # 4 - few files from T2_CH_CERN, run = 3
        self.assertEqual(len(jobGroups[0].getJobs()), 4, "We didn't get 4 jobs for adding 2 different runs to T2_CH_CERN")

        return

    def testMultipleRunHarvesting(self):
        """
        _testMultipleRunHarvesting_

        Add some files with multiple runs in each, make sure the jobs
        are created by location and run. Verify each job mask afterwards.
        Note that in this test runs are split between sites; in real life
        that MUST NOT happen since we still don't support it.
        """
        multipleFilesFileset = Fileset(name="TestFileset")

        newFile = File("/some/file/test1", size=1000, events=100)
        newFile.addRun(Run(1, *[1, 3, 4, 5, 6, 7]))
        newFile.addRun(Run(2, *[1, 2, 4, 5, 6, 7]))
        newFile.setLocation('T1_US_FNAL_Disk')
        multipleFilesFileset.addFile(newFile)
        newFile = File("/some/file/test2", size=1000, events=100)
        newFile.addRun(Run(1, *[2, 8]))
        newFile.addRun(Run(2, *[3, 8]))
        newFile.setLocation('T2_CH_CERN')
        multipleFilesFileset.addFile(newFile)
        multipleFilesFileset.create()

        harvestingWorkflow = Workflow(spec="spec.xml",
                                      owner="hufnagel",
                                      name="TestWorkflow",
                                      task="Test")
        harvestingWorkflow.create()

        harvestSub = Subscription(fileset=multipleFilesFileset,
                                  workflow=harvestingWorkflow,
                                  split_algo="Harvest",
                                  type="Harvesting")
        harvestSub.create()

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=harvestSub)
        jobGroups = jobFactory(periodic_harvest_interval=2)
        self.assertEqual(len(jobGroups), 1, "A single job group was not created")
        self.assertEqual(len(jobGroups[0].getJobs()), 4,
                         "Four jobs were not created")

        for job in jobGroups[0].getJobs():
            runs = job['mask'].getRunAndLumis()
            self.assertEqual(len(runs), 1, "Job has more than one run configured")
            ll = LumiList(compactList={1: [[1, 1], [3, 7], [2, 2], [8, 8]],
                                       2: [[1, 2], [4, 7], [3, 3], [8, 8]]})
            run = runs.keys()[0]
            for lumiPair in runs[run]:
                for lumi in range(lumiPair[0], lumiPair[1] + 1):
                    self.assertTrue((str(run), lumi) in ll, "Not all of %s are in %s" % (lumiPair, ll))

        self.finishJobs(jobGroups, harvestSub)

        newFile = File("/some/file/test3", size=1000, events=100)
        newFile.addRun(Run(1, *range(9, 15)))
        newFile.setLocation('T2_CH_CERN')
        multipleFilesFileset.addFile(newFile)
        multipleFilesFileset.commit()

        time.sleep(2)

        jobGroups = jobFactory(periodic_harvest_interval=2)
        self.assertEqual(len(jobGroups), 1, "A single job group was not created")
        self.assertEqual(len(jobGroups[0].getJobs()), 4, "Four jobs were not created")

        for job in jobGroups[0].getJobs():
            runs = job['mask'].getRunAndLumis()
            self.assertEqual(len(runs), 1, "Job has more than one run configured")
            ll = LumiList(compactList={1: [[1, 1], [3, 7], [2, 2], [8, 8], [9, 14]],
                                       2: [[1, 2], [4, 7], [3, 3], [8, 8]]})
            run = runs.keys()[0]
            for lumiPair in runs[run]:
                for lumi in range(lumiPair[0], lumiPair[1] + 1):
                    self.assertTrue((run, lumi) in ll, "Not all of %s are in %s" % (lumiPair, ll))

        harvestingWorkflowSib = Workflow(spec="spec.xml",
                                         owner="hufnagel",
                                         name="TestWorkflowSib",
                                         task="TestSib")
        harvestingWorkflowSib.create()

        harvestSubSib = Subscription(fileset=multipleFilesFileset,
                                     workflow=harvestingWorkflowSib,
                                     split_algo="Harvest",
                                     type="Harvesting")
        harvestSubSib.create()

        jobFactorySib = self.splitterFactory(package="WMCore.WMBS", subscription=harvestSubSib)

        multipleFilesFileset.markOpen(False)

        jobGroups = jobFactorySib(periodic_harvest_sibling=True)
        self.assertEqual(len(jobGroups), 0, "A single job group was created")

        self.finishJobs(jobGroups, harvestSub)

        jobGroups = jobFactorySib(periodic_harvest_sibling=True)
        self.assertEqual(len(jobGroups), 1, "A single job group was not created")
        self.assertEqual(len(jobGroups[0].getJobs()), 4, "Four jobs were not created")

        for job in jobGroups[0].getJobs():
            runs = job['mask'].getRunAndLumis()
            self.assertEqual(len(runs), 1, "Job has more than one run configured")
            ll = LumiList(compactList={1: [[1, 1], [3, 7], [2, 2], [8, 8], [9, 14]],
                                       2: [[1, 2], [4, 7], [3, 3], [8, 8]]})
            run = runs.keys()[0]
            for lumiPair in runs[run]:
                for lumi in range(lumiPair[0], lumiPair[1] + 1):
                    self.assertTrue((run, lumi) in ll, "Not all of %s are in %s" % (lumiPair, ll))

    def testMultiRunHarvesting(self):
        """
        _testMultiRunHarvesting_

        Provided a fileset with a couple of files and different runs, create a
        single job for all the runs at a specific location, which also adds a
        multiRun=True flag to the job baggage that is later looked up by SetupCMSSWPSet.
        """
        multipleFilesFileset = createCommonFileset()
        self.assertEqual(multipleFilesFileset.open, True)

        harvestingWorkflow = Workflow(spec="spec.xml",
                                      owner="amaltaro",
                                      name="TestWorkflow",
                                      task="Test")
        harvestingWorkflow.create()

        harvestSub = Subscription(fileset=multipleFilesFileset,
                                  workflow=harvestingWorkflow,
                                  split_algo="Harvest",
                                  type="Harvesting")
        harvestSub.create()

        multipleFilesFileset.markOpen(False)
        self.assertEqual(multipleFilesFileset.open, False, "Fileset should now be closed")

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=harvestSub)
        jobGroups = jobFactory(dqmHarvestUnit="multiRun")
        self.assertEqual(len(jobGroups), 1)

        for jobGroup in jobGroups:
            self.assertEqual(len(jobGroup.jobs), 1)
            for job in jobGroup.jobs:
                baggage = job.getBaggage()
                self.assertTrue(getattr(baggage, "multiRun", False), "It's supposed to be a multiRun job")
                self.assertEqual(getattr(baggage, "runLimits", ""), "-1-6")

    def testByRunHarvesting(self):
        """
        _testByRunHarvesting_
        Provided a fileset with a couple of files and 4 different runs, create
        one single job per run and location.
        The multiRun baggage should be false in this case.
        """
        multipleFilesFileset = createCommonFileset()
        self.assertEqual(multipleFilesFileset.open, True, "Fileset should be open!")

        harvestingWorkflow = Workflow(spec="spec.xml",
                                      owner="amaltaro",
                                      name="TestWorkflow",
                                      task="Test")
        harvestingWorkflow.create()

        harvestSub = Subscription(fileset=multipleFilesFileset,
                                  workflow=harvestingWorkflow,
                                  split_algo="Harvest",
                                  type="Harvesting")
        harvestSub.create()

        multipleFilesFileset.markOpen(False)
        self.assertEqual(multipleFilesFileset.open, False, "Fileset should now be closed")

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=harvestSub)
        jobGroups = jobFactory()
        self.assertEqual(len(jobGroups), 1, "Should have created 1 job group")

        for jobGroup in jobGroups:
            self.assertEqual(len(jobGroup.jobs), 6, "Should have created 6 jobs")
            for job in jobGroup.jobs:
                baggage = job.getBaggage()
                self.assertFalse(getattr(baggage, "multiRun", False), "It's supposed to be a byRun job")

    def testByRunAndRunWhitelist(self):
        """
        _testByRunAndRunWhitelist_

        Create harvesting jobs by run for the runs provided in the RunWhitelist
        """
        multipleFilesFileset = createCommonFileset()
        self.assertEqual(multipleFilesFileset.open, True)

        harvestingWorkflow = Workflow(spec="spec.xml", owner="amaltaro",
                                      name="TestWorkflow", task="Test")
        harvestingWorkflow.create()

        harvestSub = Subscription(fileset=multipleFilesFileset, workflow=harvestingWorkflow,
                                  split_algo="Harvest", type="Harvesting")
        harvestSub.create()

        multipleFilesFileset.markOpen(False)
        self.assertEqual(multipleFilesFileset.open, False, "Fileset should now be closed")

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=harvestSub)
        jobGroups = jobFactory(runWhitelist=[1, 3])
        self.assertEqual(len(jobGroups), 1, "One jobgroup per location")

        for jobGroup in jobGroups:
            self.assertEqual(len(jobGroup.jobs), 2)

    def testByRunAndRunBlacklist(self):
        """
        _testByRunAndRunBlacklist_

        Create harvesting jobs by run, skipping the runs provided in the RunBlacklist
        """
        multipleFilesFileset = createCommonFileset()
        self.assertEqual(multipleFilesFileset.open, True)

        harvestingWorkflow = Workflow(spec="spec.xml", owner="amaltaro",
                                      name="TestWorkflow", task="Test")
        harvestingWorkflow.create()

        harvestSub = Subscription(fileset=multipleFilesFileset, workflow=harvestingWorkflow,
                                  split_algo="Harvest", type="Harvesting")
        harvestSub.create()

        multipleFilesFileset.markOpen(False)
        self.assertEqual(multipleFilesFileset.open, False, "Fileset should now be closed")

        jobFactory = self.splitterFactory(package="WMCore.WMBS", subscription=harvestSub)
        jobGroups = jobFactory(runWhitelist=[1, 2, 3, 4, 5], runBlacklist=[1, 3])
        self.assertEqual(len(jobGroups), 1, "One jobgroup per location")

        for jobGroup in jobGroups:
            self.assertEqual(len(jobGroup.jobs), 3)
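The last two tests above exercise runWhitelist and runBlacklist for by-run harvesting. A minimal sketch of the filtering they imply (whitelist applied first, then blacklist), assuming createCommonFileset() spans runs 1-6 as the runLimits assertion suggests; this is an illustration, not the actual Harvest splitter code:

# Illustrative sketch of the run filtering exercised by the two tests above.
def filterRuns(runs, runWhitelist=None, runBlacklist=None):
    """Keep runs in the whitelist (when given), then drop runs in the blacklist."""
    if runWhitelist:
        runs = [run for run in runs if run in runWhitelist]
    if runBlacklist:
        runs = [run for run in runs if run not in runBlacklist]
    return runs

allRuns = [1, 2, 3, 4, 5, 6]  # assumed content of createCommonFileset()
assert filterRuns(allRuns, runWhitelist=[1, 3]) == [1, 3]  # 2 by-run jobs
assert filterRuns(allRuns, runWhitelist=[1, 2, 3, 4, 5], runBlacklist=[1, 3]) == [2, 4, 5]  # 3 by-run jobs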
Example #17
class ReportIntegrationTest(unittest.TestCase):
    """
    _ReportIntegrationTest_

    """
    def setUp(self):
        """
        _setUp_

        Setup the database and WMBS for the test.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(
            customModules=["WMComponent.DBS3Buffer", "WMCore.WMBS"],
            useDefault=False)

        myThread = threading.currentThread()
        self.daofactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.dbsfactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        locationAction = self.daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T1_US_FNAL_Disk")

        inputFile = File(lfn="/path/to/some/lfn",
                         size=10,
                         events=10,
                         locations="T1_US_FNAL_Disk")
        inputFile.create()

        inputFileset = Fileset(name="InputFileset")
        inputFileset.create()
        inputFileset.addFile(inputFile)
        inputFileset.commit()

        unmergedFileset = Fileset(name="UnmergedFileset")
        unmergedFileset.create()

        mergedFileset = Fileset(name="MergedFileset")
        mergedFileset.create()

        procWorkflow = Workflow(spec="wf001.xml",
                                owner="Steve",
                                name="TestWF",
                                task="/TestWF/None")
        procWorkflow.create()
        procWorkflow.addOutput("outputRECORECO", unmergedFileset)

        mergeWorkflow = Workflow(spec="wf002.xml",
                                 owner="Steve",
                                 name="MergeWF",
                                 task="/MergeWF/None")
        mergeWorkflow.create()
        mergeWorkflow.addOutput("Merged", mergedFileset)

        insertWorkflow = self.dbsfactory(classname="InsertWorkflow")
        insertWorkflow.execute("TestWF", "/TestWF/None", 0, 0, 0, 0)
        insertWorkflow.execute("MergeWF", "/MergeWF/None", 0, 0, 0, 0)

        self.procSubscription = Subscription(fileset=inputFileset,
                                             workflow=procWorkflow,
                                             split_algo="FileBased",
                                             type="Processing")
        self.procSubscription.create()
        self.procSubscription.acquireFiles()

        self.mergeSubscription = Subscription(fileset=unmergedFileset,
                                              workflow=mergeWorkflow,
                                              split_algo="WMBSMergeBySize",
                                              type="Merge")
        self.mergeSubscription.create()

        self.procJobGroup = JobGroup(subscription=self.procSubscription)
        self.procJobGroup.create()
        self.mergeJobGroup = JobGroup(subscription=self.mergeSubscription)
        self.mergeJobGroup.create()

        self.testJob = Job(name="testJob", files=[inputFile])
        self.testJob.create(group=self.procJobGroup)
        self.testJob["state"] = "complete"

        myThread = threading.currentThread()
        self.daofactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.stateChangeAction = self.daofactory(classname="Jobs.ChangeState")
        self.setFWJRAction = self.daofactory(classname="Jobs.SetFWJRPath")
        self.getJobTypeAction = self.daofactory(classname="Jobs.GetType")
        locationAction = self.daofactory(classname="Locations.New")
        locationAction.execute(siteName="cmssrm.fnal.gov")

        self.stateChangeAction.execute(jobs=[self.testJob])

        self.tempDir = tempfile.mkdtemp()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database and the pickled report file.
        """
        self.testInit.clearDatabase()

        try:
            os.remove(os.path.join(self.tempDir, "ProcReport.pkl"))
            os.remove(os.path.join(self.tempDir, "MergeReport.pkl"))
        except Exception as ex:
            pass

        try:
            os.rmdir(self.tempDir)
        except Exception as ex:
            pass

        return

    def createConfig(self, workerThreads):
        """
        _createConfig_

        Create a config for the JobAccountant with the given number of worker
        threads.  This config needs to include information for connecting to the
        database as the component will create its own database connections.
        These parameters are still pulled from the environment.
        """
        config = self.testInit.getConfiguration()
        self.testInit.generateWorkDir(config)

        config.section_("JobStateMachine")
        config.JobStateMachine.couchurl = os.getenv("COUCHURL")
        config.JobStateMachine.couchDBName = "report_integration_t"
        config.JobStateMachine.jobSummaryDBName = "report_integration_wmagent_summary_t"

        config.component_("JobAccountant")
        config.JobAccountant.pollInterval = 60
        config.JobAccountant.workerThreads = workerThreads
        config.JobAccountant.componentDir = os.getcwd()
        config.JobAccountant.logLevel = 'SQLDEBUG'

        config.component_("TaskArchiver")
        config.TaskArchiver.localWMStatsURL = "%s/%s" % (
            config.JobStateMachine.couchurl,
            config.JobStateMachine.jobSummaryDBName)
        return config

    def verifyJobSuccess(self, jobID):
        """
        _verifyJobSuccess_

        Verify that the metadata for a successful job is correct.  This will
        check the outcome, retry count and state.
        """
        testJob = Job(id=jobID)
        testJob.load()

        assert testJob["state"] == "success", \
               "Error: test job in wrong state: %s" % testJob["state"]
        assert testJob["retry_count"] == 0, \
               "Error: test job has wrong retry count: %s" % testJob["retry_count"]
        assert testJob["outcome"] == "success", \
               "Error: test job has wrong outcome: %s" % testJob["outcome"]

        return

    def verifyFileMetaData(self, jobID, fwkJobReportFiles):
        """
        _verifyFileMetaData_

        Verify that all the files that were output by a job made it into WMBS
        correctly.  Compare the contents of WMBS to the files in the frameworks
        job report.

        Note that fwkJobReportFiles is a list of DataStructs File objects.
        """
        testJob = Job(id=jobID)
        testJob.loadData()

        inputLFNs = []
        for inputFile in testJob["input_files"]:
            inputLFNs.append(inputFile["lfn"])

        for fwkJobReportFile in fwkJobReportFiles:
            outputFile = File(lfn=fwkJobReportFile["lfn"])
            outputFile.loadData(parentage=1)

            assert outputFile["events"] == int(fwkJobReportFile["events"]), \
                   "Error: Output file has wrong events: %s, %s" % \
                   (outputFile["events"], fwkJobReportFile["events"])
            assert outputFile["size"] == int(fwkJobReportFile["size"]), \
                   "Error: Output file has wrong size: %s, %s" % \
                   (outputFile["size"], fwkJobReportFile["size"])

            for ckType in fwkJobReportFile["checksums"].keys():
                assert ckType in outputFile["checksums"].keys(), \
                       "Error: Output file is missing checksums: %s" % ckType
                assert outputFile["checksums"][ckType] == fwkJobReportFile["checksums"][ckType], \
                       "Error: Checksums don't match."

            assert len(fwkJobReportFile["checksums"].keys()) == \
                   len(outputFile["checksums"].keys()), \
                   "Error: Wrong number of checksums."

            jobType = self.getJobTypeAction.execute(jobID=jobID)
            if jobType == "Merge":
                assert str(outputFile["merged"]) == "True", \
                       "Error: Merge jobs should output merged files."
            else:
                assert outputFile["merged"] == fwkJobReportFile["merged"], \
                       "Error: Output file merged output is wrong: %s, %s" % \
                       (outputFile["merged"], fwkJobReportFile["merged"])

            assert len(outputFile["locations"]) == 1, \
                   "Error: outputfile should have one location: %s" % outputFile["locations"]
            assert list(outputFile["locations"])[0] == list(fwkJobReportFile["locations"])[0], \
                   "Error: wrong location for file."

            assert len(outputFile["parents"]) == len(inputLFNs), \
                   "Error: Output file has wrong number of parents."
            for outputParent in outputFile["parents"]:
                assert outputParent["lfn"] in inputLFNs, \
                       "Error: Unknown parent file: %s" % outputParent["lfn"]

            fwjrRuns = {}
            for run in fwkJobReportFile["runs"]:
                fwjrRuns[run.run] = run.lumis

            for run in outputFile["runs"]:
                assert run.run in fwjrRuns, \
                       "Error: Extra run in output: %s" % run.run

                for lumi in run:
                    assert lumi in fwjrRuns[run.run], \
                           "Error: Extra lumi: %s" % lumi

                    fwjrRuns[run.run].remove(lumi)

                if len(fwjrRuns[run.run]) == 0:
                    del fwjrRuns[run.run]

            assert len(fwjrRuns.keys()) == 0, \
                   "Error: Missing runs, lumis: %s" % fwjrRuns

            testJobGroup = JobGroup(id=testJob["jobgroup"])
            testJobGroup.loadData()
            jobGroupFileset = testJobGroup.output
            jobGroupFileset.loadData()

            assert outputFile["id"] in jobGroupFileset.getFiles(type = "id"), \
                   "Error: output file not in jobgroup fileset."

            if testJob["mask"]["FirstEvent"] == None:
                assert outputFile["first_event"] == 0, \
                       "Error: first event not set correctly: 0, %s" % \
                       outputFile["first_event"]
            else:
                assert testJob["mask"]["FirstEvent"] == outputFile["first_event"], \
                       "Error: last event not set correctly: %s, %s" % \
                       (testJob["mask"]["FirstEvent"], outputFile["first_event"])

        return

    def testReportHandling(self):
        """
        _testReportHandling_

        Verify that we're able to parse a CMSSW report, convert it to a Report()
        style report, pickle it and then have the accountant process it.
        """
        self.procPath = os.path.join(
            WMCore.WMBase.getTestBase(),
            "WMCore_t/FwkJobReport_t/CMSSWProcessingReport.xml")

        myReport = Report("cmsRun1")
        myReport.parse(self.procPath)

        # Fake some metadata that should be added by the stageout scripts.
        for fileRef in myReport.getAllFileRefsFromStep("cmsRun1"):
            fileRef.size = 1024
            fileRef.location = "cmssrm.fnal.gov"

        fwjrPath = os.path.join(self.tempDir, "ProcReport.pkl")
        cmsRunStep = myReport.retrieveStep("cmsRun1")
        cmsRunStep.status = 0
        myReport.setTaskName('/TestWF/None')
        myReport.persist(fwjrPath)

        self.setFWJRAction.execute(jobID=self.testJob["id"], fwjrPath=fwjrPath)

        pFile = DBSBufferFile(lfn="/path/to/some/lfn",
                              size=600000,
                              events=60000)
        pFile.setAlgorithm(appName="cmsRun",
                           appVer="UNKNOWN",
                           appFam="RECO",
                           psetHash="GIBBERISH",
                           configContent="MOREGIBBERISH")
        pFile.setDatasetPath("/bogus/dataset/path")
        #pFile.addRun(Run(1, *[45]))
        pFile.create()

        config = self.createConfig(workerThreads=1)
        accountant = JobAccountantPoller(config)
        accountant.setup()
        accountant.algorithm()

        self.verifyJobSuccess(self.testJob["id"])
        self.verifyFileMetaData(self.testJob["id"],
                                myReport.getAllFilesFromStep("cmsRun1"))

        inputFile = File(
            lfn=
            "/store/backfill/2/unmerged/WMAgentCommissioining10/MinimumBias/RECO/rereco_GR09_R_34X_V5_All_v1/0000/outputRECORECO.root"
        )
        inputFile.load()
        self.testMergeJob = Job(name="testMergeJob", files=[inputFile])
        self.testMergeJob.create(group=self.mergeJobGroup)
        self.testMergeJob["state"] = "complete"
        self.stateChangeAction.execute(jobs=[self.testMergeJob])

        self.mergePath = os.path.join(
            WMCore.WMBase.getTestBase(),
            "WMCore_t/FwkJobReport_t/CMSSWMergeReport.xml")

        myReport = Report("mergeReco")
        myReport.parse(self.mergePath)

        # Fake some metadata that should be added by the stageout scripts.
        for fileRef in myReport.getAllFileRefsFromStep("mergeReco"):
            fileRef.size = 1024
            fileRef.location = "cmssrm.fnal.gov"
            fileRef.dataset = {
                "applicationName": "cmsRun",
                "applicationVersion": "CMSSW_3_4_2_patch1",
                "primaryDataset": "MinimumBias",
                "processedDataset": "Rereco-v1",
                "dataTier": "RECO"
            }

        fwjrPath = os.path.join(self.tempDir, "MergeReport.pkl")
        myReport.setTaskName('/MergeWF/None')
        cmsRunStep = myReport.retrieveStep("mergeReco")
        cmsRunStep.status = 0
        myReport.persist(fwjrPath)

        self.setFWJRAction.execute(jobID=self.testMergeJob["id"],
                                   fwjrPath=fwjrPath)
        accountant.algorithm()

        self.verifyJobSuccess(self.testMergeJob["id"])
        self.verifyFileMetaData(self.testMergeJob["id"],
                                myReport.getAllFilesFromStep("mergeReco"))

        return
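The report handling above hinges on a persist-then-reload cycle: the framework job report is pickled to disk, its path is recorded in WMBS via Jobs.SetFWJRPath, and the JobAccountant later loads it from that path. A minimal sketch of that round trip using the plain pickle module (Report.persist presumably wraps similar serialization; the dictionary below is a stand-in, not the real Report structure):

# Illustrative only: the persist/reload round trip, shown with plain pickle
# rather than the Report API.
import os
import pickle
import tempfile

fakeReport = {"task": "/TestWF/None", "step": "cmsRun1", "status": 0}  # stand-in for a Report
fwjrPath = os.path.join(tempfile.mkdtemp(), "ProcReport.pkl")

with open(fwjrPath, "wb") as handle:   # roughly what myReport.persist(fwjrPath) achieves
    pickle.dump(fakeReport, handle)

with open(fwjrPath, "rb") as handle:   # roughly what the accountant does with the stored path
    loaded = pickle.load(handle)
assert loaded["task"] == "/TestWF/None"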
Example #18
class StoreResultsTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Initialize the database.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.listTasksByWorkflow = self.daoFactory(
            classname="Workflow.LoadFromName")
        self.listFilesets = self.daoFactory(classname="Fileset.List")
        self.listSubsMapping = self.daoFactory(
            classname="Subscriptions.ListSubsAndFilesetsFromWorkflow")

        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database.
        """
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
        return

    def testStoreResults(self):
        """
        _testStoreResults_

        Create a StoreResults workflow and verify it installs into WMBS
        correctly.
        """
        arguments = StoreResultsWorkloadFactory.getTestArguments()

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", arguments)

        testWMBSHelper = WMBSHelper(testWorkload,
                                    "StoreResults",
                                    "SomeBlock",
                                    cachepath=self.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        testWorkflow = Workflow(name="TestWorkload",
                                task="/TestWorkload/StoreResults")
        testWorkflow.load()

        self.assertEqual(len(testWorkflow.outputMap), 2,
                         "Error: Wrong number of WF outputs.")
        goldenOutputMods = {"Merged": "USER"}
        for goldenOutputMod, tier in viewitems(goldenOutputMods):
            fset = goldenOutputMod + tier
            mergedOutput = testWorkflow.outputMap[fset][0][
                "merged_output_fileset"]
            unmergedOutput = testWorkflow.outputMap[fset][0]["output_fileset"]

            mergedOutput.loadData()
            unmergedOutput.loadData()

            self.assertEqual(
                mergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % fset,
                "Error: Merged output fileset is wrong: %s" %
                mergedOutput.name)
            self.assertEqual(
                unmergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % fset,
                "Error: Unmerged output fileset is wrong: %s." %
                unmergedOutput.name)

        logArchOutput = testWorkflow.outputMap["logArchive"][0][
            "merged_output_fileset"]
        unmergedLogArchOutput = testWorkflow.outputMap["logArchive"][0][
            "output_fileset"]
        logArchOutput.loadData()
        unmergedLogArchOutput.loadData()

        self.assertEqual(logArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")
        self.assertEqual(unmergedLogArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")

        topLevelFileset = Fileset(name="TestWorkload-StoreResults-SomeBlock")
        topLevelFileset.loadData()

        procSubscription = Subscription(fileset=topLevelFileset,
                                        workflow=testWorkflow)
        procSubscription.loadData()

        self.assertEqual(procSubscription["type"], "Merge",
                         "Error: Wrong subscription type.")
        self.assertEqual(procSubscription["split_algo"],
                         "ParentlessMergeBySize", "Error: Wrong split algo.")

        return

    def testFilesets(self):
        """
        Test workflow tasks, filesets and subscriptions creation
        """
        # expected tasks, filesets, subscriptions, etc
        expOutTasks = ['/TestWorkload/StoreResults']
        expWfTasks = [
            '/TestWorkload/StoreResults',
            '/TestWorkload/StoreResults/StoreResultsLogCollect'
        ]
        expFsets = [
            'TestWorkload-StoreResults-/MinimumBias/ComissioningHI-v1/RAW',
            '/TestWorkload/StoreResults/merged-MergedUSER',
            '/TestWorkload/StoreResults/merged-logArchive'
        ]
        subMaps = [
            (2, '/TestWorkload/StoreResults/merged-logArchive',
             '/TestWorkload/StoreResults/StoreResultsLogCollect',
             'MinFileBased', 'LogCollect'),
            (1, 'TestWorkload-StoreResults-/MinimumBias/ComissioningHI-v1/RAW',
             '/TestWorkload/StoreResults', 'ParentlessMergeBySize', 'Merge')
        ]
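        # Each subMaps entry appears to be (subscription id, input fileset,
        # workflow task, split algorithm, subscription type).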

        testArguments = StoreResultsWorkloadFactory.getTestArguments()

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", testArguments)

        testWMBSHelper = WMBSHelper(testWorkload,
                                    "StoreResults",
                                    blockName=testArguments['InputDataset'],
                                    cachepath=self.testInit.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        self.assertItemsEqual(testWorkload.listOutputProducingTasks(),
                              expOutTasks)

        workflows = self.listTasksByWorkflow.execute(workflow="TestWorkload")
        self.assertItemsEqual([item['task'] for item in workflows], expWfTasks)

        # returns a tuple of id, name, open and last_update
        filesets = self.listFilesets.execute()
        self.assertItemsEqual([item[1] for item in filesets], expFsets)

        subscriptions = self.listSubsMapping.execute(workflow="TestWorkload",
                                                     returnTuple=True)
        self.assertItemsEqual(subscriptions, subMaps)
Example #19
class ResourceControlTest(EmulatedUnitTestCase):
    def setUp(self):
        """
        _setUp_

        Install schema and create a DAO factory for WMBS.
        """
        super(ResourceControlTest, self).setUp()
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS",
                                               "WMCore.ResourceControl",
                                               "WMCore.BossAir"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        self.baDaoFactory = DAOFactory(package="WMCore.BossAir",
                                       logger=myThread.logger,
                                       dbinterface=myThread.dbi)

        self.insertRunJob = self.baDaoFactory(classname="NewJobs")
        self.insertState = self.baDaoFactory(classname="NewState")
        states = ['PEND', 'RUN', 'Idle', 'Running']
        self.insertState.execute(states)
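        # Register the BossAir states up front; the RunJob entries created in
        # createJobs() reference the "PEND" and "RUN" statuses inserted here.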

        self.tempDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear the schema.
        """
        super(ResourceControlTest, self).tearDown()
        self.testInit.clearDatabase()
        return

    def createJobs(self):
        """
        _createJobs_

        Create test jobs in WMBS and BossAir
        """
        testWorkflow = Workflow(spec=makeUUID(), owner="tapas",
                                name=makeUUID(), task="Test")
        testWorkflow.create()

        testFilesetA = Fileset(name="TestFilesetA")
        testFilesetA.create()
        testFilesetB = Fileset(name="TestFilesetB")
        testFilesetB.create()
        testFilesetC = Fileset(name="TestFilesetC")
        testFilesetC.create()

        testFileA = File(lfn="testFileA", locations=set(["testSE1", "testSE2"]))
        testFileA.create()
        testFilesetA.addFile(testFileA)
        testFilesetA.commit()
        testFilesetB.addFile(testFileA)
        testFilesetB.commit()
        testFilesetC.addFile(testFileA)
        testFilesetC.commit()

        testSubscriptionA = Subscription(fileset=testFilesetA,
                                         workflow=testWorkflow,
                                         type="Processing")
        testSubscriptionA.create()
        testSubscriptionA.addWhiteBlackList([{"site_name": "testSite1", "valid": True}])
        testSubscriptionB = Subscription(fileset=testFilesetB,
                                         workflow=testWorkflow,
                                         type="Processing")
        testSubscriptionB.create()
        testSubscriptionB.addWhiteBlackList([{"site_name": "testSite1", "valid": False}])
        testSubscriptionC = Subscription(fileset=testFilesetC,
                                         workflow=testWorkflow,
                                         type="Merge")
        testSubscriptionC.create()

        testJobGroupA = JobGroup(subscription=testSubscriptionA)
        testJobGroupA.create()
        testJobGroupB = JobGroup(subscription=testSubscriptionB)
        testJobGroupB.create()
        testJobGroupC = JobGroup(subscription=testSubscriptionC)
        testJobGroupC.create()

        # Site 1, Has been assigned a location and is complete.
        testJobA = Job(name="testJobA", files=[testFileA])
        testJobA["couch_record"] = makeUUID()
        testJobA.create(group=testJobGroupA)
        testJobA["state"] = "success"

        # Site 1, Has been assigned a location and is incomplete.
        testJobB = Job(name="testJobB", files=[testFileA])
        testJobB["couch_record"] = makeUUID()
        testJobB["cache_dir"] = self.tempDir
        testJobB.create(group=testJobGroupA)
        testJobB["state"] = "executing"
        runJobB = RunJob()
        runJobB.buildFromJob(testJobB)
        runJobB["status"] = "PEND"

        # Does not have a location, white listed to site 1
        testJobC = Job(name="testJobC", files=[testFileA])
        testJobC["couch_record"] = makeUUID()
        testJobC.create(group=testJobGroupA)
        testJobC["state"] = "new"

        # Site 2, Has been assigned a location and is complete.
        testJobD = Job(name="testJobD", files=[testFileA])
        testJobD["couch_record"] = makeUUID()
        testJobD.create(group=testJobGroupB)
        testJobD["state"] = "success"

        # Site 2, Has been assigned a location and is incomplete.
        testJobE = Job(name="testJobE", files=[testFileA])
        testJobE["couch_record"] = makeUUID()
        testJobE.create(group=testJobGroupB)
        testJobE["state"] = "executing"
        runJobE = RunJob()
        runJobE.buildFromJob(testJobE)
        runJobE["status"] = "RUN"

        # Does not have a location, site 1 is blacklisted.
        testJobF = Job(name="testJobF", files=[testFileA])
        testJobF["couch_record"] = makeUUID()
        testJobF.create(group=testJobGroupB)
        testJobF["state"] = "new"

        # Site 3, Has been assigned a location and is complete.
        testJobG = Job(name="testJobG", files=[testFileA])
        testJobG["couch_record"] = makeUUID()
        testJobG.create(group=testJobGroupC)
        testJobG["state"] = "cleanout"

        # Site 3, Has been assigned a location and is incomplete.
        testJobH = Job(name="testJobH", files=[testFileA])
        testJobH["couch_record"] = makeUUID()
        testJobH.create(group=testJobGroupC)
        testJobH["state"] = "new"

        # Site 3, Does not have a location.
        testJobI = Job(name="testJobI", files=[testFileA])
        testJobI["couch_record"] = makeUUID()
        testJobI.create(group=testJobGroupC)
        testJobI["state"] = "new"

        # Site 3, Does not have a location and is in cleanout.
        testJobJ = Job(name="testJobJ", files=[testFileA])
        testJobJ["couch_record"] = makeUUID()
        testJobJ.create(group=testJobGroupC)
        testJobJ["state"] = "cleanout"

        changeStateAction = self.daoFactory(classname="Jobs.ChangeState")
        changeStateAction.execute(jobs=[testJobA, testJobB, testJobC, testJobD,
                                        testJobE, testJobF, testJobG, testJobH,
                                        testJobI, testJobJ])

        self.insertRunJob.execute([runJobB, runJobE])

        setLocationAction = self.daoFactory(classname="Jobs.SetLocation")
        setLocationAction.execute(testJobA["id"], "testSite1")
        setLocationAction.execute(testJobB["id"], "testSite1")
        setLocationAction.execute(testJobD["id"], "testSite1")
        setLocationAction.execute(testJobE["id"], "testSite2")
        setLocationAction.execute(testJobG["id"], "testSite1")
        setLocationAction.execute(testJobH["id"], "testSite1")

        return

    def testInsert(self):
        """
        _testInsert_

        Verify that inserting sites and thresholds works correctly, even if the
        site or threshold already exists.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite2", 100, 200, "testSE2", "testCE2")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite1", "Merge", 250, 150)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 30)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 100)

        createThresholds = myResourceControl.listThresholdsForCreate()

        self.assertEqual(len(createThresholds.keys()), 2,
                         "Error: Wrong number of sites in Resource Control DB")

        self.assertTrue("testSite1" in createThresholds.keys(),
                        "Error: Test Site 1 missing from thresholds.")

        self.assertTrue("testSite2" in createThresholds.keys(),
                        "Error: Test Site 2 missing from thresholds.")

        self.assertEqual(createThresholds["testSite1"]["total_slots"], 10,
                         "Error: Wrong number of total slots.")

        self.assertEqual(createThresholds["testSite1"]["pending_jobs"], {0: 0},
                         "Error: Wrong number of running jobs: %s" %
                         createThresholds["testSite1"]["pending_jobs"])

        self.assertEqual(createThresholds["testSite2"]["total_slots"], 100,
                         "Error: Wrong number of total slots.")

        self.assertEqual(createThresholds["testSite2"]["pending_jobs"], {0: 0},
                         "Error: Wrong number of running jobs.")

        thresholds = myResourceControl.listThresholdsForSubmit()

        self.assertEqual(len(thresholds.keys()), 2,
                         "Error: Wrong number of sites in Resource Control DB")

        self.assertTrue("testSite1" in thresholds.keys(),
                        "Error: testSite1 missing from thresholds.")

        self.assertTrue("testSite2" in thresholds.keys(),
                        "Error: testSite2 missing from thresholds.")

        site1Info = thresholds["testSite1"]
        site2Info = thresholds["testSite2"]
        site1Thresholds = site1Info["thresholds"]
        site2Thresholds = site2Info["thresholds"]

        procThreshold1 = None
        procThreshold2 = None
        mergeThreshold1 = None
        mergeThreshold2 = None
        for taskType, threshold in site1Thresholds.items():
            if taskType == "Merge":
                mergeThreshold1 = threshold
            elif taskType == "Processing":
                procThreshold1 = threshold
        for taskType, threshold in site2Thresholds.items():
            if taskType == "Merge":
                mergeThreshold2 = threshold
            elif taskType == "Processing":
                procThreshold2 = threshold

        self.assertEqual(len(site1Thresholds), 2,
                         "Error: Wrong number of task types.")

        self.assertEqual(len(site2Thresholds), 2,
                         "Error: Wrong number of task types.")

        self.assertNotEqual(procThreshold1, None)
        self.assertNotEqual(procThreshold2, None)
        self.assertNotEqual(mergeThreshold1, None)
        self.assertNotEqual(mergeThreshold2, None)

        self.assertEqual(site1Info["total_pending_slots"], 10,
                         "Error: Site thresholds wrong")

        self.assertEqual(site1Info["total_running_slots"], 20,
                         "Error: Site thresholds wrong")

        self.assertEqual(site1Info["total_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(site1Info["total_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["max_slots"], 20,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["pending_slots"], 10,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["max_slots"], 250,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["pending_slots"], 150,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_pending_slots"], 100,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_running_slots"], 200,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["max_slots"], 50,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["pending_slots"], 30,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["max_slots"], 135,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["pending_slots"], 100,
                         "Error: Site thresholds wrong")

    def testList(self):
        """
        _testList_

        Test the functions that list thresholds for creating jobs and submitting
        jobs.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1", "T1_US_FNAL", "LsfPlugin")
        myResourceControl.insertSite("testSite2", 20, 40, "testSE2", "testCE2", "T3_US_FNAL", "LsfPlugin")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 25)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 65)

        self.createJobs()

        createThresholds = myResourceControl.listThresholdsForCreate()
        submitThresholds = myResourceControl.listThresholdsForSubmit()

        self.assertEqual(len(createThresholds.keys()), 2,
                         "Error: Wrong number of sites in create thresholds")

        self.assertEqual(createThresholds["testSite1"]["total_slots"], 10,
                         "Error: Wrong number of slots for site 1")

        self.assertEqual(createThresholds["testSite2"]["total_slots"], 20,
                         "Error: Wrong number of slots for site 2")

        # We should have two running jobs with locations at site one,
        # two running jobs without locations at site two, and one running
        # job without a location at site one and two.
        self.assertEqual(createThresholds["testSite1"]["pending_jobs"], {0: 4},
                         "Error: Wrong number of pending jobs for site 1")

        # We should have one running job with a location at site 2 and
        # another running job without a location.
        self.assertEqual(createThresholds["testSite2"]["pending_jobs"], {0: 2},
                         "Error: Wrong number of pending jobs for site 2")

        # We should also have a phedex_name
        self.assertEqual(createThresholds["testSite1"]["cms_name"], "T1_US_FNAL")
        self.assertEqual(createThresholds["testSite2"]["cms_name"], "T3_US_FNAL")

        mergeThreshold1 = None
        mergeThreshold2 = None
        procThreshold1 = None
        procThreshold2 = None
        self.assertEqual(set(submitThresholds.keys()), set(["testSite1", "testSite2"]))
        for taskType, threshold in submitThresholds["testSite1"]["thresholds"].items():
            if taskType == "Merge":
                mergeThreshold1 = threshold
            elif taskType == "Processing":
                procThreshold1 = threshold
        for taskType, threshold in submitThresholds["testSite2"]["thresholds"].items():
            if taskType == "Merge":
                mergeThreshold2 = threshold
            elif taskType == "Processing":
                procThreshold2 = threshold

        self.assertEqual(submitThresholds["testSite1"]["total_running_jobs"], 0,
                         "Error: Wrong number of running jobs for submit thresholds.")
        self.assertEqual(submitThresholds["testSite2"]["total_running_jobs"], 1,
                         "Error: Wrong number of running jobs for submit thresholds.")
        self.assertEqual(submitThresholds["testSite1"]["total_pending_jobs"], 1,
                         "Error: Wrong number of pending jobs for submit thresholds.")
        self.assertEqual(submitThresholds["testSite2"]["total_pending_jobs"], 0,
                         "Error: Wrong number of pending jobs for submit thresholds.")

        self.assertEqual(mergeThreshold1["task_running_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(mergeThreshold1["task_pending_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold1["task_running_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold1["task_pending_jobs"], 1,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(mergeThreshold2["task_running_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(mergeThreshold2["task_pending_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold2["task_running_jobs"], 1,
                         "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(procThreshold2["task_pending_jobs"], 0,
                         "Error: Wrong number of task running jobs for submit thresholds.")

        return

    def testListSiteInfo(self):
        """
        _testListSiteInfo_

        Verify that the listSiteInfo() methods works properly.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite2", 100, 200, "testSE2", "testCE2")

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["site_name"], "testSite1",
                         "Error: Site name is wrong.")

        self.assertEqual(siteInfo["pnn"], ["testSE1"],
                         "Error: SE name is wrong.")

        self.assertEqual(siteInfo["ce_name"], "testCE1",
                         "Error: CE name is wrong.")

        self.assertEqual(siteInfo["pending_slots"], 10,
                         "Error: Pending slots is wrong.")

        self.assertEqual(siteInfo["running_slots"], 20,
                         "Error: Pending slots is wrong.")

        return

    def testUpdateJobSlots(self):
        """
        _testUpdateJobSlots_

        Verify that it is possible to update the number of job slots at a site.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["pending_slots"], 10, "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 20, "Error: Running slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", pendingJobSlots=20)

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["pending_slots"], 20, "Error: Pending slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", runningJobSlots=40)

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["running_slots"], 40, "Error: Running slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", 5, 10)

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["pending_slots"], 5, "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 10, "Error: Running slots is wrong.")

        return

    def testThresholdsForSite(self):
        """
        _testThresholdsForSite_

        Check that we can get the thresholds in intelligible form
        for each site
        """

        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 8)
        myResourceControl.insertThreshold("testSite1", "Merge", 5, 3)

        result = myResourceControl.thresholdBySite(siteName="testSite1")
        procInfo = {}
        mergInfo = {}
        for res in result:
            if res['task_type'] == 'Processing':
                procInfo = res
            elif res['task_type'] == 'Merge':
                mergInfo = res
        self.assertEqual(procInfo.get('pending_slots', None), 20)
        self.assertEqual(procInfo.get('running_slots', None), 40)
        self.assertEqual(procInfo.get('max_slots', None), 10)
        self.assertEqual(procInfo.get('task_pending_slots', None), 8)
        self.assertEqual(mergInfo.get('pending_slots', None), 20)
        self.assertEqual(mergInfo.get('running_slots', None), 40)
        self.assertEqual(mergInfo.get('max_slots', None), 5)
        self.assertEqual(mergInfo.get('task_pending_slots', None), 3)
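        # Each thresholdBySite() row seems to combine the site-wide slot counts
        # (pending_slots/running_slots) with the task-specific limits
        # (max_slots/task_pending_slots).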

        return

    def testThresholdPriority(self):
        """
        _testThresholdPriority_

        Test that we get things back in priority order
        """

        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 8)
        myResourceControl.insertThreshold("testSite1", "Merge", 5, 3)

        # test default task priorities
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 4)
        self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 0)

        myResourceControl.changeTaskPriority("Merge", 3)
        myResourceControl.changeTaskPriority("Processing", 1)

        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 3)
        self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 1)

        myResourceControl.changeTaskPriority("Merge", 1)
        myResourceControl.changeTaskPriority("Processing", 3)

        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite1']['thresholds']['Merge']['priority'], 1)
        self.assertEqual(result['testSite1']['thresholds']['Processing']['priority'], 3)

        return

    def testChangeSiteState(self):
        """
        _testChangeSiteState_

        Check that we can change the state between different values and
        retrieve it through the threshold methods
        """
        self.tempDir = self.testInit.generateWorkDir()
        config = self.createConfig()
        myResourceControl = ResourceControl(config)
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 5)

        result = myResourceControl.listThresholdsForCreate()
        self.assertEqual(result['testSite1']['state'], 'Normal', 'Error: Wrong site state')

        myResourceControl.changeSiteState("testSite1", "Down")
        result = myResourceControl.listThresholdsForCreate()
        self.assertEqual(result['testSite1']['state'], 'Down', 'Error: Wrong site state')

        # If you set the value to 'Normal' instead of 'Down' this test should FAIL
        # self.assertEqual(result['testSite1']['state'], 'Normal', 'Error: Wrong site state')

    def testAbortedState(self):
        """
        _testAbortedState_

        Check that we can kill jobs when a site is set to aborted
        ### We no longer need this test as we are not killing jobs that are running
        """
        self.tempDir = self.testInit.generateWorkDir()
        config = self.createConfig()
        myResourceControl = ResourceControl(config)
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1", "T1_US_FNAL", "MockPlugin")
        myResourceControl.insertSite("testSite2", 20, 40, "testSE2", "testCE2", "T1_IT_CNAF", "MockPlugin")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 25)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 65)

        self.createJobs()

        myResourceControl.changeSiteState("testSite1", "Aborted")

        ## Now check the tempDir for a FWJR for the killed job
        reportPath = os.path.join(self.tempDir, "Report.0.pkl")
        report = Report()
        report.load(reportPath)
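        # 71301 is presumably the exit code recorded for jobs killed when the
        # site state was changed to Aborted.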
        self.assertEqual(report.getExitCode(), 71301)
        return

    def createConfig(self):
        """
        _createConfig_

        Create a config and save it to the temp dir.  Set the WMAGENT_CONFIG
        environment variable so the config gets picked up.
        """
        config = Configuration()
        config.section_("General")
        config.General.workDir = os.getenv("TESTDIR", os.getcwd())
        config.section_("Agent")
        config.Agent.componentName = "resource_control_t"
        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket = os.getenv("DBSOCK")
        config.section_("JobStateMachine")
        config.JobStateMachine.couchurl = os.getenv('COUCHURL')
        config.JobStateMachine.couchDBName = "bossair_t"
        config.JobStateMachine.jobSummaryDBName = 'wmagent_summary_t'
        config.JobStateMachine.summaryStatsDBName = 'stat_summary_t'
        config.section_("BossAir")
        config.BossAir.pluginDir = "WMCore.BossAir.Plugins"
        config.BossAir.pluginNames = ["MockPlugin"]
        config.BossAir.section_("MockPlugin")
        config.BossAir.MockPlugin.fakeReport = os.path.join(getTestBase(),
                                                            'WMComponent_t/JobAccountant_t/fwjrs',
                                                            "MergeSuccess.pkl")

        configHandle = open(os.path.join(self.tempDir, "config.py"), "w")
        configHandle.write(str(config))
        configHandle.close()

        os.environ["WMAGENT_CONFIG"] = os.path.join(self.tempDir, "config.py")
        return config

    def testInsertSite(self):
        """
        _testInsertSite_

        Test that we can insert a fake test site on its own using a single
        command-line option.
        """
        self.createConfig()

        resControlPath = os.path.join(getTestBase(), "../../bin/wmagent-resource-control")
        env = os.environ
        env['PYTHONPATH'] = ":".join(sys.path)
        cmdline = [resControlPath, "--add-Test"]
        retval = subprocess.Popen(cmdline,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  env=env)
        (_, _) = retval.communicate()
        myResourceControl = ResourceControl()
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(len(result), 1)
        self.assertTrue('CERN' in result)
        for x in result:
            self.assertEqual(len(result[x]['thresholds']), 7)
            self.assertEqual(result[x]['total_pending_slots'], 500)
            self.assertEqual(result[x]['total_running_slots'], 1)
            for taskType, thresh in result[x]['thresholds'].items():
                if taskType == 'Processing':
                    self.assertEqual(thresh['priority'], 0)
                    self.assertEqual(thresh['max_slots'], 1)

        # Verify that sites with more than one SE were added correctly.
        cernInfo = myResourceControl.listSiteInfo("CERN")
        self.assertTrue(len(cernInfo["pnn"]) == 2)
        return
Example #20
class JobTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        locationNew = self.daoFactory(classname="Locations.New")
        locationNew.execute(siteName="test.site.ch", pnn="T2_CH_CERN")
        locationNew.execute(siteName="test2.site.ch", pnn="T2_CH_CERN")

        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()

    def createTestJob(self, subscriptionType="Merge"):
        """
        _createTestJob_

        Create a test job with two files as input.  This will also create the
        appropriate workflow, jobgroup and subscription.
        """
        testWorkflow = Workflow(spec=makeUUID(),
                                owner="Simon",
                                name=makeUUID(),
                                task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset,
                                        workflow=testWorkflow,
                                        type=subscriptionType)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        testFileA.addRun(Run(1, *[45]))
        testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        testFileB.addRun(Run(1, *[46]))
        testFileA.create()
        testFileB.create()

        testJob = Job(name=makeUUID(), files=[testFileA, testFileB])
        testJob["couch_record"] = "somecouchrecord"
        testJob["location"] = "test.site.ch"
        testJob.create(group=testJobGroup)
        testJob.associateFiles()

        return testJob

    def testCreateDeleteExists(self):
        """
        _testCreateDeleteExists_

        Create and then delete a job.  Use the job class's exists() method to
        determine if the job has been written to the database before it is
        created, after it has been created and after it has been deleted.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Simon",
                                name="wf001",
                                task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        testFileA.create()
        testFileB.create()

        testJob = Job(name="TestJob", files=[testFileA, testFileB])

        assert testJob.exists() is False, \
            "ERROR: Job exists before it was created"

        testJob.create(group=testJobGroup)

        assert testJob.exists() >= 0, \
            "ERROR: Job does not exist after it was created"

        testJob.delete()

        assert testJob.exists() is False, \
            "ERROR: Job exists after it was deleted"

        return

    def testCreateTransaction(self):
        """
        _testCreateTransaction_

        Create a job and save it to the database.  Roll back the database
        transaction and verify that the job is no longer in the database.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Simon",
                                name="wf001",
                                task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        testFileA.create()
        testFileB.create()

        myThread = threading.currentThread()
        myThread.transaction.begin()

        testJob = Job(name="TestJob", files=[testFileA, testFileB])

        assert testJob.exists() is False, \
            "ERROR: Job exists before it was created"

        testJob.create(group=testJobGroup)

        assert testJob.exists() >= 0, \
            "ERROR: Job does not exist after it was created"

        myThread.transaction.rollback()

        assert testJob.exists() is False, \
            "ERROR: Job exists after transaction was rolled back."

        return

    def testDeleteTransaction(self):
        """
        _testDeleteTransaction_

        Create a new job and commit it to the database.  Start a new transaction
        and delete the file from the database.  Verify that the file has been
        deleted.  After that, roll back the transaction and verify that the
        job is once again in the database.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Simon",
                                name="wf001",
                                task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        testFileA.create()
        testFileB.create()

        testJob = Job(name="TestJob", files=[testFileA, testFileB])

        assert testJob.exists() is False, \
            "ERROR: Job exists before it was created"

        testJob.create(group=testJobGroup)

        assert testJob.exists() >= 0, \
            "ERROR: Job does not exist after it was created"

        myThread = threading.currentThread()
        myThread.transaction.begin()

        testJob.delete()

        assert testJob.exists() is False, \
            "ERROR: Job exists after it was deleted"

        myThread.transaction.rollback()

        assert testJob.exists() >= 0, \
            "ERROR: Job does not exist after transaction was rolled back."

        return

    def testCreateDeleteExistsNoFiles(self):
        """
        _testCreateDeleteExistsNoFiles_

        Create and then delete a job but don't add any input files to it.
        Use the job class's exists() method to determine if the job has been
        written to the database before it is created, after it has been created
        and after it has been deleted.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Simon",
                                name="wf001",
                                task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testJob = Job(name="TestJob")

        assert testJob.exists() is False, \
            "ERROR: Job exists before it was created"

        testJob.create(group=testJobGroup)

        assert testJob.exists() >= 0, \
            "ERROR: Job does not exist after it was created"

        testJob.delete()

        assert testJob.exists() is False, \
            "ERROR: Job exists after it was deleted"

        return

    def testLoad(self):
        """
        _testLoad_

        Create a job and save it to the database.  Load it back from the
        database using the name and the id and then verify that all information
        was loaded correctly.
        """
        testJobA = self.createTestJob()
        testJobB = Job(id=testJobA["id"])
        testJobC = Job(name=testJobA["name"])
        testJobB.load()
        testJobC.load()

        assert (testJobA["id"] == testJobB["id"]) and \
               (testJobA["name"] == testJobB["name"]) and \
               (testJobA["jobgroup"] == testJobB["jobgroup"]) and \
               (testJobA["couch_record"] == testJobB["couch_record"]) and \
               (testJobA["location"] == testJobB["location"]), \
            "ERROR: Load from ID didn't load everything correctly"

        assert (testJobA["id"] == testJobC["id"]) and \
               (testJobA["name"] == testJobC["name"]) and \
               (testJobA["jobgroup"] == testJobC["jobgroup"]) and \
               (testJobA["couch_record"] == testJobC["couch_record"]) and \
               (testJobA["location"] == testJobC["location"]), \
            "ERROR: Load from name didn't load everything correctly"

        self.assertEqual(testJobB['outcome'], 'failure')
        self.assertEqual(testJobC['outcome'], 'failure')
        self.assertEqual(testJobB['fwjr'], None)
        self.assertEqual(testJobC['fwjr'], None)

        return

    def testLoadData(self):
        """
        _testLoadData_

        Create a job and save it to the database.  Load it back from the
        database using the name and the id.  Verify that all job information
        is correct including input files and the job mask.
        """
        testJobA = self.createTestJob()

        testJobA["mask"]["FirstEvent"] = 1
        testJobA["mask"]["LastEvent"] = 2
        testJobA["mask"]["FirstLumi"] = 3
        testJobA["mask"]["LastLumi"] = 4
        testJobA["mask"]["FirstRun"] = 5
        testJobA["mask"]["LastRun"] = 6

        testJobA.save()

        testJobB = Job(id=testJobA["id"])
        testJobC = Job(name=testJobA["name"])
        testJobB.loadData()
        testJobC.loadData()

        assert (testJobA["id"] == testJobB["id"]) and \
               (testJobA["name"] == testJobB["name"]) and \
               (testJobA["jobgroup"] == testJobB["jobgroup"]) and \
               (testJobA["couch_record"] == testJobB["couch_record"]) and \
               (testJobA["location"] == testJobB["location"]), \
            "ERROR: Load from ID didn't load everything correctly"

        assert (testJobA["id"] == testJobC["id"]) and \
               (testJobA["name"] == testJobC["name"]) and \
               (testJobA["jobgroup"] == testJobC["jobgroup"]) and \
               (testJobA["couch_record"] == testJobC["couch_record"]) and \
               (testJobA["location"] == testJobC["location"]), \
            "ERROR: Load from name didn't load everything correctly"

        assert testJobA["mask"] == testJobB["mask"], \
            "ERROR: Job mask did not load properly"

        assert testJobA["mask"] == testJobC["mask"], \
            "ERROR: Job mask did not load properly"

        goldenFiles = testJobA.getFiles()
        for testFile in testJobB.getFiles():
            assert testFile in goldenFiles, \
                "ERROR: Job loaded an unknown file"
            goldenFiles.remove(testFile)

        assert not goldenFiles, "ERROR: Job didn't load all files"

        goldenFiles = testJobA.getFiles()
        for testFile in testJobC.getFiles():
            assert testFile in goldenFiles, \
                "ERROR: Job loaded an unknown file"
            goldenFiles.remove(testFile)

        assert not goldenFiles, "ERROR: Job didn't load all files"

        return

    def testGetFiles(self):
        """
        _testGetFiles_

        Test the Job's getFiles() method.  This should load the files from
        the database if they haven't been loaded already.
        """
        testJobA = self.createTestJob()

        testJobB = Job(id=testJobA["id"])
        testJobB.loadData()

        goldenFiles = testJobA.getFiles()
        for testFile in testJobB.getFiles():
            assert testFile in goldenFiles, \
                "ERROR: Job loaded an unknown file: %s" % testFile
            goldenFiles.remove(testFile)

        assert not goldenFiles, "ERROR: Job didn't load all files"

        return

    def testSaveTransaction(self):
        """
        _testSaveTransaction_

        Create a job and a job mask and save them both to the database.  Load
        the job from the database and verify that everything was written
        correctly.  Begin a new transaction and update the job mask again.
        Load the mask and verify that it's correct.  Finally, rollback the
        transaction and reload the mask to verify that it is in the correct
        state.
        """
        testJobA = self.createTestJob()

        testJobA["mask"]["FirstEvent"] = 1
        testJobA["mask"]["LastEvent"] = 2
        testJobA["mask"]["FirstLumi"] = 3
        testJobA["mask"]["LastLumi"] = 4
        testJobA["mask"]["FirstRun"] = 5
        testJobA["mask"]["LastRun"] = 6

        testJobA.save()

        testJobB = Job(id=testJobA["id"])
        testJobB.loadData()

        assert testJobA["mask"] == testJobB["mask"], \
            "ERROR: Job mask did not load properly"

        myThread = threading.currentThread()
        myThread.transaction.begin()

        testJobA["mask"]["FirstEvent"] = 7
        testJobA["mask"]["LastEvent"] = 8
        testJobA["mask"]["FirstLumi"] = 9
        testJobA["mask"]["LastLumi"] = 10
        testJobA["mask"]["FirstRun"] = 11
        testJobA["mask"]["LastRun"] = 12
        testJobA["name"] = "stevesJob"
        testJobA["couch_record"] = "someCouchRecord"
        testJobA["location"] = "test2.site.ch"

        testJobA.save()
        testJobC = Job(id=testJobA["id"])
        testJobC.loadData()

        assert testJobA["mask"] == testJobC["mask"], \
            "ERROR: Job mask did not load properly"

        assert testJobC["name"] == "stevesJob", \
            "ERROR: Job name did not save"

        assert testJobC["couch_record"] == "someCouchRecord", \
            "ERROR: Job couch record did not save"

        assert testJobC["location"] == "test2.site.ch", \
            "ERROR: Job site did not save"

        myThread.transaction.rollback()

        testJobD = Job(id=testJobA["id"])
        testJobD.loadData()

        assert testJobB["mask"] == testJobD["mask"], \
            "ERROR: Job mask did not load properly"

        return

    def testJobState(self):
        """
        _testJobState_

        Unittest to see if we can figure out what the jobState actually is and set it
        """

        testJobA = self.createTestJob()

        value = testJobA.getState()

        self.assertEqual(value, 'new')

        return

    def testJobCacheDir(self):
        """
        _testJobCacheDir_

        Check retrieval of the jobCache directory.
        """
        testJobA = self.createTestJob()
        value = testJobA.getCache()

        self.assertEqual(value, None)

        testJobA.setCache('UnderTheDeepBlueSea')
        value = testJobA.getCache()

        self.assertEqual(value, 'UnderTheDeepBlueSea')

        return

    def testGetOutputParentLFNs(self):
        """
        _testGetOutputParentLFNs_

        Verify that the getOutputDBSParentLFNs() method returns the correct
        parent LFNs.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Simon",
                                name="wf001",
                                task="Test")
        testWorkflow.create()

        testWMBSFileset = Fileset(name="TestFileset")
        testWMBSFileset.create()

        testSubscription = Subscription(fileset=testWMBSFileset,
                                        workflow=testWorkflow)
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testFileA = File(lfn="/this/is/a/lfnA",
                         size=1024,
                         events=10,
                         merged=True)
        testFileB = File(lfn="/this/is/a/lfnB",
                         size=1024,
                         events=10,
                         merged=True)
        testFileC = File(lfn="/this/is/a/lfnC",
                         size=1024,
                         events=10,
                         merged=False)
        testFileD = File(lfn="/this/is/a/lfnD",
                         size=1024,
                         events=10,
                         merged=False)
        testFileE = File(lfn="/this/is/a/lfnE",
                         size=1024,
                         events=10,
                         merged=True)
        testFileF = File(lfn="/this/is/a/lfnF",
                         size=1024,
                         events=10,
                         merged=True)
        testFileA.create()
        testFileB.create()
        testFileC.create()
        testFileD.create()
        testFileE.create()
        testFileF.create()

        testFileE.addChild(testFileC["lfn"])
        testFileF.addChild(testFileD["lfn"])

        testJobA = Job(name="TestJob", files=[testFileA, testFileB])
        testJobA["couch_record"] = "somecouchrecord"
        testJobA["location"] = "test.site.ch"
        testJobA.create(group=testJobGroup)
        testJobA.associateFiles()

        testJobB = Job(name="TestJobB", files=[testFileC, testFileD])
        testJobB["couch_record"] = "somecouchrecord"
        testJobB["location"] = "test.site.ch"
        testJobB.create(group=testJobGroup)
        testJobB.associateFiles()

        goldenLFNs = ["/this/is/a/lfnA", "/this/is/a/lfnB"]

        parentLFNs = testJobA.getOutputDBSParentLFNs()
        for parentLFN in parentLFNs:
            assert parentLFN in goldenLFNs, \
                "ERROR: Unknown lfn: %s" % parentLFN
            goldenLFNs.remove(parentLFN)

        assert not goldenLFNs, "ERROR: LFNs are missing: %s" % goldenLFNs

        goldenLFNs = ["/this/is/a/lfnE", "/this/is/a/lfnF"]

        parentLFNs = testJobB.getOutputDBSParentLFNs()
        for parentLFN in parentLFNs:
            assert parentLFN in goldenLFNs, \
                "ERROR: Unknown lfn: %s" % parentLFN
            goldenLFNs.remove(parentLFN)

        assert not goldenLFNs, "ERROR: LFNs are missing..."

        return

    def testJobFWJRPath(self):
        """
        _testJobFWJRPath_

        Verify the correct operation of the Jobs.SetFWJRPath and
        Jobs.GetFWJRByState DAOs.
        """
        testJobA = self.createTestJob()
        testJobA["state"] = "complete"
        testJobB = self.createTestJob()
        testJobB["state"] = "executing"
        testJobC = self.createTestJob()
        testJobC["state"] = "complete"

        myThread = threading.currentThread()
        setFWJRAction = self.daoFactory(classname="Jobs.SetFWJRPath")
        setFWJRAction.execute(jobID=testJobA["id"],
                              fwjrPath="NonsenseA",
                              conn=myThread.transaction.conn,
                              transaction=True)
        setFWJRAction.execute(jobID=testJobB["id"],
                              fwjrPath="NonsenseB",
                              conn=myThread.transaction.conn,
                              transaction=True)
        setFWJRAction.execute(jobID=testJobC["id"],
                              fwjrPath="NonsenseC",
                              conn=myThread.transaction.conn,
                              transaction=True)

        changeStateAction = self.daoFactory(classname="Jobs.ChangeState")
        changeStateAction.execute(jobs=[testJobA, testJobB, testJobC],
                                  conn=myThread.transaction.conn,
                                  transaction=True)

        getJobsAction = self.daoFactory(classname="Jobs.GetFWJRByState")
        jobs = getJobsAction.execute(state="complete",
                                     conn=myThread.transaction.conn,
                                     transaction=True)

        goldenIDs = [testJobA["id"], testJobC["id"]]
        for job in jobs:
            assert job["id"] in goldenIDs, \
                "Error: Unknown job: %s" % job["id"]

            goldenIDs.remove(job["id"])

            if job["id"] == testJobA["id"]:
                assert job["fwjr_path"] == "NonsenseA", \
                    "Error: Wrong fwjr path: %s" % job["fwjr_path"]
            else:
                assert job["fwjr_path"] == "NonsenseC", \
                    "Error: Wrong fwjr path: %s" % job["fwjr_path"]

        assert not goldenIDs, "Error: Jobs missing: %s" % len(goldenIDs)

        return

    def testFailJobInput(self):
        """
        _testFailJobInput_

        Test the Jobs.FailInput DAO and verify that it doesn't affect other
        jobs/subscriptions that run over the same files.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve",
                                name="wf001",
                                task="Test")
        bogusWorkflow = Workflow(spec="spec1.xml",
                                 owner="Steve",
                                 name="wf002",
                                 task="Test")
        testWorkflow.create()
        bogusWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        bogusFileset = Fileset(name="BogusFileset")
        testFileset.create()
        bogusFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)
        bogusSubscription = Subscription(fileset=bogusFileset,
                                         workflow=bogusWorkflow)
        testSubscription.create()
        bogusSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileB = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileC = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileA.create()
        testFileB.create()
        testFileC.create()

        testFileset.addFile([testFileA, testFileB, testFileC])
        bogusFileset.addFile([testFileA, testFileB, testFileC])
        testFileset.commit()
        bogusFileset.commit()

        testSubscription.completeFiles([testFileA, testFileB, testFileC])
        bogusSubscription.acquireFiles([testFileA, testFileB, testFileC])

        testJobGroup = JobGroup(subscription=testSubscription)
        bogusJobGroup = JobGroup(subscription=bogusSubscription)
        testJobGroup.create()
        bogusJobGroup.create()

        testJobA = Job(name="TestJobA",
                       files=[testFileA, testFileB, testFileC])
        testJobB = Job(name="TestJobB",
                       files=[testFileA, testFileB, testFileC])

        bogusJob = Job(name="BogusJob",
                       files=[testFileA, testFileB, testFileC])

        testJobA.create(group=testJobGroup)
        testJobB.create(group=testJobGroup)

        bogusJob.create(group=bogusJobGroup)

        testJobA.failInputFiles()
        testJobB.failInputFiles()

        self.assertEqual(len(testSubscription.filesOfStatus("Available")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Acquired")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Failed")), 3)
        self.assertEqual(len(testSubscription.filesOfStatus("Completed")), 0)

        changeStateAction = self.daoFactory(classname="Jobs.ChangeState")
        testJobB["state"] = "cleanout"
        changeStateAction.execute([testJobB])

        # Try again

        testJobA.failInputFiles()

        # Should now be failed
        self.assertEqual(len(testSubscription.filesOfStatus("Available")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Acquired")), 0)
        self.assertEqual(len(testSubscription.filesOfStatus("Failed")), 3)
        self.assertEqual(len(testSubscription.filesOfStatus("Completed")), 0)

        # bogus should be unchanged
        self.assertEqual(len(bogusSubscription.filesOfStatus("Available")), 0)
        self.assertEqual(len(bogusSubscription.filesOfStatus("Acquired")), 3)
        self.assertEqual(len(bogusSubscription.filesOfStatus("Failed")), 0)
        self.assertEqual(len(bogusSubscription.filesOfStatus("Completed")), 0)

        return

    def testCompleteJobInput(self):
        """
        _testCompleteJobInput_

        Verify the correct output of the CompleteInput DAO.  This should mark
        the input for a job as complete once all the jobs that run over a
        particular file have complete successfully.
        """
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve",
                                name="wf001",
                                task="Test")
        bogusWorkflow = Workflow(spec="spec1.xml",
                                 owner="Steve",
                                 name="wf002",
                                 task="Test")
        testWorkflow.create()
        bogusWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        bogusFileset = Fileset(name="BogusFileset")
        testFileset.create()
        bogusFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)
        bogusSubscription = Subscription(fileset=bogusFileset,
                                         workflow=bogusWorkflow)
        testSubscription.create()
        bogusSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileB = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileC = File(lfn=makeUUID(), locations="T2_CH_CERN")
        testFileA.create()
        testFileB.create()
        testFileC.create()

        testFileset.addFile([testFileA, testFileB, testFileC])
        bogusFileset.addFile([testFileA, testFileB, testFileC])
        testFileset.commit()
        bogusFileset.commit()

        testSubscription.acquireFiles([testFileA, testFileB, testFileC])
        bogusSubscription.acquireFiles([testFileA, testFileB, testFileC])

        testJobGroup = JobGroup(subscription=testSubscription)
        bogusJobGroup = JobGroup(subscription=bogusSubscription)
        testJobGroup.create()
        bogusJobGroup.create()

        testJobA = Job(name="TestJobA", files=[testFileA])
        testJobB = Job(name="TestJobB", files=[testFileA, testFileB])
        testJobC = Job(name="TestJobC", files=[testFileC])
        bogusJob = Job(name="BogusJob",
                       files=[testFileA, testFileB, testFileC])
        testJobA.create(group=testJobGroup)
        testJobB.create(group=testJobGroup)
        testJobC.create(group=testJobGroup)
        bogusJob.create(group=bogusJobGroup)

        testJobA["outcome"] = "success"
        testJobB["outcome"] = "failure"
        testJobC["outcome"] = "success"
        testJobA.save()
        testJobB.save()
        testJobC.save()

        testJobA.completeInputFiles()
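        # TestJobB also ran over testFileA and is still marked as failed, so no
        # input file can be flagged as Completed yet.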

        compFiles = len(testSubscription.filesOfStatus("Completed"))
        assert compFiles == 0, \
            "Error: test sub has wrong number of complete files: %s" % compFiles

        testJobB["outcome"] = "success"
        testJobB.save()

        testJobB.completeInputFiles(skipFiles=[testFileB["lfn"]])
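        # Both jobs over testFileA have now succeeded, so it becomes Completed;
        # testFileC stays Acquired (TestJobC never completed its input) and the
        # skipped testFileB ends up Failed, as the assertions below verify.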

        availFiles = len(testSubscription.filesOfStatus("Available"))
        assert availFiles == 0, \
            "Error: test sub has wrong number of available files: %s" % availFiles

        acqFiles = len(testSubscription.filesOfStatus("Acquired"))
        assert acqFiles == 1, \
            "Error: test sub has wrong number of acquired files: %s" % acqFiles

        compFiles = len(testSubscription.filesOfStatus("Completed"))
        assert compFiles == 1, \
            "Error: test sub has wrong number of complete files: %s" % compFiles

        failFiles = len(testSubscription.filesOfStatus("Failed"))
        assert failFiles == 1, \
            "Error: test sub has wrong number of failed files: %s" % failFiles

        availFiles = len(bogusSubscription.filesOfStatus("Available"))
        assert availFiles == 0, \
            "Error: test sub has wrong number of available files: %s" % availFiles

        acqFiles = len(bogusSubscription.filesOfStatus("Acquired"))
        assert acqFiles == 3, \
            "Error: test sub has wrong number of acquired files: %s" % acqFiles

        compFiles = len(bogusSubscription.filesOfStatus("Completed"))
        assert compFiles == 0, \
            "Error: test sub has wrong number of complete files: %s" % compFiles

        failFiles = len(bogusSubscription.filesOfStatus("Failed"))
        assert failFiles == 0, \
            "Error: test sub has wrong number of failed files: %s" % failFiles

        return

    def testJobTypeDAO(self):
        """
        _testJobTypeDAO_

        Verify that the Jobs.GetType DAO returns the correct job type when
        only a single job ID is passed.  The job type is retrieved from the
        subscription type.
        """
        testJob = self.createTestJob()

        jobTypeAction = self.daoFactory(classname="Jobs.GetType")
        jobType = jobTypeAction.execute(jobID=testJob["id"])

        assert jobType == "Merge", \
            "Error: GetJobType DAO returned the wrong job type."

        return

    def testJobTypeDAOBulk(self):
        """
        _testJobTypeDAOBulk_

        Verify that the Jobs.GetType DAO returns the correct job type when
        a list of job IDs is passed.  The job type is retrieved from the
        subscription type.
        """
        testJobA = self.createTestJob(subscriptionType="Merge")
        testJobB = self.createTestJob(subscriptionType="Processing")
        testJobC = self.createTestJob(subscriptionType="Production")
        testJobD = self.createTestJob(subscriptionType="Merge")
        testJobE = self.createTestJob(subscriptionType="Skim")

        jobIds = []
        jobIds.append(testJobA["id"])
        jobIds.append(testJobB["id"])
        jobIds.append(testJobC["id"])
        jobIds.append(testJobD["id"])
        jobIds.append(testJobE["id"])

        jobTypeAction = self.daoFactory(classname="Jobs.GetType")
        jobTypes = jobTypeAction.execute(jobID=jobIds)

        entryMap = {}
        for entry in jobTypes:
            entryMap[entry["id"]] = entry["type"]

        assert entryMap[testJobA["id"]] == "Merge", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobB["id"]] == "Processing", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobC["id"]] == "Production", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobD["id"]] == "Merge", \
            "Error: GetJobType DAO returned the wrong job type."
        assert entryMap[testJobE["id"]] == "Skim", \
            "Error: GetJobType DAO returned the wrong job type."

        return

    def testGetOutputMapDAO(self):
        """
        _testGetOutputMapDAO_

        Verify the proper behavior of the GetOutputMapDAO for a variety of
        different processing chains.
        """
        recoOutputFileset = Fileset(name="RECO")
        recoOutputFileset.create()
        mergedRecoOutputFileset = Fileset(name="MergedRECO")
        mergedRecoOutputFileset.create()
        alcaOutputFileset = Fileset(name="ALCA")
        alcaOutputFileset.create()
        mergedAlcaOutputFileset = Fileset(name="MergedALCA")
        mergedAlcaOutputFileset.create()
        dqmOutputFileset = Fileset(name="DQM")
        dqmOutputFileset.create()
        mergedDqmOutputFileset = Fileset(name="MergedDQM")
        mergedDqmOutputFileset.create()
        cleanupFileset = Fileset(name="Cleanup")
        cleanupFileset.create()

        testWorkflow = Workflow(spec="wf001.xml",
                                owner="Steve",
                                name="TestWF",
                                task="None")
        testWorkflow.create()
        testWorkflow.addOutput("output", recoOutputFileset,
                               mergedRecoOutputFileset)
        testWorkflow.addOutput("ALCARECOStreamCombined", alcaOutputFileset,
                               mergedAlcaOutputFileset)
        testWorkflow.addOutput("DQM", dqmOutputFileset, mergedDqmOutputFileset)
        testWorkflow.addOutput("output", cleanupFileset)
        testWorkflow.addOutput("ALCARECOStreamCombined", cleanupFileset)
        testWorkflow.addOutput("DQM", cleanupFileset)

        testRecoMergeWorkflow = Workflow(spec="wf002.xml",
                                         owner="Steve",
                                         name="TestRecoMergeWF",
                                         task="None")
        testRecoMergeWorkflow.create()
        testRecoMergeWorkflow.addOutput("anything", mergedRecoOutputFileset,
                                        mergedRecoOutputFileset)

        testRecoProcWorkflow = Workflow(spec="wf004.xml",
                                        owner="Steve",
                                        name="TestRecoProcWF",
                                        task="None")
        testRecoProcWorkflow.create()

        testAlcaChildWorkflow = Workflow(spec="wf003.xml",
                                         owner="Steve",
                                         name="TestAlcaChildWF",
                                         task="None")
        testAlcaChildWorkflow.create()

        inputFile = File(lfn="/path/to/some/lfn",
                         size=600000,
                         events=60000,
                         locations="cmssrm.fnal.gov")
        inputFile.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()
        testFileset.addFile(inputFile)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        split_algo="EventBased",
                                        type="Processing")

        testMergeRecoSubscription = Subscription(
            fileset=recoOutputFileset,
            workflow=testRecoMergeWorkflow,
            split_algo="WMBSMergeBySize",
            type="Merge")
        testProcRecoSubscription = Subscription(fileset=recoOutputFileset,
                                                workflow=testRecoProcWorkflow,
                                                split_algo="FileBased",
                                                type="Processing")

        testChildAlcaSubscription = Subscription(
            fileset=alcaOutputFileset,
            workflow=testAlcaChildWorkflow,
            split_algo="FileBased",
            type="Processing")
        testSubscription.create()
        testMergeRecoSubscription.create()
        testProcRecoSubscription.create()
        testChildAlcaSubscription.create()
        testSubscription.acquireFiles()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testJob = Job(name="SplitJobA", files=[inputFile])
        testJob.create(group=testJobGroup)
        testJob["state"] = "complete"
        testJob.save()

        outputMapAction = self.daoFactory(classname="Jobs.GetOutputMap")
        outputMap = outputMapAction.execute(jobID=testJob["id"])

        assert len(outputMap.keys()) == 3, \
            "Error: Wrong number of outputs for primary workflow."

        goldenMap = {
            "output": (recoOutputFileset.id, mergedRecoOutputFileset.id),
            "ALCARECOStreamCombined":
            (alcaOutputFileset.id, mergedAlcaOutputFileset.id),
            "DQM": (dqmOutputFileset.id, mergedDqmOutputFileset.id)
        }
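        # Every non-cleanup output module must appear in goldenMap with the
        # expected unmerged/merged fileset pair; cleanup outputs have no merged
        # fileset and only need to point at the cleanup fileset.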

        for outputID in outputMap.keys():
            for outputFilesets in outputMap[outputID]:
                if outputFilesets["merged_output_fileset"] is None:
                    self.assertEqual(outputFilesets["output_fileset"],
                                     cleanupFileset.id,
                                     "Error: Cleanup fileset is wrong.")
                    continue

                self.assertTrue(outputID in goldenMap.keys(),
                                "Error: Output identifier is missing.")
                self.assertEqual(outputFilesets["output_fileset"],
                                 goldenMap[outputID][0],
                                 "Error: Output fileset is wrong.")
                self.assertEqual(outputFilesets["merged_output_fileset"],
                                 goldenMap[outputID][1],
                                 "Error: Merged output fileset is wrong.")
                del goldenMap[outputID]

        self.assertEqual(len(goldenMap.keys()), 0,
                         "Error: Missing output maps.")

        return

    def testLocations(self):
        """
        _testLocations_

        Test setting and getting locations using DAO objects
        """
        testJob = self.createTestJob()

        jobGetLocation = self.daoFactory(classname="Jobs.GetLocation")
        jobSetLocation = self.daoFactory(classname="Jobs.SetLocation")

        result = jobGetLocation.execute(jobid=testJob['id'])
        self.assertEqual(result, [['test.site.ch']])
        jobSetLocation.execute(jobid=testJob['id'], location="test2.site.ch")
        result = jobGetLocation.execute(jobid=testJob['id'])
        self.assertEqual(result, [['test2.site.ch']])

        testJob2 = self.createTestJob()
        testJob3 = self.createTestJob()

        binds = [{
            'jobid': testJob['id']
        }, {
            'jobid': testJob2['id']
        }, {
            'jobid': testJob3['id']
        }]
        result = jobGetLocation.execute(jobid=binds)
        self.assertEqual(result, [{
            'site_name': 'test2.site.ch',
            'id': 1
        }, {
            'site_name': 'test.site.ch',
            'id': 2
        }, {
            'site_name': 'test.site.ch',
            'id': 3
        }])

        return

    def testGetDataStructsJob(self):
        """
        _testGetDataStructsJob_

        Test the ability to 'cast' as a DataStructs job type
        """
        testJob = self.createTestJob()
        testJob['test'] = 'ThisIsATest'
        testJob.baggage.section_('TestSection')
        testJob.baggage.TestSection.test = 100
        finalJob = testJob.getDataStructsJob()

        for key in finalJob.keys():
            if key == 'input_files':
                for inputFile in testJob['input_files']:
                    self.assertEqual(
                        inputFile.returnDataStructsFile()
                        in finalJob['input_files'], True)
                continue
            self.assertEqual(testJob[key], finalJob[key])

        self.assertEqual(str(finalJob.__class__),
                         "<class 'WMCore.DataStructs.Job.Job'>")
        self.assertEqual(str(finalJob["mask"].__class__),
                         "<class 'WMCore.DataStructs.Mask.Mask'>")

        for key in testJob["mask"]:
            self.assertEqual(testJob["mask"][key], finalJob["mask"][key],
                             "Error: The masks should be the same")

        self.assertEqual(finalJob.baggage.TestSection.test, 100)
        return

    def testLoadOutputID(self):
        """
        _testLoadOutputID_

        Test whether we can load an output ID for a job
        """

        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve",
                                name="wf001",
                                task="Test")

        testWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)

        testSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="test.site.ch")
        testFileB = File(lfn=makeUUID(), locations="test.site.ch")
        testFileA.create()
        testFileB.create()

        testFileset.addFile([testFileA, testFileB])
        testFileset.commit()

        testSubscription.acquireFiles([testFileA, testFileB])

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testJob = Job()
        testJob.create(group=testJobGroup)

        self.assertEqual(testJob.loadOutputID(), testJobGroup.output.id)

        return

    def testLoadForTaskArchiver(self):
        """
        _testLoadForTaskArchiver_

        Test the return value of the Jobs.LoadForTaskArchiver DAO used by the
        TaskArchiver.
        """
        # Create 2 jobs
        jobA = self.createTestJob()
        jobB = self.createTestJob()

        # Put a mask in one
        mask = Mask()
        mask.addRunAndLumis(1, [45])
        mask.save(jobA['id'])

        # Execute the DAO
        taskArchiverDAO = self.daoFactory(classname="Jobs.LoadForTaskArchiver")
        jobs = taskArchiverDAO.execute([jobA['id'], jobB['id']])

        # Sort the jobs and check the results, we care about id, input files and mask
        jobs.sort(key=lambda x: x['id'])

        jobAprime = jobs[0]
        lfns = [x['lfn'] for x in jobAprime['input_files']]
        self.assertTrue('/this/is/a/lfnA' in lfns,
                        'Missing LFN lfnA from the input files')
        self.assertTrue('/this/is/a/lfnB' in lfns,
                        'Missing LFN lfnB from the input files')

        for inFile in jobAprime['input_files']:
            if inFile['lfn'] == '/this/is/a/lfnA':
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [45], 'The lumis are wrong')
            else:
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [46], 'The lumis are wrong')

        mask = jobAprime['mask']
        self.assertEqual(mask['runAndLumis'], {1: [[45, 45]]},
                         "Wrong run and lumis in mask")

        jobBprime = jobs[1]
        for inFile in jobBprime['input_files']:
            if inFile['lfn'] == '/this/is/a/lfnA':
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [45], 'The lumis are wrong')
            else:
                run = inFile['runs'].pop()
                self.assertEqual(run.run, 1, 'The run number is wrong')
                self.assertEqual(run.lumis, [46], 'The lumis are wrong')
        runs = []
        for inputFile in jobBprime['input_files']:
            runs.extend(inputFile.getRuns())
        self.assertEqual(jobBprime['mask'].filterRunLumisByMask(runs=runs),
                         runs, "Wrong mask in jobB")

        return

    def testMask(self):
        """
        _testMask_

        Test creating a job with run/lumi masks, reloading it and filtering
        runs through the stored mask.
        """

        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve",
                                name="wf001",
                                task="Test")

        testWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)

        testSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="test.site.ch")
        testFileB = File(lfn=makeUUID(), locations="test.site.ch")
        testFileA.create()
        testFileB.create()

        testFileset.addFile([testFileA, testFileB])
        testFileset.commit()

        testSubscription.acquireFiles([testFileA, testFileB])

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        testJob = Job()
        testJob['mask'].addRunAndLumis(run=100, lumis=[101, 102])
        testJob['mask'].addRunAndLumis(run=200, lumis=[201, 202])
        testJob.create(group=testJobGroup)

        loadJob = Job(id=testJob.exists())
        loadJob.loadData()

        runs = loadJob['mask'].getRunAndLumis()
        self.assertEqual(len(runs), 2)
        self.assertEqual(runs[100], [[101, 102]])
        self.assertEqual(runs[200], [[201, 202]])

        bigRun = Run(100, *[101, 102, 103, 104])
        badRun = Run(300, *[1001, 1002])
        result = loadJob['mask'].filterRunLumisByMask([bigRun, badRun])
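        # Only run/lumi pairs covered by the mask survive the filter: run 100 is
        # trimmed down to lumis 101-102 and run 300 is dropped entirely.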

        self.assertEqual(len(result), 1)
        alteredRun = result.pop()
        self.assertEqual(alteredRun.run, 100)
        self.assertEqual(alteredRun.lumis, [101, 102])

        run0 = Run(300, *[1001, 1002])
        run1 = Run(300, *[1001, 1002])
        loadJob['mask'].filterRunLumisByMask([run0, run1])

        return

    def testAutoIncrementCheck(self):
        """
        _testAutoIncrementCheck_

        Test and see whether we can find and set the auto_increment values
        """
        myThread = threading.currentThread()
        if not myThread.dialect.lower() == 'mysql':
            return

        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve",
                                name="wf001",
                                task="Test")

        testWorkflow.create()

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow)

        testSubscription.create()

        testFileA = File(lfn=makeUUID(), locations="test.site.ch")
        testFileB = File(lfn=makeUUID(), locations="test.site.ch")
        testFileA.create()
        testFileB.create()

        testFileset.addFile([testFileA, testFileB])
        testFileset.commit()

        testSubscription.acquireFiles([testFileA, testFileB])

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        incrementDAO = self.daoFactory(classname="Jobs.AutoIncrementCheck")
        incrementDAO.execute()

        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 1)

        incrementDAO.execute()

        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 2)

        incrementDAO.execute(input=10)

        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 11)

        incrementDAO.execute(input=5)
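        # Requesting a value below the current counter must not move it
        # backwards; the next job id simply continues from the current maximum.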

        testJob = Job()
        testJob.create(group=testJobGroup)
        self.assertEqual(testJob.exists(), 12)

        return
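
All of the Jobs DAO tests above follow the same pattern: build the DAO through a
DAOFactory bound to the WMBS package and the thread-local database interface,
then call execute().  A minimal sketch of that pattern (assuming TestInit has
already installed the WMBS schema, as in the setUp methods used by these tests):

import logging
import threading

from WMCore.DAOFactory import DAOFactory

myThread = threading.currentThread()
daoFactory = DAOFactory(package="WMCore.WMBS",
                        logger=logging,
                        dbinterface=myThread.dbi)

# e.g. look up the subscription type for a single job or a list of job ids
jobTypeAction = daoFactory(classname="Jobs.GetType")
singleType = jobTypeAction.execute(jobID=1)
bulkTypes = jobTypeAction.execute(jobID=[1, 2, 3])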
Example #21
class RepackTest(unittest.TestCase):
    """
    _RepackTest_

    Test for Repack job splitter
    """
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules=["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 'SomePNN')
                                    """,
                                 transaction=False)

        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 'SomePNN2')
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'TIME': int(time.time()),
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in [1, 2, 3, 4]:
            insertLumiDAO.execute(binds={
                'RUN': 1,
                'LUMI': lumi
            },
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "A"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "A", "TestFileset1")

        self.fileset1 = Fileset(name="TestFileset1")
        self.fileset1.load()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset=self.fileset1,
                                          workflow=workflow1,
                                          split_algo="Repack",
                                          type="Repack")
        self.subscription1.create()

        # keep for later
        self.insertClosedLumiDAO = daoFactory(
            classname="RunLumiCloseout.InsertClosedLumi")
        self.currentTime = int(time.time())

        # default split parameters (the size thresholds are in bytes)
        self.splitArgs = {}
        self.splitArgs['maxSizeSingleLumi'] = 20 * 1024 * 1024 * 1024
        self.splitArgs['maxSizeMultiLumi'] = 10 * 1024 * 1024 * 1024
        self.splitArgs['maxInputEvents'] = 500000
        self.splitArgs['maxInputFiles'] = 1000

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def getNumActiveSplitLumis(self):
        """
        _getNumActiveSplitLumis_

        helper function that counts the number of active split lumis
        """
        myThread = threading.currentThread()

        results = myThread.dbi.processData("""SELECT COUNT(*)
                                              FROM lumi_section_split_active
                                              """,
                                           transaction=False)[0].fetchall()

        return results[0][0]

    def test00(self):
        """
        _test00_

        Test that the job name prefix feature works
        Test multi lumi size threshold
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)

        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        mySplitArgs['maxSizeMultiLumi'] = self.splitArgs['maxSizeMultiLumi']
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxSizeMultiLumi'] = 5000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test01(self):
        """
        _test01_

        Test multi lumi event threshold
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputEvents'] = 500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test02(self):
        """
        _test02_

        Test single lumi size threshold
        Single lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxSizeSingleLumi'] = 6500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")

        return

    def test03(self):
        """
        _test03_

        Test single lumi event threshold
        Single lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputEvents'] = 650
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")

        return

    def test04(self):
        """
        _test04_

        Test streamer count threshold (only multi lumi)
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test05(self):
        """
        _test05_

        Test repacking of multiple lumis with holes in the lumi sequence
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.insertClosedLumiDAO.execute(binds={
            'RUN': 1,
            'LUMI': 3,
            'STREAM': "A",
            'FILECOUNT': 0,
            'INSERT_TIME': self.currentTime,
            'CLOSE_TIME': self.currentTime
        },
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")

        return

    def test06(self):
        """
        _test06_

        Test repacking of 3 lumis
        2 small lumis (single job), followed by a big one (multiple jobs)

        files for lumi 1 and 2 are below multi-lumi thresholds
        files for lumi 3 are above single-lumi threshold

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3]:
            filecount = 2
            for i in range(filecount):
                if lumi == 3:
                    nevents = 500
                else:
                    nevents = 100
                newFile = File(makeUUID(), size=1000, events=nevents)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        mySplitArgs['maxInputEvents'] = 900
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 3,
                         "ERROR: JobFactory didn't create three jobs")

        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")

        self.assertEqual(len(jobGroups[0].jobs[1].getFiles()), 1,
                         "ERROR: second job does not process 1 file")

        self.assertEqual(len(jobGroups[0].jobs[2].getFiles()), 1,
                         "ERROR: third job does not process 1 file")

        return
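
The Repack tests above all drive the splitter through the same calls; a minimal
sketch of that pattern (subscription1 stands for the Repack subscription created
in setUp, and the thresholds shown are the defaults the tests start from):

splitterFactory = SplitterFactory(package="T0.JobSplitting")
jobFactory = splitterFactory(package="WMCore.WMBS", subscription=subscription1)

# Per the behaviour exercised above, no jobs are created until one of the
# thresholds is exceeded or the input fileset is closed.
jobGroups = jobFactory(maxSizeSingleLumi=20 * 1024 * 1024 * 1024,
                       maxSizeMultiLumi=10 * 1024 * 1024 * 1024,
                       maxInputEvents=500000,
                       maxInputFiles=1000)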
Example #22
class HeartbeatTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Set up the database and logging connections and create the
        Heartbeat tables.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()  # logLevel = logging.SQLDEBUG
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.Agent.Database"],
                                useDefault=False)
        self.heartbeat = HeartbeatAPI("testComponent")

    def tearDown(self):
        """
        _tearDown_

        Drop all the Heartbeat tables.
        """

        self.testInit.clearDatabase()

    def testHeartbeat(self):
        testComponent = HeartbeatAPI("testComponent")
        testComponent.registerComponent()
        self.assertEqual(testComponent.getHeartbeatInfo(), [])

        testComponent.updateWorkerHeartbeat("testWorker")
        result = testComponent.getHeartbeatInfo()
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['worker_name'], "testWorker")
        time.sleep(1)

        testComponent.updateWorkerHeartbeat("testWorker2")
        result = testComponent.getHeartbeatInfo()
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['worker_name'], "testWorker2")

        time.sleep(1)
        testComponent.updateWorkerHeartbeat("testWorker")
        result = testComponent.getHeartbeatInfo()
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['worker_name'], "testWorker")

        testComponent = HeartbeatAPI("test2Component")
        testComponent.registerComponent()
        time.sleep(1)
        testComponent.updateWorkerHeartbeat("test2Worker")

        result = testComponent.getHeartbeatInfo()
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['worker_name'], "testWorker")
        self.assertEqual(result[1]['worker_name'], "test2Worker")

        time.sleep(1)
        testComponent.updateWorkerHeartbeat("test2Worker2")
        result = testComponent.getHeartbeatInfo()
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['worker_name'], "testWorker")
        self.assertEqual(result[1]['worker_name'], "test2Worker2")

        time.sleep(1)
        testComponent.updateWorkerHeartbeat("test2Worker")
        result = testComponent.getHeartbeatInfo()
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['worker_name'], "testWorker")
        self.assertEqual(result[1]['worker_name'], "test2Worker")

        testComponent.updateWorkerError("test2Worker", "Error1")
        result = testComponent.getHeartbeatInfo()
        self.assertEqual(result[1]['error_message'], "Error1")
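
A minimal sketch of the HeartbeatAPI calls exercised above (assuming the
WMCore.Agent.Database schema has been installed as in setUp):

heartbeat = HeartbeatAPI("testComponent")
heartbeat.registerComponent()                  # register the component once
heartbeat.updateWorkerHeartbeat("testWorker")  # refresh this worker's timestamp

# Per the assertions above, getHeartbeatInfo() returns one entry per registered
# component, carrying the most recently updated worker for that component.
info = heartbeat.getHeartbeatInfo()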
Example #23
class FileBasedTest(unittest.TestCase):
    """
    _FileBasedTest_

    Test file based job splitting.
    """
    def setUp(self):
        """
        _setUp_

        Create the WMBS schema and register the test site locations.
        Subscriptions are created per test via createTestSubscription.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        self.nSites = 2
        locationAction = daofactory(classname="Locations.New")
        for site in range(self.nSites):
            locationAction.execute(siteName="site%i" % site,
                                   pnn="T2_CH_CERN_%i" % site)

        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase(modules=["WMCore.WMBS"])

        return

    def createTestSubscription(self, nFiles, nSites=1, closeFileset=False):
        """
        _createTestSubscription_

        Create a test subscription whose fileset contains nFiles files at
        each of nSites sites.
        """

        if nSites > self.nSites:
            nSites = self.nSites

        testFileset = Fileset(name="TestFileset")
        testFileset.create()

        # Create a testWorkflow
        testWorkflow = Workflow(spec="spec.xml",
                                owner="Steve",
                                name="wf001",
                                task="Test")
        testWorkflow.create()

        # Create the files for each site
        for s in range(nSites):
            for i in range(nFiles):
                newFile = File(makeUUID(),
                               size=1024,
                               events=100,
                               locations=set(["T2_CH_CERN_%i" % s]))
                newFile.create()
                testFileset.addFile(newFile)
        testFileset.commit()

        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        split_algo="MinFileBased",
                                        type="Processing")
        testSubscription.create()

        # Close the fileset
        if closeFileset:
            testFileset.markOpen(isOpen=False)

        return testSubscription

    def testA_ExactFiles(self):
        """
        _testExactFiles_

        Test file based job splitting when the number of files per job is
        exactly the same as the number of files in the input fileset.
        """
        nFiles = 5
        sub = self.createTestSubscription(nFiles=nFiles)
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS", subscription=sub)

        jobGroups = jobFactory(files_per_job=nFiles)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)
        self.assertEqual(len(jobGroups[0].jobs[0]['input_files']), nFiles)
        return

    def testB_LessFilesOpen(self):
        """
        _LessFilesOpen_

        Test with fewer files than required.
        If the fileset is open, this should produce no jobs.
        """

        sub = self.createTestSubscription(nFiles=5)
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS", subscription=sub)

        jobGroups = jobFactory(files_per_job=10)

        self.assertEqual(len(jobGroups), 0)
        return

    def testC_LessFilesClosed(self):
        """
        _LessFilesClosed_

        Test with fewer files than required.
        If the fileset is closed, this should produce one job.
        """

        sub = self.createTestSubscription(nFiles=5, closeFileset=True)
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS", subscription=sub)

        jobGroups = jobFactory(files_per_job=10)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)
        self.assertEqual(len(jobGroups[0].jobs[0]['input_files']), 5)
        return

    def testD_MoreFilesOpen(self):
        """
        _MoreFilesOpen_

        If you pass it more files than files_per_job, it should produce
        full jobs up to the limit and leave the remaining files unprocessed
        while the fileset is open.
        """

        sub = self.createTestSubscription(nFiles=10)
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS", subscription=sub)

        jobGroups = jobFactory(files_per_job=3)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 3)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job['input_files']), 3)
        return

    def testE_MoreFilesClosed(self):
        """
        _MoreFilesClosed_

        If you pass it more files than files_per_job, it should produce
        enough jobs to hold all the files in the fileset once the
        fileset is closed.
        """

        sub = self.createTestSubscription(nFiles=10, closeFileset=True)
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS", subscription=sub)

        jobGroups = jobFactory(files_per_job=3)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 4)
        for job in jobGroups[0].jobs[:3]:
            self.assertEqual(len(job['input_files']), 3)
        self.assertEqual(len(jobGroups[0].jobs[3]['input_files']), 1)
        return
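
A minimal sketch of the splitting calls these file-based tests use (assuming a
MinFileBased subscription built as in createTestSubscription above).  As the
assertions show, an open fileset only yields full jobs of files_per_job files,
while a closed fileset also flushes the leftover files into a final smaller job.

splitter = SplitterFactory()
jobFactory = splitter(package="WMCore.WMBS", subscription=testSubscription)
jobGroups = jobFactory(files_per_job=3)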
Example #24
class ResourceControlTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Install schema and create a DAO factory for WMBS.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", "WMCore.ResourceControl", "WMCore.BossAir"
        ],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        self.baDaoFactory = DAOFactory(package="WMCore.BossAir",
                                       logger=myThread.logger,
                                       dbinterface=myThread.dbi)

        self.insertRunJob = self.baDaoFactory(classname="NewJobs")
        self.insertState = self.baDaoFactory(classname="NewState")
        states = ['PEND', 'RUN', 'Idle', 'Running']
        self.insertState.execute(states)

        self.tempDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear the schema.
        """
        self.testInit.clearDatabase()
        return

    def testInsert(self):
        """
        _testInsert_

        Verify that inserting sites and thresholds works correctly, even if the
        site or threshold already exists.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite2", 100, 200, "testSE2",
                                     "testCE2")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite1", "Merge", 250, 150)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 30)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 100)

        createThresholds = myResourceControl.listThresholdsForCreate()

        self.assertEqual(len(createThresholds.keys()), 2,
                         "Error: Wrong number of site in Resource Control DB")

        self.assertTrue("testSite1" in createThresholds.keys(),
                        "Error: Test Site 1 missing from thresholds.")

        self.assertTrue("testSite2" in createThresholds.keys(),
                        "Error: Test Site 2 missing from thresholds.")

        self.assertEqual(createThresholds["testSite1"]["total_slots"], 10,
                         "Error: Wrong number of total slots.")

        self.assertEqual(
            createThresholds["testSite1"]["pending_jobs"], 0,
            "Error: Wrong number of running jobs: %s" %
            createThresholds["testSite1"]["pending_jobs"])

        self.assertEqual(createThresholds["testSite2"]["total_slots"], 100,
                         "Error: Wrong number of total slots.")

        self.assertEqual(createThresholds["testSite2"]["pending_jobs"], 0,
                         "Error: Wrong number of running jobs.")

        thresholds = myResourceControl.listThresholdsForSubmit()

        self.assertEqual(
            len(thresholds.keys()), 2,
            "Error: Wrong number of sites in Resource Control DB")

        self.assertTrue("testSite1" in thresholds.keys(),
                        "Error: testSite1 missing from thresholds.")

        self.assertTrue("testSite2" in thresholds.keys(),
                        "Error: testSite2 missing from thresholds.")

        site1Info = thresholds["testSite1"]
        site2Info = thresholds["testSite2"]
        site1Thresholds = site1Info["thresholds"]
        site2Thresholds = site2Info["thresholds"]

        procThreshold1 = None
        procThreshold2 = None
        mergeThreshold1 = None
        mergeThreshold2 = None
        for threshold in site1Thresholds:
            if threshold["task_type"] == "Merge":
                mergeThreshold1 = threshold
            elif threshold["task_type"] == "Processing":
                procThreshold1 = threshold
        for threshold in site2Thresholds:
            if threshold["task_type"] == "Merge":
                mergeThreshold2 = threshold
            elif threshold["task_type"] == "Processing":
                procThreshold2 = threshold

        self.assertEqual(len(site1Thresholds), 2,
                         "Error: Wrong number of task types.")

        self.assertEqual(len(site2Thresholds), 2,
                         "Error: Wrong number of task types.")

        self.assertNotEqual(procThreshold1, None)
        self.assertNotEqual(procThreshold2, None)
        self.assertNotEqual(mergeThreshold1, None)
        self.assertNotEqual(mergeThreshold2, None)

        self.assertEqual(site1Info["total_pending_slots"], 10,
                         "Error: Site thresholds wrong")

        self.assertEqual(site1Info["total_running_slots"], 20,
                         "Error: Site thresholds wrong")

        self.assertEqual(site1Info["total_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(site1Info["total_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["max_slots"], 20,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold1["pending_slots"], 10,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["max_slots"], 250,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold1["pending_slots"], 150,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_pending_slots"], 100,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_running_slots"], 200,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(site2Info["total_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["max_slots"], 50,
                         "Error: Site thresholds wrong")

        self.assertEqual(procThreshold2["pending_slots"], 30,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["task_running_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["task_pending_jobs"], 0,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["max_slots"], 135,
                         "Error: Site thresholds wrong")

        self.assertEqual(mergeThreshold2["pending_slots"], 100,
                         "Error: Site thresholds wrong")

    def testList(self):
        """
        _testList_

        Test the functions that list thresholds for creating jobs and submitting
        jobs.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1",
                                     "T1_US_FNAL", "LsfPlugin")
        myResourceControl.insertSite("testSite2", 20, 40, "testSE2", "testCE2")

        myResourceControl.insertThreshold("testSite1", "Processing", 20, 10)
        myResourceControl.insertThreshold("testSite1", "Merge", 200, 100)
        myResourceControl.insertThreshold("testSite2", "Processing", 50, 25)
        myResourceControl.insertThreshold("testSite2", "Merge", 135, 65)

        testWorkflow = Workflow(spec=makeUUID(),
                                owner="Steve",
                                name=makeUUID(),
                                task="Test")
        testWorkflow.create()

        testFilesetA = Fileset(name="TestFilesetA")
        testFilesetA.create()
        testFilesetB = Fileset(name="TestFilesetB")
        testFilesetB.create()
        testFilesetC = Fileset(name="TestFilesetC")
        testFilesetC.create()

        testFileA = File(lfn="testFileA",
                         locations=set(["testSE1", "testSE2"]))
        testFileA.create()
        testFilesetA.addFile(testFileA)
        testFilesetA.commit()
        testFilesetB.addFile(testFileA)
        testFilesetB.commit()
        testFilesetC.addFile(testFileA)
        testFilesetC.commit()

        testSubscriptionA = Subscription(fileset=testFilesetA,
                                         workflow=testWorkflow,
                                         type="Processing")
        testSubscriptionA.create()
        testSubscriptionA.addWhiteBlackList([{
            "site_name": "testSite1",
            "valid": True
        }])
        testSubscriptionB = Subscription(fileset=testFilesetB,
                                         workflow=testWorkflow,
                                         type="Processing")
        testSubscriptionB.create()
        testSubscriptionB.addWhiteBlackList([{
            "site_name": "testSite1",
            "valid": False
        }])
        testSubscriptionC = Subscription(fileset=testFilesetC,
                                         workflow=testWorkflow,
                                         type="Merge")
        testSubscriptionC.create()

        testJobGroupA = JobGroup(subscription=testSubscriptionA)
        testJobGroupA.create()
        testJobGroupB = JobGroup(subscription=testSubscriptionB)
        testJobGroupB.create()
        testJobGroupC = JobGroup(subscription=testSubscriptionC)
        testJobGroupC.create()

        # Site 1: has been assigned a location and is complete.
        testJobA = Job(name="testJobA", files=[testFileA])
        testJobA["couch_record"] = makeUUID()
        testJobA.create(group=testJobGroupA)
        testJobA["state"] = "success"

        # Site 1: has been assigned a location and is incomplete.
        testJobB = Job(name="testJobB", files=[testFileA])
        testJobB["couch_record"] = makeUUID()
        testJobB.create(group=testJobGroupA)
        testJobB["state"] = "executing"
        runJobB = RunJob()
        runJobB.buildFromJob(testJobB)
        runJobB["status"] = "PEND"

        # Does not have a location; white-listed to site 1.
        testJobC = Job(name="testJobC", files=[testFileA])
        testJobC["couch_record"] = makeUUID()
        testJobC.create(group=testJobGroupA)
        testJobC["state"] = "new"

        # Site 2: has been assigned a location and is complete.
        testJobD = Job(name="testJobD", files=[testFileA])
        testJobD["couch_record"] = makeUUID()
        testJobD.create(group=testJobGroupB)
        testJobD["state"] = "success"

        # Site 2: has been assigned a location and is incomplete.
        testJobE = Job(name="testJobE", files=[testFileA])
        testJobE["couch_record"] = makeUUID()
        testJobE.create(group=testJobGroupB)
        testJobE["state"] = "executing"
        runJobE = RunJob()
        runJobE.buildFromJob(testJobE)
        runJobE["status"] = "RUN"

        # Does not have a location; site 1 is blacklisted.
        testJobF = Job(name="testJobF", files=[testFileA])
        testJobF["couch_record"] = makeUUID()
        testJobF.create(group=testJobGroupB)
        testJobF["state"] = "new"

        # Site 3: has been assigned a location and is complete.
        testJobG = Job(name="testJobG", files=[testFileA])
        testJobG["couch_record"] = makeUUID()
        testJobG.create(group=testJobGroupC)
        testJobG["state"] = "cleanout"

        # Site 3: has been assigned a location and is incomplete.
        testJobH = Job(name="testJobH", files=[testFileA])
        testJobH["couch_record"] = makeUUID()
        testJobH.create(group=testJobGroupC)
        testJobH["state"] = "new"

        # Site 3: does not have a location.
        testJobI = Job(name="testJobI", files=[testFileA])
        testJobI["couch_record"] = makeUUID()
        testJobI.create(group=testJobGroupC)
        testJobI["state"] = "new"

        # Site 3: does not have a location and is in cleanout.
        testJobJ = Job(name="testJobJ", files=[testFileA])
        testJobJ["couch_record"] = makeUUID()
        testJobJ.create(group=testJobGroupC)
        testJobJ["state"] = "cleanout"

        changeStateAction = self.daoFactory(classname="Jobs.ChangeState")
        changeStateAction.execute(jobs=[
            testJobA, testJobB, testJobC, testJobD, testJobE, testJobF,
            testJobG, testJobH, testJobI, testJobJ
        ])

        self.insertRunJob.execute([runJobB, runJobE])
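        # runJobB (PEND) and runJobE (RUN) are now registered as BossAir run jobs,
        # so they should show up in the pending/running counts of the submit
        # thresholds checked below.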

        setLocationAction = self.daoFactory(classname="Jobs.SetLocation")
        setLocationAction.execute(testJobA["id"], "testSite1")
        setLocationAction.execute(testJobB["id"], "testSite1")
        setLocationAction.execute(testJobD["id"], "testSite1")
        setLocationAction.execute(testJobE["id"], "testSite1")
        setLocationAction.execute(testJobG["id"], "testSite1")
        setLocationAction.execute(testJobH["id"], "testSite1")

        createThresholds = myResourceControl.listThresholdsForCreate()
        submitThresholds = myResourceControl.listThresholdsForSubmit()
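        # The create listing reports per-site slot totals and a pending-job count
        # (location-less jobs are attributed according to the subscription
        # white/black lists), while the submit listing breaks the running and
        # pending counts down by task type.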

        self.assertEqual(len(createThresholds.keys()), 2,
                         "Error: Wrong number of sites in create thresholds")

        self.assertEqual(createThresholds["testSite1"]["total_slots"], 10,
                         "Error: Wrong number of slots for site 1")

        self.assertEqual(createThresholds["testSite2"]["total_slots"], 20,
                         "Error: Wrong number of slots for site 2")

        # We should have two running jobs with locations at site one,
        # two running jobs without locations at site two, and one running
        # job without a location at site one and two.
        self.assertEqual(createThresholds["testSite1"]["pending_jobs"], 4,
                         "Error: Wrong number of pending jobs for site 1")

        # We should have one running job with a location at site 2 and
        # another running job without a location.
        self.assertEqual(createThresholds["testSite2"]["pending_jobs"], 2,
                         "Error: Wrong number of pending jobs for site 2")

        # We should also have a cms_name (the CMS site name)
        self.assertEqual(createThresholds["testSite1"]["cms_name"],
                         "T1_US_FNAL")
        self.assertEqual(createThresholds["testSite2"]["cms_name"], None)

        mergeThreshold1 = None
        mergeThreshold2 = None
        procThreshold1 = None
        procThreshold2 = None
        self.assertEqual(submitThresholds["testSite1"]['cms_name'],
                         'T1_US_FNAL')
        for threshold in submitThresholds["testSite1"]["thresholds"]:
            if threshold['task_type'] == "Merge":
                mergeThreshold1 = threshold
            elif threshold['task_type'] == "Processing":
                procThreshold1 = threshold
        self.assertEqual(submitThresholds["testSite2"]['cms_name'], None)
        for threshold in submitThresholds["testSite2"]["thresholds"]:
            if threshold['task_type'] == "Merge":
                mergeThreshold2 = threshold
            elif threshold['task_type'] == "Processing":
                procThreshold2 = threshold
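        # Only runJobE (RUN) should count as running and runJobB (PEND) as pending
        # at testSite1; no run jobs were assigned to testSite2.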

        self.assertEqual(
            submitThresholds["testSite1"]["total_running_jobs"], 1,
            "Error: Wrong number of running jobs for submit thresholds.")
        self.assertEqual(
            submitThresholds["testSite2"]["total_running_jobs"], 0,
            "Error: Wrong number of running jobs for submit thresholds.")
        self.assertEqual(
            submitThresholds["testSite1"]["total_pending_jobs"], 1,
            "Error: Wrong number of pending jobs for submit thresholds.")
        self.assertEqual(
            submitThresholds["testSite2"]["total_pending_jobs"], 0,
            "Error: Wrong number of pending jobs for submit thresholds.")

        self.assertEqual(
            mergeThreshold1["task_running_jobs"], 0,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            mergeThreshold1["task_pending_jobs"], 0,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            procThreshold1["task_running_jobs"], 1,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            procThreshold1["task_pending_jobs"], 1,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            mergeThreshold2["task_running_jobs"], 0,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            mergeThreshold2["task_pending_jobs"], 0,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            procThreshold2["task_running_jobs"], 0,
            "Error: Wrong number of task running jobs for submit thresholds.")
        self.assertEqual(
            procThreshold2["task_pending_jobs"], 0,
            "Error: Wrong number of task running jobs for submit thresholds.")

        return

    def testListSiteInfo(self):
        """
        _testListSiteInfo_

        Verify that the listSiteInfo() method works properly.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")
        myResourceControl.insertSite("testSite2", 100, 200, "testSE2",
                                     "testCE2")

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["site_name"], "testSite1",
                         "Error: Site name is wrong.")

        self.assertEqual(siteInfo["se_name"], ["testSE1"],
                         "Error: SE name is wrong.")

        self.assertEqual(siteInfo["ce_name"], "testCE1",
                         "Error: CE name is wrong.")

        self.assertEqual(siteInfo["pending_slots"], 10,
                         "Error: Pending slots is wrong.")

        self.assertEqual(siteInfo["running_slots"], 20,
                         "Error: Pending slots is wrong.")

        return

    def testUpdateJobSlots(self):
        """
        _testUpdateJobSlots_

        Verify that it is possible to update the number of job slots at a site.
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 10, 20, "testSE1", "testCE1")

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["pending_slots"], 10,
                         "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 20,
                         "Error: Running slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", pendingJobSlots=20)

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["pending_slots"], 20,
                         "Error: Pending slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", runningJobSlots=40)

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["running_slots"], 40,
                         "Error: Running slots is wrong.")

        myResourceControl.setJobSlotsForSite("testSite1", 5, 10)

        siteInfo = myResourceControl.listSiteInfo("testSite1")

        self.assertEqual(siteInfo["pending_slots"], 5,
                         "Error: Pending slots is wrong.")
        self.assertEqual(siteInfo["running_slots"], 10,
                         "Error: Running slots is wrong.")

        return

    def testThresholdsForSite(self):
        """
        _testThresholdsForSite_

        Check that we can get the thresholds in intelligible form
        for each site
        """

        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1", "Processing", 10, 8)
        myResourceControl.insertThreshold("testSite1", "Merge", 5, 3)

        result = myResourceControl.thresholdBySite(siteName="testSite1")
        procInfo = {}
        mergInfo = {}
        for res in result:
            if res['task_type'] == 'Processing':
                procInfo = res
            elif res['task_type'] == 'Merge':
                mergInfo = res
        self.assertEqual(procInfo.get('pending_slots', None), 20)
        self.assertEqual(procInfo.get('running_slots', None), 40)
        self.assertEqual(procInfo.get('max_slots', None), 10)
        self.assertEqual(procInfo.get('task_pending_slots', None), 8)
        self.assertEqual(mergInfo.get('pending_slots', None), 20)
        self.assertEqual(mergInfo.get('running_slots', None), 40)
        self.assertEqual(mergInfo.get('max_slots', None), 5)
        self.assertEqual(mergInfo.get('task_pending_slots', None), 3)

        return

    def testThresholdPriority(self):
        """
        _testThresholdPriority_

        Test that we get things back in priority order
        """

        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1",
                                          "Processing",
                                          10,
                                          8,
                                          priority=1)
        myResourceControl.insertThreshold("testSite1",
                                          "Merge",
                                          5,
                                          3,
                                          priority=2)

        result = myResourceControl.listThresholdsForSubmit()

        self.assertEqual(result['testSite1']['thresholds'][0]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite1']['thresholds'][1]['task_type'],
                         'Processing')

        myResourceControl.insertThreshold("testSite1",
                                          "Processing",
                                          10,
                                          8,
                                          priority=2)
        myResourceControl.insertThreshold("testSite1",
                                          "Merge",
                                          5,
                                          3,
                                          priority=1)

        # Should now be in reverse order
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite1']['thresholds'][1]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite1']['thresholds'][0]['task_type'],
                         'Processing')

        myResourceControl.insertSite("testSite2", 20, 40, "testSE2", "testCE2")
        myResourceControl.insertThreshold("testSite2",
                                          "Processing",
                                          10,
                                          8,
                                          priority=1)
        myResourceControl.insertThreshold("testSite2",
                                          "Merge",
                                          5,
                                          3,
                                          priority=2)

        # Should be in proper order for site 2
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite2']['thresholds'][0]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite2']['thresholds'][1]['task_type'],
                         'Processing')

        # Should now be in reverse order for site 1
        self.assertEqual(result['testSite1']['thresholds'][1]['task_type'],
                         'Merge')
        self.assertEqual(result['testSite1']['thresholds'][0]['task_type'],
                         'Processing')

        myResourceControl.insertThreshold("testSite2", "Merge", 20, 10)
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(result['testSite2']['thresholds'][0]['priority'], 2)

        return

    def testChangeState(self):
        """
        _testChangeState_

        Check that we can change the state between different values and
        retrieve it through the threshold methods
        """
        myResourceControl = ResourceControl()
        myResourceControl.insertSite("testSite1", 20, 40, "testSE1", "testCE1")
        myResourceControl.insertThreshold("testSite1",
                                          "Processing",
                                          10,
                                          5,
                                          priority=1)

        result = myResourceControl.listThresholdsForCreate()
        self.assertEqual(result['testSite1']['state'], 'Normal',
                         'Error: Wrong site state')

        myResourceControl.changeSiteState("testSite1", "Down")
        result = myResourceControl.listThresholdsForCreate()
        self.assertEqual(result['testSite1']['state'], 'Down',
                         'Error: Wrong site state')

    def createConfig(self):
        """
        _createConfig_

        Create a config and save it to the temp dir.  Set the WMAGENT_CONFIG
        environment variable so the config gets picked up.
        """
        config = Configuration()
        config.section_("General")
        config.General.workDir = os.getenv("TESTDIR", os.getcwd())
        config.section_("Agent")
        config.Agent.componentName = "resource_control_t"
        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket = os.getenv("DBSOCK")

        configHandle = open(os.path.join(self.tempDir, "config.py"), "w")
        configHandle.write(str(config))
        configHandle.close()

        os.environ["WMAGENT_CONFIG"] = os.path.join(self.tempDir, "config.py")
        return

    def testInsertAllSEs(self):
        """
        _testInsertAllSEs_

        Test to see if we can insert all SEs and Thresholds at once.
        Depends on the WMCore.Services.SiteDB interface.
        """
        self.createConfig()

        resControlPath = os.path.join(WMCore.WMBase.getTestBase(),
                                      "../../bin/wmagent-resource-control")
        env = os.environ
        env['PYTHONPATH'] = ":".join(sys.path)
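        # Drive the wmagent-resource-control CLI; every site known to SiteDB should
        # be inserted with the same pending/running slot settings.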
        cmdline = [
            resControlPath, "--add-all-sites", "--plugin=CondorPlugin",
            "--pending-slots=100", "--running-slots=500"
        ]
        retval = subprocess.Popen(cmdline,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  env=env)
        (output, _) = retval.communicate()

        myResourceControl = ResourceControl()
        result = myResourceControl.listThresholdsForSubmit()
        self.assertTrue('T1_US_FNAL' in result.keys())
        for x in result.keys():
            self.assertEqual(len(result[x]['thresholds']), 8)
            self.assertEqual(result[x]['total_pending_slots'], 100)
            self.assertEqual(result[x]['total_running_slots'], 500)
            for thresh in result[x]['thresholds']:
                if thresh['task_type'] == 'Processing':
                    self.assertEqual(thresh['priority'], 1)
                    self.assertEqual(thresh['max_slots'], 500)

        # Verify that sites with more than one SE were added correctly.
        nebInfo = myResourceControl.listSiteInfo("T2_US_Nebraska")
        self.assertTrue(len(nebInfo["se_name"]) == 3)
        return

    def testInsertAllSEs2(self):
        """
        _testInsertAllSEs2_

        Test to see if we can insert all SEs and Thresholds at once using the
        insertAllSEs() API.  Depends on the WMCore.Services.SiteDB interface.
        """
        myResourceControl = ResourceControl()
        taskList = [{
            'taskType': 'Processing',
            'maxSlots': 100,
            'pendingSlots': 80,
            'priority': 1
        }, {
            'taskType': 'Merge',
            'maxSlots': 50,
            'pendingSlots': 30,
            'priority': 2
        }]

        myResourceControl.insertAllSEs(siteName='test',
                                       pendingSlots=200,
                                       runningSlots=400,
                                       ceName='glidein-ce.fnal.gov',
                                       plugin='CondorPlugin',
                                       taskList=taskList)
        result = myResourceControl.listThresholdsForSubmit()
        self.assertTrue('test_cmssrm.fnal.gov' in result.keys())
        self.assertEqual(result['test_cmssrm.fnal.gov']['cms_name'],
                         'T1_US_FNAL')
        for x in result.keys():
            self.assertEqual(len(result[x]['thresholds']), 2)
            self.assertEqual(result[x]['total_pending_slots'], 200)
            self.assertEqual(result[x]['total_running_slots'], 400)
            for thresh in result[x]['thresholds']:
                if thresh['task_type'] == 'Processing':
                    self.assertEqual(thresh['priority'], 1)
                    self.assertEqual(thresh['max_slots'], 100)
                    self.assertEqual(thresh['pending_slots'], 80)

                else:
                    self.assertEqual(thresh['priority'], 2)
                    self.assertEqual(thresh['max_slots'], 50)
                    self.assertEqual(thresh['pending_slots'], 30)
        return

    def testInsertT0(self):
        """
        _testInsertT0_

        Test to see if we can insert the Tier-0 alone
        with a single option
        """
        self.createConfig()

        resControlPath = os.path.join(WMCore.WMBase.getTestBase(),
                                      "../../bin/wmagent-resource-control")
        env = os.environ
        env['PYTHONPATH'] = ":".join(sys.path)
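        # A single --add-T0 option should insert only the CERN Tier-0 site, using
        # the built-in Tier-0 threshold defaults checked below.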
        cmdline = [resControlPath, "--add-T0"]
        retval = subprocess.Popen(cmdline,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  env=env)
        (_, _) = retval.communicate()

        myResourceControl = ResourceControl()
        result = myResourceControl.listThresholdsForSubmit()
        self.assertEqual(len(result), 1)
        self.assertTrue('CERN' in result)
        for x in result:
            self.assertEqual(len(result[x]['thresholds']), 10)
            self.assertEqual(result[x]['total_pending_slots'], 500)
            self.assertEqual(result[x]['total_running_slots'], -1)
            for thresh in result[x]['thresholds']:
                if thresh['task_type'] == 'Processing':
                    self.assertEqual(thresh['priority'], 1)
                    self.assertEqual(thresh['max_slots'], -1)

        # Verify that sites with more than one SE were added correctly.
        cernInfo = myResourceControl.listSiteInfo("CERN")
        self.assertTrue(len(cernInfo["se_name"]) == 2)
        return
Example #25
class testJSONRequests(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        tmp = self.testInit.generateWorkDir()
        self.request = Requests.JSONRequests(idict={'req_cache_path' : tmp})

    def roundTrip(self, data):
        encoded = self.request.encode(data)
        #print encoded
        #print encoded.__class__.__name__
        decoded = self.request.decode(encoded)
        #print decoded.__class__.__name__
        self.assertEqual( data, decoded )

    def roundTripLax(self, data):
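        # "Lax" round trip: the decoded object may drop keys, but it must not
        # contain any key that was absent from the original data.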
        encoded = self.request.encode(data)
        decoded = self.request.decode(encoded)
        datakeys = data.keys()

        for k in decoded.keys():
            assert k in datakeys
            datakeys.pop(datakeys.index(k))
        #print 'the following keys were dropped\n\t',datakeys

    def testSet1(self):
        self.roundTrip(set([]))

    def testSet2(self):
        self.roundTrip(set([1, 2, 3, 4, Run(1)]))

    def testSet3(self):
        self.roundTrip(set(['a', 'b', 'c', 'd']))

    def testSet4(self):
        self.roundTrip(set([1, 2, 3, 4, 'a', 'b']))

    def testRun1(self):
        self.roundTrip(Run(1))

    def testRun2(self):
        self.roundTrip(Run(1, 1))

    def testRun3(self):
        self.roundTrip(Run(1, 2, 3))

    def testMask1(self):
        self.roundTrip(Mask())

    def testMask2(self):
        mymask = Mask()
        mymask['FirstEvent'] = 9999
        mymask['LastEvent'] = 999
        self.roundTrip(mymask)

    def testMask3(self):
        mymask = Mask()
        mymask['FirstEvent'] = 9999
        mymask['LastEvent'] = 999
        myjob = DataStructsJob()
        myjob["mask"] = mymask
        self.roundTrip(myjob)

    def testMask4(self):
        self.roundTrip({'LastRun': None, 'FirstRun': None, 'LastEvent': None,
        'FirstEvent': None, 'LastLumi': None, 'FirstLumi': None})

    def testMask5(self):
        mymask = Mask()
        mymask['FirstEvent'] = 9999
        mymask['LastEvent'] = 999
        myjob = DataStructsJob()
        myjob["mask"] = mymask
        self.roundTripLax(myjob)

    def testMask6(self):
        mymask = Mask()
        myjob = DataStructsJob()
        myjob["mask"] = mymask
        self.roundTripLax(myjob)

    def testSpecialCharacterPasswords(self):
        url = 'http://username:p@ssw:rd@localhost:6666'
        req = JSONRequests(url)
        self.assertEqual(req['host'], 'http://localhost:6666')
        self.assertEqual(req.additionalHeaders['Authorization'], 'Basic dXNlcm5hbWU6cEBzc3c6cmQ=')
Example #26
class DBSBufferFileTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Set up the database and logging connection.  Try to create all of the
        DBSBuffer tables.  Also add some dummy locations.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        locationAction = self.daoFactory(
            classname="DBSBufferFiles.AddLocation")
        locationAction.execute(siteName="se1.cern.ch")
        locationAction.execute(siteName="se1.fnal.gov")

    def tearDown(self):
        """
        _tearDown_

        Drop all the DBSBuffer tables.
        """
        self.testInit.clearDatabase()

    def testCreateDeleteExists(self):
        """
        _testCreateDeleteExists_

        Test the create(), delete() and exists() methods of the file class
        by creating and deleting a file.  The exists() method will be
        called before and after creation and after deletion.
        """
        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        assert testFile.exists() == False, \
            "ERROR: File exists before it was created"

        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
            "ERROR: File does not exist after it was created"

        testFile.delete()

        assert testFile.exists() == False, \
            "ERROR: File exists after it has been deleted"
        return

    def testCreateTransaction(self):
        """
        _testCreateTransaction_

        Begin a transaction and then create a file in the database.  Afterwards,
        roll back the transaction.  Use the File class's exists() method to
        verify that the file doesn't exist before it is created, exists after
        it is created, and doesn't exist after the transaction is rolled back.
        """
        myThread = threading.currentThread()
        myThread.transaction.begin()

        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        assert testFile.exists() == False, \
            "ERROR: File exists before it was created"
        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
            "ERROR: File does not exist after it was created"

        myThread.transaction.rollback()

        assert testFile.exists() == False, \
            "ERROR: File exists after transaction was rolled back."
        return

    def testDeleteTransaction(self):
        """
        _testDeleteTransaction_

        Create a file and commit it to the database.  Start a new transaction
        and delete the file.  Roll back the transaction after the file has been
        deleted.  Use the File class's exists() method to verify that the file
        does not exist after it has been deleted but does exist after the
        transaction is rolled back.
        """
        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        assert testFile.exists() == False, \
            "ERROR: File exists before it was created"

        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
            "ERROR: File does not exist after it was created"

        myThread = threading.currentThread()
        myThread.transaction.begin()

        testFile.delete()

        assert testFile.exists() == False, \
            "ERROR: File exists after it has been deleted"

        myThread.transaction.rollback()

        assert testFile.exists() > 0, \
            "ERROR: File does not exist after transaction was rolled back."
        return

    def testGetParentLFNs(self):
        """
        _testGetParentLFNs_

        Create three files and set them to be parents of a fourth file.  Check
        to make sure that getParentLFNs() on the child file returns the correct
        LFNs.
        """
        testFileParentA = DBSBufferFile(lfn="/this/is/a/parent/lfnA",
                                        size=1024,
                                        events=20)
        testFileParentA.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentA.addRun(Run(1, *[45]))

        testFileParentB = DBSBufferFile(lfn="/this/is/a/parent/lfnB",
                                        size=1024,
                                        events=20)
        testFileParentB.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentB.addRun(Run(1, *[45]))

        testFileParentC = DBSBufferFile(lfn="/this/is/a/parent/lfnC",
                                        size=1024,
                                        events=20)
        testFileParentC.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentC.addRun(Run(1, *[45]))

        testFileParentA.create()
        testFileParentB.create()
        testFileParentC.create()

        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.addRun(Run(1, *[45]))
        testFile.create()

        testFile.addParents([
            testFileParentA["lfn"], testFileParentB["lfn"],
            testFileParentC["lfn"]
        ])

        parentLFNs = testFile.getParentLFNs()

        assert len(parentLFNs) == 3, \
            "ERROR: Child does not have the right amount of parents"

        goldenLFNs = [
            "/this/is/a/parent/lfnA", "/this/is/a/parent/lfnB",
            "/this/is/a/parent/lfnC"
        ]
        for parentLFN in parentLFNs:
            assert parentLFN in goldenLFNs, \
                "ERROR: Unknown parent lfn"
            goldenLFNs.remove(parentLFN)

        testFile.delete()
        testFileParentA.delete()
        testFileParentB.delete()
        testFileParentC.delete()
        return

    def testLoad(self):
        """
        _testLoad_

        Test the loading of file meta data using the ID of a file and the
        LFN of a file.
        """
        checksums = {"adler32": "adler32", "cksum": "cksum", "md5": "md5"}
        testFileA = DBSBufferFile(lfn="/this/is/a/lfn",
                                  size=1024,
                                  events=10,
                                  checksums=checksums)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()

        testFileB = DBSBufferFile(lfn=testFileA["lfn"])
        testFileB.load()
        testFileC = DBSBufferFile(id=testFileA["id"])
        testFileC.load()

        assert testFileA == testFileB, \
            "ERROR: File load by LFN didn't work"

        assert testFileA == testFileC, \
            "ERROR: File load by ID didn't work"

        assert isinstance(testFileB["id"], int), \
            "ERROR: File id is not an integer type."
        assert isinstance(testFileB["size"], int), \
            "ERROR: File size is not an integer type."
        assert isinstance(testFileB["events"], int), \
            "ERROR: File events is not an integer type."

        assert isinstance(testFileC["id"], int), \
            "ERROR: File id is not an integer type."
        assert isinstance(testFileC["size"], int), \
            "ERROR: File size is not an integer type."
        assert isinstance(testFileC["events"], int), \
            "ERROR: File events is not an integer type."

        testFileA.delete()
        return

    def testFilesize(self):
        """
        _testFilesize_

        Test storing and loading the file information from dbsbuffer_file.
        Make sure the file size can be bigger than a 32-bit integer.
        """
        checksums = {"adler32": "adler32", "cksum": "cksum"}
        testFileA = DBSBufferFile(lfn="/this/is/a/lfn",
                                  size=3221225472,
                                  events=1500000,
                                  checksums=checksums)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_7_6_0",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()

        testFileB = DBSBufferFile(lfn=testFileA["lfn"])
        testFileB.load()
        self.assertEqual(testFileB["size"], 3221225472,
                         "Error: the filesize should be 3GB")
        self.assertEqual(testFileB["events"], 1500000,
                         "Error: the number of events should be 1.5M")

    def testAddChild(self):
        """
        _testAddChild_

        Add a child to some parent files and make sure that all the parentage
        information is loaded/stored correctly from the database.
        """
        testFileParentA = DBSBufferFile(lfn="/this/is/a/parent/lfnA",
                                        size=1024,
                                        events=20)
        testFileParentA.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        testFileParentA.addRun(Run(1, *[45]))
        testFileParentB = DBSBufferFile(lfn="/this/is/a/parent/lfnB",
                                        size=1024,
                                        events=20)
        testFileParentB.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentB.addRun(Run(1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run(1, *[45]))
        testFileA.create()

        testFileParentA.addChildren("/this/is/a/lfn")
        testFileParentB.addChildren("/this/is/a/lfn")

        testFileB = DBSBufferFile(id=testFileA["id"])
        testFileB.load(parentage=1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
            "ERROR: Some parents are missing"
        return

    def testAddChildTransaction(self):
        """
        _testAddChildTransaction_

        Add a child to some parent files and make sure that all the parentage
        information is loaded/stored correctly from the database.  Roll back the
        addition of one of the children and then verify that the child does in
        fact only have one parent.
        """
        testFileParentA = DBSBufferFile(lfn="/this/is/a/parent/lfnA",
                                        size=1024,
                                        events=20)
        testFileParentA.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentA.addRun(Run(1, *[45]))

        testFileParentB = DBSBufferFile(lfn="/this/is/a/parent/lfnB",
                                        size=1024,
                                        events=20)
        testFileParentB.setAlgorithm(appName="cmsRun",
                                     appVer="CMSSW_2_1_8",
                                     appFam="RECO",
                                     psetHash="GIBBERISH",
                                     configContent="MOREGIBBERISH")
        testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentB.addRun(Run(1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run(1, *[45]))
        testFileA.create()

        testFileParentA.addChildren("/this/is/a/lfn")

        myThread = threading.currentThread()
        myThread.transaction.begin()

        testFileParentB.addChildren("/this/is/a/lfn")

        testFileB = DBSBufferFile(id=testFileA["id"])
        testFileB.load(parentage=1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
            "ERROR: Some parents are missing"

        myThread.transaction.rollback()
        testFileB.load(parentage=1)

        goldenFiles = [testFileParentA]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
            "ERROR: Some parents are missing"

        return

    def testSetLocation(self):
        """
        _testSetLocation_

        Create a file and add a couple locations.  Load the file from the
        database to make sure that the locations were set correctly.
        """
        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run(1, *[45]))
        testFileA.create()

        testFileA.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"],
                              immediateSave=False)
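        # The "bunk" locations are not persisted because immediateSave is False, so
        # only the first two locations should come back when the file is reloaded.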

        testFileB = DBSBufferFile(id=testFileA["id"])
        testFileB.load()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
            "ERROR: Some locations are missing"
        return

    def testSetLocationTransaction(self):
        """
        _testSetLocationTransaction_

        Create a file at specific locations and commit everything to the
        database.  Reload the file from the database and verify that the
        locations are correct.  Rollback the database transaction and once
        again reload the file.  Verify that the original locations are back.
        """
        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run(1, *[45]))
        testFileA.create()

        testFileA.setLocation(["se1.fnal.gov"])

        myThread = threading.currentThread()
        myThread.transaction.begin()

        testFileA.setLocation(["se1.cern.ch"])
        testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"],
                              immediateSave=False)

        testFileB = DBSBufferFile(id=testFileA["id"])
        testFileB.load()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
            "ERROR: Some locations are missing"

        myThread.transaction.rollback()
        testFileB.load()

        goldenLocations = ["se1.fnal.gov"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
            "ERROR: Some locations are missing"
        return

    def testLocationsConstructor(self):
        """
        _testLocationsConstructor_

        Test to make sure that locations passed into the File() constructor
        are loaded from and saved to the database correctly.  Also test to make
        sure that the class behaves well when the location is passed in as a
        single string instead of a set.
        """
        testFileA = DBSBufferFile(lfn="/this/is/a/lfn",
                                  size=1024,
                                  events=10,
                                  locations=set(["se1.fnal.gov"]))
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run(1, *[45]))
        testFileA.create()

        testFileB = DBSBufferFile(lfn="/this/is/a/lfn2",
                                  size=1024,
                                  events=10,
                                  locations="se1.fnal.gov")
        testFileB.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileB.addRun(Run(1, *[45]))
        testFileB.create()

        testFileC = DBSBufferFile(id=testFileA["id"])
        testFileC.load()

        goldenLocations = ["se1.fnal.gov"]
        for location in testFileC["locations"]:
            assert location in goldenLocations, \
                "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
            "ERROR: Some locations are missing"

        testFileC = DBSBufferFile(id=testFileB["id"])
        testFileC.load()

        goldenLocations = ["se1.fnal.gov"]
        for location in testFileC["locations"]:
            assert location in goldenLocations, \
                "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
            "ERROR: Some locations are missing"
        return

    def testAddRunSet(self):
        """
        _testAddRunSet_

        Test the ability to add run and lumi information to a file.
        """
        testFile = DBSBufferFile(lfn="/this/is/a/lfn",
                                 size=1024,
                                 events=10,
                                 locations="se1.fnal.gov")
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        testFile.create()
        runSet = set()
        runSet.add(Run(1, *[45]))
        runSet.add(Run(2, *[67, 68]))
        testFile.addRunSet(runSet)

        assert (runSet - testFile["runs"]) == set(), \
            "Error: addRunSet is not updating set correctly"

    def testSetBlock(self):
        """
        _testSetBlock_

        Verify that the [Set|Get]Block DAOs work correctly.
        """
        myThread = threading.currentThread()

        dataset = "/Cosmics/CRUZET09-PromptReco-v1/RECO"

        uploadFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                   logger=myThread.logger,
                                   dbinterface=myThread.dbi)

        datasetAction = uploadFactory(classname="NewDataset")
        createAction = uploadFactory(classname="CreateBlocks")

        datasetAction.execute(datasetPath=dataset)

        newBlock = DBSBufferBlock(name="someblockname",
                                  location="se1.cern.ch",
                                  datasetpath=None)
        newBlock.setDataset(dataset, 'data', 'VALID')

        createAction.execute(blocks=[newBlock])
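        # With the dataset and block registered, the SetBlock/GetBlock DAOs should
        # be able to attach the file created below to "someblockname" and read the
        # block name back.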

        setBlockAction = self.daoFactory(classname="DBSBufferFiles.SetBlock")
        getBlockAction = self.daoFactory(classname="DBSBufferFiles.GetBlock")

        testFile = DBSBufferFile(lfn="/this/is/a/lfn",
                                 size=1024,
                                 events=10,
                                 locations="se1.fnal.gov")
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath(dataset)

        testFile.create()

        setBlockAction.execute(lfn=testFile["lfn"], blockName="someblockname")
        blockName = getBlockAction.execute(lfn=testFile["lfn"])

        assert blockName[0][0] == "someblockname", \
            "Error: Incorrect block returned: %s" % blockName[0][0]
        return

    def testCountFilesDAO(self):
        """
        _testCountFilesDAO_

        Verify that the CountFiles DAO object functions correctly.
        """
        testFileA = DBSBufferFile(lfn="/this/is/a/lfnA",
                                  size=1024,
                                  events=10,
                                  locations="se1.fnal.gov")
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()

        testFileB = DBSBufferFile(lfn="/this/is/a/lfnB",
                                  size=1024,
                                  events=10,
                                  locations="se1.fnal.gov")
        testFileB.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileB.create()

        testFileC = DBSBufferFile(lfn="/this/is/a/lfnC",
                                  size=1024,
                                  events=10,
                                  locations="se1.fnal.gov")
        testFileC.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileC.create()

        countAction = self.daoFactory(classname="CountFiles")

        assert countAction.execute() == 3, \
            "Error: Wrong number of files counted in DBS Buffer."

        return

    def testAddParents(self):
        """
        _testAddParents_

        Verify that the addParents() method works correctly even if the parents
        do not already exist in the database.
        """
        myThread = threading.currentThread()

        testFile = DBSBufferFile(lfn="/this/is/a/lfnA",
                                 size=1024,
                                 events=10,
                                 locations="se1.fnal.gov")
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.create()

        testParent = DBSBufferFile(lfn="/this/is/a/lfnB",
                                   size=1024,
                                   events=10,
                                   locations="se1.fnal.gov")
        testParent.setAlgorithm(appName="cmsRun",
                                appVer="CMSSW_2_1_8",
                                appFam="RECO",
                                psetHash="GIBBERISH",
                                configContent="MOREGIBBERISH")
        testParent.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RAW")
        testParent.create()

        goldenLFNs = ["lfn1", "lfn2", "lfn3", "/this/is/a/lfnB"]
        testFile.addParents(goldenLFNs)

        verifyFile = DBSBufferFile(id=testFile["id"])
        verifyFile.load(parentage=1)
        parentLFNs = verifyFile.getParentLFNs()

        for parentLFN in parentLFNs:
            self.assertTrue(parentLFN in goldenLFNs,
                            "Error: unknown lfn %s" % parentLFN)
            goldenLFNs.remove(parentLFN)

        self.assertEqual(len(goldenLFNs), 0, "Error: missing LFNs...")

        # Check that the bogus dataset is listed as inDBS
        sqlCommand = """SELECT in_dbs FROM dbsbuffer_algo_dataset_assoc das
                          INNER JOIN dbsbuffer_dataset ds ON das.dataset_id = ds.id
                          WHERE ds.path = 'bogus'"""

        status = myThread.dbi.processData(sqlCommand)[0].fetchall()[0][0]

        self.assertEqual(status, 1)

        # Now make sure the dummy files are listed as being in DBS
        sqlCommand = """SELECT status FROM dbsbuffer_file df
                          INNER JOIN dbsbuffer_algo_dataset_assoc das ON das.id = df.dataset_algo
                          INNER JOIN dbsbuffer_dataset ds ON das.dataset_id = ds.id
                          WHERE ds.path = '/bogus/dataset/path' """

        status = myThread.dbi.processData(sqlCommand)[0].fetchall()

        for entry in status:
            self.assertEqual(entry, ('AlreadyInDBS', ))

        return

    def testGetChildrenDAO(self):
        """
        _testGetChildrenDAO_

        Verify that the GetChildren DAO correctly returns the LFNs of a file's
        children.
        """
        testFileChildA = DBSBufferFile(lfn="/this/is/a/child/lfnA",
                                       size=1024,
                                       events=20)
        testFileChildA.setAlgorithm(appName="cmsRun",
                                    appVer="CMSSW_2_1_8",
                                    appFam="RECO",
                                    psetHash="GIBBERISH",
                                    configContent="MOREGIBBERISH")
        testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildB = DBSBufferFile(lfn="/this/is/a/child/lfnB",
                                       size=1024,
                                       events=20)
        testFileChildB.setAlgorithm(appName="cmsRun",
                                    appVer="CMSSW_2_1_8",
                                    appFam="RECO",
                                    psetHash="GIBBERISH",
                                    configContent="MOREGIBBERISH")
        testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildC = DBSBufferFile(lfn="/this/is/a/child/lfnC",
                                       size=1024,
                                       events=20)
        testFileChildC.setAlgorithm(appName="cmsRun",
                                    appVer="CMSSW_2_1_8",
                                    appFam="RECO",
                                    psetHash="GIBBERISH",
                                    configContent="MOREGIBBERISH")
        testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        testFileChildA.create()
        testFileChildB.create()
        testFileChildC.create()

        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.create()

        testFileChildA.addParents([testFile["lfn"]])
        testFileChildB.addParents([testFile["lfn"]])
        testFileChildC.addParents([testFile["lfn"]])

        getChildrenAction = self.daoFactory(
            classname="DBSBufferFiles.GetChildren")
        childLFNs = getChildrenAction.execute(testFile["lfn"])

        assert len(childLFNs) == 3, \
            "ERROR: Parent does not have the right amount of children."

        goldenLFNs = [
            "/this/is/a/child/lfnA", "/this/is/a/child/lfnB",
            "/this/is/a/child/lfnC"
        ]
        for childLFN in childLFNs:
            assert childLFN in goldenLFNs, \
                "ERROR: Unknown child lfn"
            goldenLFNs.remove(childLFN)

        return

    def testGetParentStatusDAO(self):
        """
        _testGetParentStatusDAO_

        Verify that the GetParentStatus DAO correctly returns the status of a
        file's parents.
        """
        testFileChild = DBSBufferFile(lfn="/this/is/a/child/lfnA",
                                      size=1024,
                                      events=20)
        testFileChild.setAlgorithm(appName="cmsRun",
                                   appVer="CMSSW_2_1_8",
                                   appFam="RECO",
                                   psetHash="GIBBERISH",
                                   configContent="MOREGIBBERISH")
        testFileChild.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChild.create()

        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.create()

        testFileChild.addParents([testFile["lfn"]])

        getStatusAction = self.daoFactory(
            classname="DBSBufferFiles.GetParentStatus")
        parentStatus = getStatusAction.execute(testFileChild["lfn"])

        assert len(parentStatus) == 1, \
            "ERROR: Wrong number of statuses returned."
        assert parentStatus[0] == "NOTUPLOADED", \
            "ERROR: Wrong status returned."

        return

    def testSetLocationByLFN(self):
        """
        _testSetLocationByLFN_

        """
        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run(1, *[45]))
        testFileA.create()

        setLocationAction = self.daoFactory(
            classname="DBSBufferFiles.SetLocationByLFN")
        setLocationAction.execute(binds=[{
            'lfn': "/this/is/a/lfn",
            'pnn': 'se1.cern.ch'
        }])

        testFileB = DBSBufferFile(id=testFileA["id"])
        testFileB.load()

        self.assertEqual(testFileB['locations'], set(['se1.cern.ch']))

        return

    def testAddCKSumByLFN(self):
        """
        _testAddCKSumByLFN_

        """

        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()

        setCksumAction = self.daoFactory(
            classname="DBSBufferFiles.AddChecksumByLFN")
        binds = [{
            'lfn': "/this/is/a/lfn",
            'cktype': 'adler32',
            'cksum': 201
        }, {
            'lfn': "/this/is/a/lfn",
            'cktype': 'cksum',
            'cksum': 101
        }]
        setCksumAction.execute(bulkList=binds)

        testFileB = DBSBufferFile(id=testFileA["id"])
        testFileB.load()

        self.assertEqual(testFileB['checksums'], {
            'adler32': '201',
            'cksum': '101'
        })

        return

    def testBulkLoad(self):
        """
        _testBulkLoad_

        Can we load in bulk?
        """

        addToBuffer = DBSBufferUtil()

        testFileChildA = DBSBufferFile(lfn="/this/is/a/child/lfnA",
                                       size=1024,
                                       events=20)
        testFileChildA.setAlgorithm(appName="cmsRun",
                                    appVer="CMSSW_2_1_8",
                                    appFam="RECO",
                                    psetHash="GIBBERISH",
                                    configContent="MOREGIBBERISH")
        testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildB = DBSBufferFile(lfn="/this/is/a/child/lfnB",
                                       size=1024,
                                       events=20)
        testFileChildB.setAlgorithm(appName="cmsRun",
                                    appVer="CMSSW_2_1_8",
                                    appFam="RECO",
                                    psetHash="GIBBERISH",
                                    configContent="MOREGIBBERISH")
        testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildC = DBSBufferFile(lfn="/this/is/a/child/lfnC",
                                       size=1024,
                                       events=20)
        testFileChildC.setAlgorithm(appName="cmsRun",
                                    appVer="CMSSW_2_1_8",
                                    appFam="RECO",
                                    psetHash="GIBBERISH",
                                    configContent="MOREGIBBERISH")
        testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        testFileChildA.create()
        testFileChildB.create()
        testFileChildC.create()

        testFileChildA.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileChildB.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileChildC.setLocation(["se1.fnal.gov", "se1.cern.ch"])

        runSet = set()
        runSet.add(Run(1, *[45]))
        runSet.add(Run(2, *[67, 68]))
        testFileChildA.addRunSet(runSet)
        testFileChildB.addRunSet(runSet)
        testFileChildC.addRunSet(runSet)

        testFileChildA.save()
        testFileChildB.save()
        testFileChildC.save()

        setCksumAction = self.daoFactory(
            classname="DBSBufferFiles.AddChecksumByLFN")
        binds = [{
            'lfn': "/this/is/a/child/lfnA",
            'cktype': 'adler32',
            'cksum': 201
        }, {
            'lfn': "/this/is/a/child/lfnA",
            'cktype': 'cksum',
            'cksum': 101
        }, {
            'lfn': "/this/is/a/child/lfnB",
            'cktype': 'adler32',
            'cksum': 201
        }, {
            'lfn': "/this/is/a/child/lfnB",
            'cktype': 'cksum',
            'cksum': 101
        }, {
            'lfn': "/this/is/a/child/lfnC",
            'cktype': 'adler32',
            'cksum': 201
        }, {
            'lfn': "/this/is/a/child/lfnC",
            'cktype': 'cksum',
            'cksum': 101
        }]
        setCksumAction.execute(bulkList=binds)

        testFile = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFile.setAlgorithm(appName="cmsRun",
                              appVer="CMSSW_2_1_8",
                              appFam="RECO",
                              psetHash="GIBBERISH",
                              configContent="MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.create()

        testFileChildA.addParents([testFile["lfn"]])
        testFileChildB.addParents([testFile["lfn"]])
        testFileChildC.addParents([testFile["lfn"]])

        binds = [{
            'id': testFileChildA.exists()
        }, {
            'id': testFileChildB.exists()
        }, {
            'id': testFileChildC.exists()
        }]

        listOfFiles = addToBuffer.loadDBSBufferFilesBulk(fileObjs=binds)

        # print listOfFiles

        compareList = [
            'locations', 'psetHash', 'configContent', 'appName', 'appVer',
            'appFam', 'events', 'datasetPath', 'runs'
        ]

        for f in listOfFiles:
            self.assertTrue(
                f['lfn'] in [
                    "/this/is/a/child/lfnA", "/this/is/a/child/lfnB",
                    "/this/is/a/child/lfnC"
                ], "Unknown file in loaded results")
            self.assertEqual(f['checksums'], {
                'adler32': '201',
                'cksum': '101'
            })
            for parent in f['parents']:
                self.assertEqual(parent['lfn'], testFile['lfn'])
            for key in compareList:
                self.assertEqual(f[key], testFileChildA[key])

    def testProperties(self):
        """
        _testProperties_

        Test the additional tags that DBSBuffer transfers from the workload to DBS
        """

        testFileA = DBSBufferFile(lfn="/this/is/a/lfn", size=1024, events=10)
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.setValidStatus(validStatus="VALID")
        testFileA.setProcessingVer(ver="ProcVer")
        testFileA.setAcquisitionEra(era="AcqEra")
        testFileA.setGlobalTag(globalTag="GlobalTag")
        testFileA.setDatasetParent(datasetParent="Parent")
        testFileA.create()

        return
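
Note: the checksum and location DAO calls in the tests above (AddChecksumByLFN, SetLocationByLFN) all take their input as a flat list of bind dictionaries, one dictionary per LFN/value pair. The helper below is a minimal, hypothetical sketch (buildChecksumBinds is not part of WMCore) showing how such a bind list could be assembled from a plain mapping before being handed to something like setCksumAction.execute(bulkList=binds):

def buildChecksumBinds(checksumsByLFN):
    # Build a bulk bind list shaped like the ones passed to the
    # DBSBufferFiles.AddChecksumByLFN DAO in the tests above, e.g.
    #   {"/this/is/a/lfn": {"adler32": 201, "cksum": 101}}
    # becomes
    #   [{"lfn": "/this/is/a/lfn", "cktype": "adler32", "cksum": 201},
    #    {"lfn": "/this/is/a/lfn", "cktype": "cksum", "cksum": 101}]
    binds = []
    for lfn, checksums in checksumsByLFN.items():
        for cktype, cksum in checksums.items():
            binds.append({"lfn": lfn, "cktype": cktype, "cksum": cksum})
    return binds

The order of the resulting bind dictionaries may vary with dictionary iteration order; the tests above simply pass explicit, hand-written lists.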
Example #27
File: File_t.py Project: stuartw/WMCore
class FileTest(unittest.TestCase):

    def setUp(self):
        """
        _setUp_

        Set up the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daofactory = DAOFactory(package = "WMCore.WMBS",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        locationAction = self.daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "site1", seName = "se1.cern.ch")
        locationAction.execute(siteName = "site2", seName = "se1.fnal.gov")

        return
          
    def tearDown(self):        
        """
        _tearDown_
        
        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()
        return
            
    def testCreateDeleteExists(self):
        """
        _testCreateDeleteExists_

        Test the create(), delete() and exists() methods of the file class
        by creating and deleting a file.  The exists() method will be
        called before and after creation and after deletion.
        """
        testFile = File(lfn = "/this/is/a/lfn", size = 1024, events = 10, checksums={'cksum':1111})

        assert testFile.exists() == False, \
               "ERROR: File exists before it was created"

        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
               "ERROR: File does not exist after it was created"

        testFile.delete()

        assert testFile.exists() == False, \
               "ERROR: File exists after it has been deleted"
        return

    def testCreateTransaction(self):
        """
        _testCreateTransaction_

        Begin a transaction and then create a file in the database.  Afterwards,
        roll back the transaction.  Use the File class's exists() method to
        verify that the file doesn't exist before it was created, exists
        after it was created and doesn't exist after the transaction was rolled
        back.
        """
        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFile = File(lfn = "/this/is/a/lfn", size = 1024, events = 10, checksums={'cksum':1111})

        assert testFile.exists() == False, \
               "ERROR: File exists before it was created"

        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
               "ERROR: File does not exist after it was created"

        myThread.transaction.rollback()

        assert testFile.exists() == False, \
               "ERROR: File exists after transaction was rolled back."
        return    

    def testDeleteTransaction(self):
        """
        _testDeleteTransaction_

        Create a file and commit it to the database.  Start a new transaction
        and delete the file.  Roll back the transaction after the file has been
        deleted.  Use the file class's exists() method to verify that the file
        does not exist after it has been deleted but does exist after the
        transaction is rolled back.
        """
        testFile = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums={'cksum': 1111})

        assert testFile.exists() == False, \
               "ERROR: File exists before it was created"

        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
               "ERROR: File does not exist after it was created"

        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFile.delete()

        assert testFile.exists() == False, \
               "ERROR: File exists after it has been deleted"

        myThread.transaction.rollback()

        assert testFile.exists() > 0, \
               "ERROR: File does not exist after transaction was rolled back."
        return

    def testGetInfo(self):
        """
        _testGetInfo_

        Test the getInfo() method of the File class to make sure that it
        returns the correct information.
        """
        testFileParent = File(lfn = "/this/is/a/parent/lfn", size = 1024,
                              events = 20, checksums={'cksum': 1111})
        testFileParent.addRun(Run(1, *[45]))
        testFileParent.create()

        testFile = File(lfn = "/this/is/a/lfn", size = 1024, events = 10, checksums={'cksum': 222})
        testFile.addRun(Run(1, *[45]))
        testFile.addRun(Run(2, *[46, 47]))
        testFile.addRun(Run(2, *[47, 48]))
        testFile.create()
        testFile.setLocation(se = "se1.fnal.gov", immediateSave = False)
        testFile.setLocation(se = "se1.cern.ch", immediateSave = False)
        testFile.addParent("/this/is/a/parent/lfn")

        info = testFile.getInfo()

        assert info[0] == testFile["lfn"], \
               "ERROR: File returned wrong LFN"
        
        assert info[1] == testFile["id"], \
               "ERROR: File returned wrong ID"
        
        assert info[2] == testFile["size"], \
               "ERROR: File returned wrong size"
        
        assert info[3] == testFile["events"], \
               "ERROR: File returned wrong events"
        
        assert info[4] == testFile["checksums"], \
               "ERROR: File returned wrong cksum"
        
        assert len(info[5]) == 2, \
		      "ERROR: File returned wrong runs"
        
        assert info[5] == [Run(1, *[45]), Run(2, *[46, 47, 48])], \
               "Error: Run hasn't been combined correctly"
               
        assert len(info[6]) == 2, \
               "ERROR: File returned wrong locations"

        for testLocation in info[6]:
            assert testLocation in ["se1.fnal.gov", "se1.cern.ch"], \
                   "ERROR: File returned wrong locations"

        assert len(info[7]) == 1, \
               "ERROR: File returned wrong parents"

        assert info[7][0] == testFileParent, \
               "ERROR: File returned wrong parents"

        testFile.delete()
        testFileParent.delete()
        return
        
    def testGetParentLFNs(self):
        """
        _testGetParentLFNs_

        Create three files and set them to be parents of a fourth file.  Check
        to make sure that getParentLFNs() on the child file returns the correct
        LFNs.
        """
        testFileParentA = File(lfn = "/this/is/a/parent/lfnA", size = 1024,
                               events = 20, checksums = {'cksum': 1})
        testFileParentA.addRun(Run(1, *[45]))
        testFileParentB = File(lfn = "/this/is/a/parent/lfnB", size = 1024,
                               events = 20, checksums = {'cksum': 2})
        testFileParentB.addRun(Run(1, *[45]))
        testFileParentC = File(lfn = "/this/is/a/parent/lfnC", size = 1024,
                               events = 20, checksums = {'cksum': 3})
        testFileParentC.addRun(Run( 1, *[45]))

        testFileParentA.create()
        testFileParentB.create()
        testFileParentC.create()

        testFile = File(lfn = "/this/is/a/lfn", size = 1024,
                        events = 10, checksums = {'cksum': 1})
        testFile.addRun(Run( 1, *[45]))
        testFile.create()

        testFile.addParent(testFileParentA["lfn"])
        testFile.addParent(testFileParentB["lfn"])
        testFile.addParent(testFileParentC["lfn"])

        parentLFNs = testFile.getParentLFNs()
        
        assert len(parentLFNs) == 3, \
               "ERROR: Child does not have the right amount of parents"

        goldenLFNs = ["/this/is/a/parent/lfnA",
                      "/this/is/a/parent/lfnB",
                      "/this/is/a/parent/lfnC"]
        for parentLFN in parentLFNs:
            assert parentLFN in goldenLFNs, \
                   "ERROR: Unknown parent lfn"
            goldenLFNs.remove(parentLFN)
                   
        testFile.delete()
        testFileParentA.delete()
        testFileParentB.delete()
        testFileParentC.delete()
        return
    
    def testLoad(self):
        """
        _testLoad_

        Test the loading of file meta data using the ID of a file and the
        LFN of a file.
        """
        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum': 101}, first_event = 2, merged = True)
        testFileA.create()
                                                        
        testFileB = File(lfn = testFileA["lfn"])
        testFileB.load()
        testFileC = File(id = testFileA["id"])
        testFileC.load()

        assert testFileA == testFileB, \
               "ERROR: File load by LFN didn't work"

        assert testFileA == testFileC, \
               "ERROR: File load by ID didn't work"

        assert type(testFileB["id"]) == int, \
               "ERROR: File id is not an integer type."
        assert type(testFileB["size"]) == int, \
               "ERROR: File size is not an integer type."
        assert type(testFileB["events"]) == int, \
               "ERROR: File events is not an integer type."
        assert type(testFileB["checksums"]) == dict, \
               "ERROR: File cksum is not a string type."
        assert type(testFileB["first_event"]) == int, \
               "ERROR: File first_event is not an integer type."
        
        assert type(testFileC["id"]) == int, \
               "ERROR: File id is not an integer type."
        assert type(testFileC["size"]) == int, \
               "ERROR: File size is not an integer type."
        assert type(testFileC["events"]) == int, \
               "ERROR: File events is not an integer type."
        assert type(testFileC["checksums"]) == dict, \
               "ERROR: File cksum is not an string type."
        assert type(testFileC["first_event"]) == int, \
               "ERROR: File first_event is not an integer type."

        self.assertEqual(testFileC['checksums'], {'cksum': '101'})

        testFileA.delete()
        return

    def testLoadData(self):
        """
        _testLoadData_

        Test the loading of all data from a file, including run/lumi
        associations, location information and parentage information.
        """
        testFileParentA = File(lfn = "/this/is/a/parent/lfnA", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentA.addRun(Run( 1, *[45]))
        testFileParentB = File(lfn = "/this/is/a/parent/lfnB", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentB.addRun(Run( 1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum':1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        testFileA.setLocation(se = "se1.fnal.gov", immediateSave = False)
        testFileA.setLocation(se = "se1.cern.ch", immediateSave = False)
        testFileA.addParent("/this/is/a/parent/lfnA")
        testFileA.addParent("/this/is/a/parent/lfnB")
        testFileA.updateLocations()
                                                        
        testFileB = File(lfn = testFileA["lfn"])
        testFileB.loadData(parentage = 1)
        testFileC = File(id = testFileA["id"])
        testFileC.loadData(parentage = 1)

        assert testFileA == testFileB, \
               "ERROR: File load by LFN didn't work"

        assert testFileA == testFileC, \
               "ERROR: File load by ID didn't work"

        testFileA.delete()
        testFileParentA.delete()
        testFileParentB.delete()
        return    

    def testAddChild(self):
        """
        _testAddChild_

        Add a child to some parent files and make sure that all the parentage
        information is loaded/stored correctly from the database.
        """
        testFileParentA = File(lfn = "/this/is/a/parent/lfnA", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentA.addRun(Run( 1, *[45]))
        testFileParentB = File(lfn = "/this/is/a/parent/lfnB", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentB.addRun(Run( 1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                         checksums = {'cksum':1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testFileParentA.addChild("/this/is/a/lfn")
        testFileParentB.addChild("/this/is/a/lfn")

        testFileB = File(id = testFileA["id"])
        testFileB.loadData(parentage = 1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"
        return

    def testAddChildTransaction(self):
        """
        _testAddChildTransaction_

        Add a child to some parent files and make sure that all the parentage
        information is loaded/stored correctly from the database.  Roll back the
        addition of one of the children and then verify that it does in fact only
        have one parent.
        """
        testFileParentA = File(lfn = "/this/is/a/parent/lfnA", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentA.addRun(Run( 1, *[45]))
        testFileParentB = File(lfn = "/this/is/a/parent/lfnB", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentB.addRun(Run( 1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                         checksums = {'cksum': 1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testFileParentA.addChild("/this/is/a/lfn")

        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFileParentB.addChild("/this/is/a/lfn")

        testFileB = File(id = testFileA["id"])
        testFileB.loadData(parentage = 1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"

        myThread.transaction.rollback()
        testFileB.loadData(parentage = 1)

        goldenFiles = [testFileParentA]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"
        
        return
    
    def testCreateWithLocation(self):
        """
        _testCreateWithLocation_

        Create a file and add a couple locations.  Load the file from the
        database to make sure that the locations were set correctly.
        """
        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum':1}, 
                        locations = set(["se1.fnal.gov", "se1.cern.ch"]))
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        
        testFileB = File(id = testFileA["id"])
        testFileB.loadData()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"    
        return

    def testSetLocation(self):
        """
        _testSetLocation_

        Create a file and add a couple locations.  Load the file from the
        database to make sure that the locations were set correctly.
        """
        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum':1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testFileA.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"],
                              immediateSave = False)

        testFileB = File(id = testFileA["id"])
        testFileB.loadData()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"    
        return

    def testSetLocationTransaction(self):
        """
        _testSetLocationTransaction_

        Create a file at specific locations and commit everything to the
        database.  Reload the file from the database and verify that the
        locations are correct.  Rollback the database transaction and once
        again reload the file.  Verify that the original locations are back.
        """
        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum':1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        testFileA.setLocation(["se1.fnal.gov"])

        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFileA.setLocation(["se1.cern.ch"])
        testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"],
                              immediateSave = False)

        testFileB = File(id = testFileA["id"])
        testFileB.loadData()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"

        myThread.transaction.rollback()
        testFileB.loadData()

        goldenLocations = ["se1.fnal.gov"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"
        return    

    def testLocationsConstructor(self):
        """
        _testLocationsConstructor_

        Test to make sure that locations passed into the File() constructor
        are loaded from and saved to the database correctly.  Also test to make
        sure that the class behaves well when the location is passed in as a
        single string instead of a set.
        """
        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum':1}, locations = set(["se1.fnal.gov"]))
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testFileB = File(lfn = "/this/is/a/lfn2", size = 1024, events = 10,
                        checksums = {'cksum':1}, locations = "se1.fnal.gov")
        testFileB.addRun(Run( 1, *[45]))
        testFileB.create()        

        testFileC = File(id = testFileA["id"])
        testFileC.loadData()

        goldenLocations = ["se1.fnal.gov"]
        for location in testFileC["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)
            
        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"

        testFileC = File(id = testFileB["id"])
        testFileC.loadData()

        goldenLocations = ["se1.fnal.gov"]
        for location in testFileC["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)
            
        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"        
        return


    def testSetLocationOrder(self):
        """
        _testSetLocationOrder_

        This tests that you can specify a location before creating the file,
        instead of having to do it afterwards.
        """
        myThread = threading.currentThread()

        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setLocation("se1.cern.ch")
        testFileA.create()

        testFileB = File(lfn = testFileA["lfn"])
        testFileB.load()

        daoFactory = DAOFactory(package = "WMCore.WMBS", logger = logging, dbinterface = myThread.dbi)

        locationFac = daoFactory(classname = "Files.GetLocation")
        location  = locationFac.execute(testFileB['lfn']).pop()

        self.assertEqual(location, 'se1.cern.ch')

        return
        
    def testAddRunSet(self):
        """
        _testAddRunSet_

        Test the ability to add run and lumi information to a file.
        """
        testFile = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                        checksums = {'cksum':1}, locations = "se1.fnal.gov")
        testFile.create()
        runSet = set()
        runSet.add(Run( 1, *[45]))
        runSet.add(Run( 2, *[67, 68]))
        testFile.addRunSet(runSet)
        
        assert (runSet - testFile["runs"]) == set(), \
            "Error: addRunSet is not updating set correctly"

        return
    
    def testGetAncestorLFNs(self):
        """
        _testGetAncestorLFNs_

        Create a series of files that have several generations of parentage
        information.  Verify that the parentage information is reported
        correctly.
        """
        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileA.create()
        
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileB.create()
        
        testFileC = File(lfn = "/this/is/a/lfnC", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileC.create()
        
        testFileD = File(lfn = "/this/is/a/lfnD", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileD.create()
        
        testFileE = File(lfn = "/this/is/a/lfnE", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileE.create()
        
        testFileE = File(lfn = "/this/is/a/lfnF", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileE.create()
        
        testFileA.addParent(lfn = "/this/is/a/lfnB")
        testFileA.addParent(lfn = "/this/is/a/lfnC")
        testFileB.addParent(lfn = "/this/is/a/lfnD")
        testFileC.addParent(lfn = "/this/is/a/lfnD")
        testFileD.addParent(lfn = "/this/is/a/lfnE")
        testFileD.addParent(lfn = "/this/is/a/lfnF")
        
        level1 = ["/this/is/a/lfnB", "/this/is/a/lfnC"]
        level2 = ["/this/is/a/lfnD"]
        level3 = ["/this/is/a/lfnE", "/this/is/a/lfnF"]
        level4 = level5 = []
        
        decs2 = ["/this/is/a/lfnA"]
        
        assert testFileA.getAncestors(level=1, type='lfn') == level1, \
              "ERROR: level 1 test failed"
        assert testFileA.getAncestors(level=2, type='lfn') == level2, \
              "ERROR: level 2 test failed"
        assert testFileA.getAncestors(level=3, type='lfn') == level3, \
              "ERROR: level 3 test failed"
        assert testFileA.getAncestors(level=4, type='lfn') == level4, \
              "ERROR: level 4 test failed"
        assert testFileA.getAncestors(level=5, type='lfn') == level5, \
              "ERROR: level 5 test failed"
        
        assert testFileD.getDescendants(level=1, type='lfn') == level1, \
              "ERROR: level 1 desc test failed"
        assert testFileD.getDescendants(level=2, type='lfn') == decs2, \
              "ERROR: level 2 desc test failed"
        assert testFileD.getDescendants(level=3, type='lfn') == level4, \
              "ERROR: level 3 desc test failed"

        return

    def testGetBulkLocations(self):
        """
        _testGetBulkLocations_

        Check that we can retrieve the locations of all the files in a single
        bulk call.
        """
        myThread = threading.currentThread()

        daoFactory = DAOFactory(package = "WMCore.WMBS", logger = logging, dbinterface = myThread.dbi)
        locationAction = daoFactory(classname = "Locations.New")
        locationAction.execute(siteName = "site3", seName = "se2.fnal.gov")
        locationAction.execute(siteName = "site4", seName = "se3.fnal.gov")
        locationAction.execute(siteName = "site5", seName = "se4.fnal.gov")
        locationAction.execute(siteName = "site6", seName = "se5.fnal.gov")
        locationAction.execute(siteName = "site7", seName = "se6.fnal.gov")
        
        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se1.fnal.gov")
        testFileA.create()
        
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se2.fnal.gov")
        testFileB.create()
        
        testFileC = File(lfn = "/this/is/a/lfnC", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se3.fnal.gov")
        testFileC.create()
        
        testFileD = File(lfn = "/this/is/a/lfnD", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se4.fnal.gov")
        testFileD.create()
        
        testFileE = File(lfn = "/this/is/a/lfnE", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se5.fnal.gov")
        testFileE.create()
        
        testFileF = File(lfn = "/this/is/a/lfnF", size = 1024, events = 10,
                        checksums = {'cksum': 1}, locations = "se6.fnal.gov")
        testFileF.create()

        files = [testFileA, testFileB, testFileC, testFileD, testFileE, testFileF]
        

        locationFac = daoFactory(classname = "Files.GetBulkLocation")
        location  = locationFac.execute(files = files)

        for f in files:
            self.assertEqual(location[f.exists()], list(f['locations']))

        return

    def testBulkParentage(self):
        """
        _testBulkParentage_

        Verify that the bulk parentage dao correctly sets file parentage.
        """
        testFileChildA = File(lfn = "/this/is/a/child/lfnA", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileChildB = File(lfn = "/this/is/a/child/lfnB", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileChildA.create()
        testFileChildB.create()

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                         checksums = {'cksum':1})
        testFileA.create()
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10,
                         checksums = {'cksum':1})
        testFileB.create()
        testFileC = File(lfn = "/this/is/a/lfnC", size = 1024, events = 10,
                         checksums = {'cksum':1})
        testFileC.create()

        parentage = [{"child": testFileChildA["id"], "parent": testFileA["id"]},
                     {"child": testFileChildA["id"], "parent": testFileB["id"]},
                     {"child": testFileChildA["id"], "parent": testFileC["id"]},
                     {"child": testFileChildB["id"], "parent": testFileA["id"]},
                     {"child": testFileChildB["id"], "parent": testFileB["id"]}]

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)        
        bulkParentageAction = daofactory(classname = "Files.AddBulkParentage")
        bulkParentageAction.execute(parentage)
        
        testFileD = File(id = testFileChildA["id"])
        testFileD.loadData(parentage = 1)
        testFileE = File(id = testFileChildB["id"])
        testFileE.loadData(parentage = 1)        

        goldenFiles = [testFileA, testFileB, testFileC]
        for parentFile in testFileD["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"

        goldenFiles = [testFileA, testFileB]
        for parentFile in testFileE["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"        
        return

    def testDataStructsFile(self):
        """
        _testDataStructsFile_

        Tests our ability to create a WMBS file from a DataStructs File and vice versa
        """

        myThread = threading.currentThread()
        
        testLFN     = "lfn1"
        testSize    = 1024
        testEvents  = 100
        testCksum   = {"cksum": '1'}
        testParents = set(["lfn2"])
        testRun     = Run( 1, *[45])
        testSE      = "se1.cern.ch"

        parentFile = File(lfn= "lfn2")
        parentFile.create()

        testFile = File()

        inputFile = WMFile(lfn = testLFN, size = testSize, events = testEvents, checksums = testCksum, parents = testParents)
        inputFile.addRun(testRun)
        inputFile.setLocation(se = testSE)

        testFile.loadFromDataStructsFile(file = inputFile)
        testFile.create()
        testFile.save()

        
        loadFile = File(lfn = "lfn1")
        loadFile.loadData(parentage = 1)

        self.assertEqual(loadFile['size'],   testSize)
        self.assertEqual(loadFile['events'], testEvents)
        self.assertEqual(loadFile['checksums'], testCksum)
        self.assertEqual(loadFile['locations'], set([testSE]))
        #self.assertEqual(loadFile['parents'].pop()['lfn'], 'lfn2')

        wmFile = loadFile.returnDataStructsFile()
        self.assertEqual(wmFile == inputFile, True)

        return


    def testParentageByJob(self):
        """
        _testParentageByJob_
        
        Tests the DAO that assigns parentage by Job
        """

        testWorkflow = Workflow(spec = 'hello', owner = "mnorman",
                                name = "wf001", task="basicWorkload/Production")
        testWorkflow.create()
        testFileset = Fileset(name = "TestFileset")
        testFileset.create()
        testSubscription = Subscription(fileset = testFileset, workflow = testWorkflow, type = "Processing", split_algo = "FileBased")
        testSubscription.create()
        testJobGroup = JobGroup(subscription = testSubscription)
        testJobGroup.create()

        testFileParentA = File(lfn = "/this/is/a/parent/lfnA", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentA.addRun(Run( 1, *[45]))
        testFileParentB = File(lfn = "/this/is/a/parent/lfnB", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentB.addRun(Run( 1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                         checksums = {'cksum':1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testJobA = Job()
        testJobA.create(group = testJobGroup)
        testJobA.addFile(testFileParentA)
        testJobA.addFile(testFileParentB)
        testJobA.associateFiles()


        parentAction = self.daofactory(classname = "Files.SetParentageByJob")
        parentAction.execute(binds = {'jobid': testJobA.exists(), 'child': testFileA['lfn']})


        testFileB = File(id = testFileA["id"])
        testFileB.loadData(parentage = 1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            self.assertEqual(parentFile in goldenFiles, True,
                   "ERROR: Unknown parent file")
            goldenFiles.remove(parentFile)

        self.assertEqual(len(goldenFiles), 0,
                         "ERROR: Some parents are missing")


    def testAddChecksumsByLFN(self):
        """
        _testAddChecksumsByLFN_
        
        Test the DAO that adds checksums by LFN
        """

        testWorkflow = Workflow(spec = 'hello', owner = "mnorman",
                                name = "wf001", task="basicWorkload/Production")
        testWorkflow.create()
        testFileset = Fileset(name = "TestFileset")
        testFileset.create()
        testSubscription = Subscription(fileset = testFileset, workflow = testWorkflow, type = "Processing", split_algo = "FileBased")
        testSubscription.create()
        testJobGroup = JobGroup(subscription = testSubscription)
        testJobGroup.create()

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10)
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
        testFileB.addRun(Run( 1, *[45]))
        testFileB.create()

        testJobA = Job()
        testJobA.create(group = testJobGroup)
        testJobA.associateFiles()

        parentAction = self.daofactory(classname = "Files.AddChecksumByLFN")
        binds = [{'lfn': testFileA['lfn'], 'cktype': 'cksum', 'cksum': 101},
                 {'lfn': testFileA['lfn'], 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': testFileB['lfn'], 'cktype': 'cksum', 'cksum': 101}]
        parentAction.execute(bulkList = binds)

        testFileC = File(id = testFileA["id"])
        testFileC.loadData()
        testFileD = File(id = testFileB["id"])
        testFileD.loadData()

        self.assertEqual(testFileC['checksums'], {'adler32': '201', 'cksum': '101'})
        self.assertEqual(testFileD['checksums'], {'cksum': '101'})

        return


    def testSetLocationByLFN(self):
        """
        _testSetLocationByLFN_

        Create a couple of files and set their locations via the
        SetLocationByLFN DAO.  Load the files from the database to make sure
        that the locations were set correctly.
        """
        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                        checksums = {'cksum':1})
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10,
                        checksums = {'cksum':1})
        testFileB.addRun(Run( 1, *[45]))
        testFileB.create()

        parentAction = self.daofactory(classname = "Files.SetLocationByLFN")
        binds = [{'lfn': "/this/is/a/lfnA", 'location': 'se1.fnal.gov'},
                 {'lfn': "/this/is/a/lfnB", 'location': 'se1.fnal.gov'}]
        parentAction.execute(lfn = binds)

        testFileC = File(id = testFileA["id"])
        testFileC.loadData()
        testFileD = File(id = testFileB["id"])
        testFileD.loadData()

        self.assertEqual(testFileC['locations'], set(['se1.fnal.gov']))
        self.assertEqual(testFileD['locations'], set(['se1.fnal.gov']))

        
        return

    def testCreateWithParent(self):
        
        """
        Test passing the parents argument in file creation.
        Check that if a parent file does not exist, it is created and the parentage is set.
        """
        
        # create the parent file before it is added to the child file.
        testFileParentA = File(lfn = "/this/is/a/parent/lfnA", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentA.addRun(Run( 1, *[45]))
        testFileParentA.create()
        
        # don't create the parent file before it is added to the child file.
        testFileParentB = File(lfn = "/this/is/a/parent/lfnB", size = 1024,
                              events = 20, checksums = {'cksum': 1})
        testFileParentB.addRun(Run( 1, *[45]))
        
        testFileA = File(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                         checksums = {'cksum':1}, 
                         parents = [testFileParentA, testFileParentB])
        testFileA.addRun(Run( 1, *[45]))
        
        testFileA.create()


        testFileB = File(id = testFileA["id"])
        testFileB.loadData(parentage = 1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"

    def testAddToFileset(self):
        """
        _AddToFileset_

        Test to see if we can add to a fileset using the DAO
        """
        testFileset = Fileset(name = "inputFileset")
        testFileset.create()

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10)
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
        testFileB.addRun(Run( 1, *[45]))
        testFileB.create()

        addToFileset = self.daofactory(classname = "Files.AddToFileset")
        addToFileset.execute(file = [testFileA['lfn'], testFileB['lfn']],
                             fileset = testFileset.id)

        testFileset2 = Fileset(name = "inputFileset")
        testFileset2.loadData()

        self.assertEqual(len(testFileset2.files), 2)
        for file in testFileset2.files:
            self.assertTrue(file in [testFileA, testFileB])

        # Check that adding twice doesn't crash
        addToFileset.execute(file = [testFileA['lfn'], testFileB['lfn']],
                             fileset = testFileset.id)

    def testAddDupsToFileset(self):
        """
        _AddToDupsFileset_

        Verify that the dups version of the AddToFileset DAO will not add files
        to a fileset if they're already associated to another fileset with the
        same workflow.
        """
        testWorkflowA = Workflow(spec = 'hello', owner = "mnorman",
                                 name = "wf001", task="basicWorkload/Production")
        testWorkflowA.create()
        testWorkflowB = Workflow(spec = 'hello', owner = "mnorman",
                                 name = "wf001", task="basicWorkload/Production2")
        testWorkflowB.create()        

        testFilesetA = Fileset(name = "inputFilesetA")
        testFilesetA.create()
        testFilesetB = Fileset(name = "inputFilesetB")
        testFilesetB.create()        

        testSubscriptionA = Subscription(workflow = testWorkflowA, fileset = testFilesetA)
        testSubscriptionA.create()
        testSubscriptionB = Subscription(workflow = testWorkflowB, fileset = testFilesetB)
        testSubscriptionB.create()        

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10)
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
        testFileB.addRun(Run( 1, *[45]))
        testFileB.create()

        addToFileset = self.daofactory(classname = "Files.AddDupsToFileset")
        addToFileset.execute(file = [testFileA['lfn'], testFileB['lfn']],
                             fileset = testFilesetA.id, workflow = "wf001")

        testFileset2 = Fileset(name = "inputFilesetA")
        testFileset2.loadData()

        self.assertEqual(len(testFileset2.files), 2)
        for file in testFileset2.files:
            self.assertTrue(file in [testFileA, testFileB])

        # Check that adding twice doesn't crash
        addToFileset.execute(file = [testFileA['lfn'], testFileB['lfn']],
                             fileset = testFilesetA.id, workflow = "wf001")

        # Files should not get added to fileset B because fileset A is associated
        # with wf001.
        addToFileset.execute(file = [testFileA['lfn'], testFileB['lfn']],
                             fileset = testFilesetB.id, workflow = "wf001")

        testFileset2 = Fileset(name = "inputFilesetB")
        testFileset2.loadData()

        self.assertEqual(len(testFileset2.files), 0)
        return

    def testAddDupsToFilesetBulk(self):
        """
        _AddToDupsFilesetBulk_

        Same as testAddDupsToFileset() but faster
        """
        testWorkflowA = Workflow(spec = 'hello', owner = "mnorman",
                                 name = "wf001", task="basicWorkload/Production")
        testWorkflowA.create()
        testWorkflowB = Workflow(spec = 'hello', owner = "mnorman",
                                 name = "wf001", task="basicWorkload/Production2")
        testWorkflowB.create()

        testFilesetA = Fileset(name = "inputFilesetA")
        testFilesetA.create()
        testFilesetB = Fileset(name = "inputFilesetB")
        testFilesetB.create()

        testSubscriptionA = Subscription(workflow = testWorkflowA, fileset = testFilesetA)
        testSubscriptionA.create()
        testSubscriptionB = Subscription(workflow = testWorkflowB, fileset = testFilesetB)
        testSubscriptionB.create()

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10, locations = ['SiteA'])
        testFileA.addRun(Run( 1, *[45]))
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10, locations = ['SiteB'])
        testFileB.addRun(Run( 1, *[45]))

        addFilesToWMBSInBulk(testFilesetA.id, "wf001",
                             [testFileA, testFileB],
                             conn = testFileA.getDBConn(),
                             transaction = testFileA.existingTransaction())

        testFileset2 = Fileset(name = "inputFilesetA")
        testFileset2.loadData()

        self.assertEqual(len(testFileset2.files), 2)
        for file in testFileset2.files:
            self.assertTrue(file in [testFileA, testFileB])

        # Check that adding twice doesn't crash
        addFilesToWMBSInBulk(testFilesetA.id, "wf001",
                             [testFileA, testFileB],
                             conn = testFileA.getDBConn(),
                             transaction = testFileA.existingTransaction())

        # Files should not get added to fileset B because fileset A is associated
        # with wf001.
        addFilesToWMBSInBulk(testFilesetB.id, "wf001",
                             [testFileA, testFileB],
                             conn = testFileA.getDBConn(),
                             transaction = testFileA.existingTransaction())

        testFileset2 = Fileset(name = "inputFilesetB")
        testFileset2.loadData()

        self.assertEqual(len(testFileset2.files), 0)
        return

    def test_SetLocationsForWorkQueue(self):
        """
        _SetLocationsForWorkQueue_

        Test the code that sets locations for the WorkQueue
        This is more complicated than it seems.
        """

        action = self.daofactory(classname = "Files.SetLocationForWorkQueue")

        testFile = File(lfn = "myLFN", size = 1024,
                        events = 10, checksums={'cksum':1111})
        testFile.create()

        tFile1 = File(lfn = "myLFN")
        tFile1.loadData()
        locations = tFile1.getLocations()

        self.assertEqual(locations, [])

        binds = [{'lfn': 'myLFN', 'location': 'se1.cern.ch'}]
        action.execute(lfns = ['myLFN'], locations = binds)

        tFile1.loadData()
        locations = tFile1.getLocations()
        self.assertEqual(locations, ['se1.cern.ch'])

        binds = [{'lfn': 'myLFN', 'location': 'se1.fnal.gov'}]
        action.execute(lfns = ['myLFN'], locations = binds)

        tFile1.loadData()
        locations = tFile1.getLocations()
        self.assertEqual(locations, ['se1.fnal.gov'])
        
        return
Example #28
class DashboardInterfaceTest(unittest.TestCase):
    """
    Test for the dashboard interface and its monitoring interaction

    Well, once I've written them it will be
    """
    def setUp(self):
        """
        Basically, do nothing

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()

        self.testDir = self.testInit.generateWorkDir()

        return

    def tearDown(self):
        """
        Clean up the test directory

        """

        self.testInit.delWorkDir()

        return

    def createWorkload(self):
        """
        Create a workload in order to test things

        """
        generator = WMSpecGenerator()
        workload = generator.createReRecoSpec("Tier1ReReco")
        return workload

    def createTestJob(self):
        """
        Create a test job to pass to the DashboardInterface

        """

        job = Job(name="ThisIsASillyName")

        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10)
        testFileA.addRun(Run(1, *[45]))
        testFileB = File(lfn="/this/is/a/lfnB", size=1024, events=10)
        testFileB.addRun(Run(1, *[46]))

        job.addFile(testFileA)
        job.addFile(testFileB)

        job['id'] = 1

        return job

    def createReport(self, outcome=0):
        """
        Create a test report

        """

        jobReport = Report()
        jobReport.addStep('cmsRun1')
        jobReport.setStepStartTime(stepName='cmsRun1')
        jobReport.setStepStopTime(stepName='cmsRun1')
        if outcome:
            jobReport.addError('cmsRun1', 200, 'FakeError', 'FakeError')

        return jobReport

    def setupJobEnvironment(self, name='test'):
        """
        _setupJobEnvironment_

        Make some sort of environment in which to run tests
        """

        os.environ['WMAGENT_SITE_CONFIG_OVERRIDE'] = os.path.join(
            getTestBase(), "WMCore_t/Storage_t",
            "T1_US_FNAL_SiteLocalConfig.xml")
        return

    def testASuccessfulJobMonitoring(self):
        """
        _testASuccessfulJobMonitoring_

        Check that the data packets make sense when a job completes successfully
        """

        # Get the necessary objects
        name = 'testA'
        job = self.createTestJob()
        workload = self.createWorkload()
        task = workload.getTask(taskName="DataProcessing")
        report = self.createReport()

        # Fill the job environment
        self.setupJobEnvironment(name=name)

        # Instantiate DBInfo
        dbInfo = DashboardInfo(job=job, task=task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName="cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step=step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step=step.data, stepReport=report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)
        self.assertEqual(data['1_NCores'], 1)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        #Do a second step
        step = task.getStep(stepName="cmsRun1")

        #Do the step start (It's not the first step)
        data = dbInfo.stepStart(step=step.data)
        self.assertEqual(data['jobStart'], None)
        self.assertEqual(data['2_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step=step.data, stepReport=report)
        self.assertEqual(data['2_ExeEnd'], step.name())
        self.assertEqual(data['2_ExeExitCode'], 0)
        self.assertTrue(data['2_ExeWCTime'] >= 0)
        self.assertEqual(data['2_NCores'], 1)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 2)

        # End the job!
        data = dbInfo.jobEnd()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'], "")

        return

    def testMultithreadedApplication(self):
        """
        _testMultithreadedApplication_

        Check that the data packets have NCores and it picks it up successfully from the CMSSW step
        """

        # Get the necessary objects
        name = 'testMT'
        job = self.createTestJob()
        workload = self.createWorkload()
        task = workload.getTask(taskName="DataProcessing")
        report = self.createReport()

        # Fill the job environment
        self.setupJobEnvironment(name=name)

        # Instantiate DBInfo
        dbInfo = DashboardInfo(job=job, task=task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Modify the first step
        step = task.getStep(stepName="cmsRun1")
        step.getTypeHelper().setNumberOfCores(8)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['NCores'], 8)

        # Do the first step
        step = task.getStep(stepName="cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step=step.data)

        #Do the step end
        data = dbInfo.stepEnd(step=step.data, stepReport=report)
        self.assertEqual(data['1_NCores'], 8)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        # End the job and test the final NCores report
        data = dbInfo.jobEnd()
        self.assertEqual(data['NCores'], 8)

        return

    def testAFailedJobMonitoring(self):
        """
        _TestAFailedJobMonitoring_

        Simulate a job that completes but fails, check that the data sent is
        correct
        """

        # Get the necessary objects
        name = 'testB'
        job = self.createTestJob()
        workload = self.createWorkload()
        task = workload.getTask(taskName="DataProcessing")
        report = self.createReport(outcome=1)

        # Fill the job environment
        self.setupJobEnvironment(name=name)

        # Instantiate DBInfo
        dbInfo = DashboardInfo(job=job, task=task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName="cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step=step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step=step.data, stepReport=report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertNotEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        # End the job!
        data = dbInfo.jobEnd()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertNotEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'].find('cmsRun1'), -1)

        return

    def testAKilledJobMonitoring(self):
        """
        _TestAKilledJobMonitoring_

        Simulate a job that is killed and check that the data sent is
        correct
        """

        # Get the necessary objects
        name = 'testC'
        job = self.createTestJob()
        workload = self.createWorkload()
        task = workload.getTask(taskName="DataProcessing")
        report = self.createReport(outcome=1)

        # Fill the job environment
        self.setupJobEnvironment(name=name)

        # Instantiate DBInfo
        dbInfo = DashboardInfo(job=job, task=task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName="cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step=step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step=step.data, stepReport=report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertNotEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)

        # Kill the job!
        data = dbInfo.jobKilled()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertNotEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'].find('killed'), -1)

        return

    @attr('integration')
    def testGetDN(self):
        """
        _testGetDN_

        Checks that we can get a DN
        """
        dn = getUserProxyDN()
        if 'X509_USER_PROXY' in os.environ:
            self.assertNotEqual(
                dn, None, 'Error: This should get a DN, if you have set one')
        else:
            self.assertEqual(
                dn, None,
                'Error: There is no proxy in the environment, it should not get one'
            )
Example #29
class testJSONRequests(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        tmp = self.testInit.generateWorkDir()
        self.request = Requests.JSONRequests(idict={'req_cache_path': tmp})

    def roundTrip(self, data):
        encoded = self.request.encode(data)
        # print encoded
        # print encoded.__class__.__name__
        decoded = self.request.decode(encoded)
        # print decoded.__class__.__name__
        self.assertEqual(data, decoded)

    def roundTripLax(self, data):
        encoded = self.request.encode(data)
        decoded = self.request.decode(encoded)
        datakeys = data.keys()

        for k in decoded.keys():
            assert k in datakeys
            datakeys.pop(datakeys.index(k))
            # print 'the following keys were dropped\n\t',datakeys

    def testSet1(self):
        self.roundTrip(set([]))

    def testSet2(self):
        self.roundTrip(set([1, 2, 3, 4, Run(1)]))

    def testSet3(self):
        self.roundTrip(set(['a', 'b', 'c', 'd']))

    def testSet4(self):
        self.roundTrip(set([1, 2, 3, 4, 'a', 'b']))

    def testRun1(self):
        self.roundTrip(Run(1))

    def testRun2(self):
        self.roundTrip(Run(1, 1))

    def testRun3(self):
        self.roundTrip(Run(1, 2, 3))

    def testMask1(self):
        self.roundTrip(Mask())

    def testMask2(self):
        mymask = Mask()
        mymask['FirstEvent'] = 9999
        mymask['LastEvent'] = 999
        self.roundTrip(mymask)

    def testMask3(self):
        mymask = Mask()
        mymask['FirstEvent'] = 9999
        mymask['LastEvent'] = 999
        myjob = DataStructsJob()
        myjob["mask"] = mymask
        self.roundTrip(myjob)

    def testMask4(self):
        self.roundTrip({
            'LastRun': None,
            'FirstRun': None,
            'LastEvent': None,
            'FirstEvent': None,
            'LastLumi': None,
            'FirstLumi': None
        })

    def testMask5(self):
        mymask = Mask()
        mymask['FirstEvent'] = 9999
        mymask['LastEvent'] = 999
        myjob = DataStructsJob()
        myjob["mask"] = mymask
        self.roundTripLax(myjob)

    def testMask6(self):
        mymask = Mask()
        myjob = DataStructsJob()
        myjob["mask"] = mymask
        self.roundTripLax(myjob)

    def testSpecialCharacterPasswords(self):
        url = 'http://username:p@ssw:rd@localhost:6666'
        req = JSONRequests(url)
        self.assertEqual(req['host'], 'http://localhost:6666')
        self.assertEqual(req.additionalHeaders['Authorization'],
                         'Basic dXNlcm5hbWU6cEBzc3c6cmQ=')
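
    # A minimal illustrative sketch (not part of the original test) of where the
    # 'Authorization' value above comes from: HTTP Basic auth simply
    # base64-encodes the "user:password" portion of the URL.
    def testBasicAuthEncodingSketch(self):
        import base64
        self.assertEqual(base64.b64encode(b'username:p@ssw:rd'),
                         b'dXNlcm5hbWU6cEBzc3c6cmQ=')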
Example #30
class ThreadPoolTest(unittest.TestCase):
    """
    _ThreadPool_t_

    Unit tests for threadpool

    """
    _nrOfThreads = 10
    _nrOfPools = 5

    def setUp(self):
        "make a logger instance and create tables"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()

    def tearDown(self):
        """
        Deletion of database
        """
        # FIXME: this might not work if you're not using a socket.

        self.testInit.clearDatabase()

    def testA(self):
        """
        __testSubscribe__

        Test subscription of a component.
        """
        raise nose.SkipTest

        myThread = threading.currentThread()
        # create a 'fake' component that contains an arg dictionary.
        component = Dummy()
        # load default parameters.
        config = self.testInit.getConfiguration()
        # normally assigned by the harness of the test component.
        config.Agent.componentName = "TestComponent"

        component.config = config

        threadPools = []
        for i in range(0, ThreadPoolTest._nrOfPools):
            threadPool = ThreadPool("WMCore.ThreadPool.ThreadSlave", \
                component, 'MyPool_'+str(i), ThreadPoolTest._nrOfThreads)
            threadPools.append(threadPool)

        # this is how you would use the threadpool. The threadpool retrieves
        # events/payloads from the message service. If a thread is available
        # it is dispatched, otherwise it is stored in the threadpool.
        # make the number of tasks bigger than the number of threads to test
        # the persistent queue.
        for i in range(0, ThreadPoolTest._nrOfThreads * 10):
            event = 'eventNr_' + str(i)
            payload = 'payloadNr_' + str(i)
            # normally you would have different events per threadpool and
            # even different objects per pool. the payload part will be
            # pickled into the database enabling flexibility in passing
            # information.
            for j in range(0, ThreadPoolTest._nrOfPools):
                threadPools[j].enqueue(event, \
                    {'event' : event, 'payload' : payload})

        # this commit you want to be in the agent harness, so the message is
        # actually removed from the msgService. we can do this as the threadpool
        # acts as a dispatcher and is a short-lived action: dispatch to a thread
        # or queue and tell the agent harness it is finished.
        finished = False
        timeout = 60  # secs
        currenttime = 0
        while not finished:
            print('waiting for threads to finish. Work left:')
            for j in range(0, ThreadPoolTest._nrOfPools):
                print('pool_' + str(j) + ':' + str(threadPools[j].callQueue))
            time.sleep(1)
            finished = True
            currenttime += 1
            if (timeout == currenttime):
                raise RuntimeError
            for j in range(0, ThreadPoolTest._nrOfPools):
                if (len(threadPools[j].resultsQueue) <
                        ThreadPoolTest._nrOfThreads * 10):
                    finished = False
                    break
        # check if the tables are really empty and all messages
        # have been processed.
        for j in range(0, ThreadPoolTest._nrOfPools):
            assert len(threadPools[j].resultsQueue) == \
                ThreadPoolTest._nrOfThreads*10
        myThread.transaction.begin()
        for j in range(0, ThreadPoolTest._nrOfPools):
            self.assertEqual(threadPools[j].countMessages(), 0)
        myThread.transaction.commit()
Example #31
class WMBSServiceTest(unittest.TestCase):

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also, create some dummy locations.

        This doesn't start the server automatically.
        You need to start the server beforehand - make sure to change
        self.server_url if it is not the same as the default given here
        (localhost:8080).

        WMCORE/src/python/WMCore/WebTools/Root.py --ini=WMCORE/src/python/WMCore/HTTPFrontEnd/WMBSDefaultConfig.py
        """
        self.server_url = 'http://localhost:8080'

    @attr('integration')
    def testAllMethods(self):
        pass

    @attr('integration')
    def testJobs(self):
        print "\nTesting jobs service: Should return all the job id and state of jobs"
        print self.wmbsServiceSetup('jobs')

    @attr('integration')
    def testJobCount(self):
        print "\nTesting job count service: Should return the job count by and state of jobs"

        print self.wmbsServiceSetup('jobcount')

    @attr('integration')
    def testJobsBySubs(self):
        print "\nTesting jobsbysubs service: Should return the jobs by given fileset and workflow and specified time"
        param = {"fileset_name": 'TestFileset', 'workflow_name':'wf001', 'state_time': 0}
        print self.wmbsServiceSetup('jobsbysubs', param)

    @attr('integration')
    def testJobCountBySubsAndRun(self):
        print "\nTesting jobcountbysubs service: Should return the job count by given subscription and run"
        param = {"fileset_name": 'TestFileset', 'workflow_name':'wf001', 'run':1 }
        print self.wmbsServiceSetup('jobcountbysubs', param)
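
    # A minimal sketch (an assumption, not the original code) of the elided
    # wmbsServiceSetup helper used by the tests above: it presumably issues an
    # HTTP GET against the WMBS REST front-end and returns the raw response
    # body. The '/wmbsservice/wmbs/' path is a guess based on the method names.
    def wmbsServiceSetup(self, method, param=None):
        import urllib
        url = '%s/wmbsservice/wmbs/%s' % (self.server_url, method)
        if param:
            url += '?' + urllib.urlencode(param)
        return urllib.urlopen(url).read()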
Example #32
class Scram_t(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testDir = self.testInit.generateWorkDir()
        self.oldCwd = os.getcwd()

    def tearDown(self):
        self.testInit.delWorkDir()

    def testA(self):
        """
        instantiate a Scram instance in test mode.
        """
        try:
            Scram(
                initialise="/bin/date",
                architecture="slc5_amd64_gcc454",
                version="CMSSW_X_Y_Z",
                test=True
            )
        except Exception as ex:
            msg = "Failed to instantiate Scram in test mode:\n %s " % str(ex)
            self.fail(msg)

    def testB(self):
        """
        instantiate a Scram instance in non-test mode
        limited in what we can test here since we don't have scram etc. in the unittest env
        """
        try:
            Scram(
                initialise="/bin/date",
                architecture="slc5_amd64_gcc454",
                version="CMSSW_X_Y_Z"
            )
        except Exception as ex:
            msg = "Failed to instantiate Scram:\n %s " % str(ex)
            self.fail(msg)

    def testC(self):
        """
        test all method calls in test mode

        """
        s = Scram(
            initialise="/bin/date",
            architecture="slc5_amd64_gcc454",
            version="CMSSW_X_Y_Z",
            directory=self.testDir,
            test=True

        )

        try:
            status = s.project()
        except Exception as ex:
            msg = "Error running Scram.project:\n %s" % str(ex)
            self.fail(msg)

        self.assertEqual(status, 0)
        self.assertTrue(os.path.exists(s.projectArea))
        self.assertTrue("project" in s.lastExecuted)
        self.assertTrue("CMSSW_X_Y_Z" in s.lastExecuted)

        try:
            status = s.runtime()
        except Exception as ex:
            msg = "Error running Scram.runtime:\n %s" % str(ex)
            self.fail(msg)

        self.assertEqual(status, 0)
        self.assertTrue("ru -sh" in s.lastExecuted)
        self.assertTrue("TEST_MODE" in s.runtimeEnv)

        comm = "echo \"Hello World\""
        try:
            status = s(comm)
        except Exception as ex:
            msg = "Failed to call Scram object:\n %s" % str(ex)
            self.fail(msg)

        self.assertEqual(status, 0)
        self.assertEqual(s.lastExecuted, comm)

    def testArchMap(self):
        self.assertItemsEqual(OS_TO_ARCH['rhel6'], ['slc5', 'slc6'])
        self.assertItemsEqual(OS_TO_ARCH['rhel7'], ['slc7'])
        self.assertItemsEqual(ARCH_TO_OS['slc6'], ['rhel6'])
        self.assertItemsEqual(ARCH_TO_OS['slc7'], ['rhel7'])

    def testScramArchParsing(self):
        """
        Test the various modes of parsing for the scram arch
        """
        try:
            os.chdir(self.testDir)
            with tempfile.NamedTemporaryFile() as tf:
                tf.write('GLIDEIN_REQUIRED_OS = "rhel6" \n')
                tf.write('Memory = 2048\n')
                tf.flush()
                with tmpEnv(_CONDOR_MACHINE_AD=tf.name):
                    self.assertEqual(getSingleScramArch('slc6_blah_blah'), 'slc6_blah_blah')
                    self.assertEqual(getSingleScramArch('slc5_blah_blah'), 'slc5_blah_blah')
                    self.assertEqual(getSingleScramArch(['slc6_blah_blah', 'slc7_blah_blah']),
                                      'slc6_blah_blah')
                    self.assertEqual(getSingleScramArch(['slc6_blah_blah', 'slc5_blah_blah']),
                                      'slc6_blah_blah')
                    self.assertEqual(getSingleScramArch(['slc7_blah_blah', 'slc8_blah_blah']), None)
            with tempfile.NamedTemporaryFile() as tf:
                tf.write('GLIDEIN_REQUIRED_OS = "rhel7" \n')
                tf.write('Memory = 2048\n')
                tf.flush()
                with tmpEnv(_CONDOR_MACHINE_AD=tf.name):
                    self.assertEqual(getSingleScramArch('slc6_blah_blah'), 'slc6_blah_blah')
                    self.assertEqual(getSingleScramArch('slc7_blah_blah'), 'slc7_blah_blah')
                    self.assertEqual(getSingleScramArch(['slc6_blah_blah', 'slc7_blah_blah']),
                                      'slc7_blah_blah')
                    self.assertEqual(getSingleScramArch(['slc6_blah_blah', 'slc5_blah_blah']), None)
                    self.assertEqual(getSingleScramArch(['slc7_blah_blah', 'slc8_blah_blah']),
                                      'slc7_blah_blah')
        except Exception:
            raise
        finally:
            os.chdir(self.oldCwd)
        return
Example #33
class SetupCMSSWPsetTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testDir = self.testInit.generateWorkDir()
        sys.path.insert(
            0,
            os.path.join(WMCore.WMBase.getTestBase(),
                         "WMCore_t/WMRuntime_t/Scripts_t"))

    def tearDown(self):
        sys.path.remove(
            os.path.join(WMCore.WMBase.getTestBase(),
                         "WMCore_t/WMRuntime_t/Scripts_t"))
        del sys.modules["WMTaskSpace"]
        self.testInit.delWorkDir()
        os.unsetenv("WMAGENT_SITE_CONFIG_OVERRIDE")

    def createTestStep(self):
        """
        _createTestStep_

        Create a test step that can be passed to the setup script.

        """
        newStep = WMStep("cmsRun1")
        newStepHelper = CMSSWStepHelper(newStep)
        newStepHelper.setStepType("CMSSW")
        newStepHelper.setGlobalTag("SomeGlobalTag")
        stepTemplate = StepFactory.getStepTemplate("CMSSW")
        stepTemplate(newStep)
        newStep.application.command.configuration = "PSet.py"
        return newStepHelper

    def createTestJob(self):
        """
        _createTestJob_

        Create a test job that has parents for each input file.

        """
        newJob = Job(name="TestJob")
        newJob.addFile(
            File(lfn="/some/file/one",
                 parents=set([File(lfn="/some/parent/one")])))
        newJob.addFile(
            File(lfn="/some/file/two",
                 parents=set([File(lfn="/some/parent/two")])))
        return newJob

    def loadProcessFromPSet(self):
        """
        _loadProcessFromPSet_

        This requires changing the working directory,
        do so in a safe manner to encapsulate the change to this method only
        """

        currentPath = os.getcwd()
        loadedProcess = None
        try:
            if not os.path.isdir(self.testDir):
                raise RuntimeError("Test directory %s does not exist" % self.testDir)
            os.chdir(self.testDir)
            testFile = "PSet.py"
            pset = imp.load_source('process', testFile)
            loadedProcess = pset.process
        except Exception, ex:
            self.fail(
                "An exception was caught while trying to load the PSet, %s" %
                str(ex))
        finally:
            os.chdir(currentPath)
        return loadedProcess
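
# A minimal sketch, not from the original module, of the "change the working
# directory in a safe manner" idea described in loadProcessFromPSet above:
# a context manager that restores the previous cwd even if the body raises.
import contextlib
import os


@contextlib.contextmanager
def safeChdir(targetDir):
    """Temporarily chdir into targetDir, always restoring the old cwd."""
    oldCwd = os.getcwd()
    os.chdir(targetDir)
    try:
        yield targetDir
    finally:
        os.chdir(oldCwd)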
Example #34
class RepackMergeTest(unittest.TestCase):
    """
    _RepackMergeTest_
    Test for RepackMerge job splitter
    """

    def setUp(self):
        """
        _setUp_
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer", "T0.WMBS"])

        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")

        myThread = threading.currentThread()

        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)

        wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS",
                                    logger = logging,
                                    dbinterface = myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state, state_time)
                                    VALUES (1, 'SomeSite', 1, 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_pnns
                                    (id, pnn)
                                    VALUES (2, 'SomePNN')
                                    """, transaction = False)
        
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 2)
                                    """, transaction = False)


        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)

        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 1 },
                              transaction = False)
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 2 },
                              transaction = False)
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 3 },
                              transaction = False)
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 4 },
                              transaction = False)
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 5 },
                              transaction = False)

        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "A" },
                                transaction = False)

        insertCMSSVersionDAO = daoFactory(classname = "RunConfig.InsertCMSSWVersion")
        insertCMSSVersionDAO.execute(binds = { 'VERSION' : "CMSSW_4_2_7" },
                                     transaction = False)

        insertStreamCMSSWVersionDAO = daoFactory(classname = "RunConfig.InsertStreamCMSSWVersion")
        insertStreamCMSSWVersionDAO.execute(binds = { 'RUN' : 1,
                                                      'STREAM' : 'A',
                                                      'VERSION' : "CMSSW_4_2_7" },
                                            transaction = False)

        insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
        insertStreamerDAO.execute(streamerPNN = "SomePNN",
                                  binds = { 'RUN' : 1,
                                            'P5_ID' : 1,
                                            'LUMI' : 4,
                                            'STREAM' : "A",
                                            'LFN' : "/testLFN/A",
                                            'FILESIZE' : 100,
                                            'EVENTS' : 100,
                                            'TIME' : int(time.time()) },
                                  transaction = False)

        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "A", "TestFileset1")

        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset2 = Fileset(name = "TestFileset2")
        self.fileset1.load()
        self.fileset2.create()

        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow2 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow2", task="Test")
        workflow1.create()
        workflow2.create()

        self.subscription1  = Subscription(fileset = self.fileset1,
                                           workflow = workflow1,
                                           split_algo = "Repack",
                                           type = "Repack")
        self.subscription2  = Subscription(fileset = self.fileset2,
                                           workflow = workflow2,
                                           split_algo = "RepackMerge",
                                           type = "RepackMerge")
        self.subscription1.create()
        self.subscription2.create()

        myThread.dbi.processData("""INSERT INTO wmbs_workflow_output
                                    (WORKFLOW_ID, OUTPUT_IDENTIFIER, OUTPUT_FILESET)
                                    VALUES (%d, 'SOMEOUTPUT', %d)
                                    """ % (workflow1.id, self.fileset2.id),
                                 transaction = False)

        # keep for later
        self.insertSplitLumisDAO = daoFactory(classname = "JobSplitting.InsertSplitLumis")
        self.insertClosedLumiDAO = daoFactory(classname = "RunLumiCloseout.InsertClosedLumi")
        self.feedStreamersDAO = daoFactory(classname = "Tier0Feeder.FeedStreamers")                                                      
        self.acquireFilesDAO = wmbsDaoFactory(classname = "Subscriptions.AcquireFiles")
        self.completeFilesDAO = wmbsDaoFactory(classname = "Subscriptions.CompleteFiles")
        self.currentTime = int(time.time())

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['minInputSize'] = 2.1 * 1024 * 1024 * 1024
        self.splitArgs['maxInputSize'] = 4.0 * 1024 * 1024 * 1024
        self.splitArgs['maxInputEvents'] = 100000000
        self.splitArgs['maxInputFiles'] = 1000
        self.splitArgs['maxEdmSize'] = 20 * 1024 * 1024 * 1024
        self.splitArgs['maxOverSize'] = 10 * 1024 * 1024 * 1024
        self.splitArgs['maxLatency'] = 50000

        return

    def tearDown(self):
        """
        _tearDown_
        """
        self.testInit.clearDatabase()

        return

    def deleteSplitLumis(self):
        """
        _deleteSplitLumis_
        """
        myThread = threading.currentThread()

        myThread.dbi.processData("""DELETE FROM lumi_section_split_active
                                    """,
                                 transaction = False)

        return

    def test00(self):
        """
        _test00_
        Test that the job name prefix feature works
        Test max edm size threshold for single lumi
        small lumi, followed by over-large lumi
        expect 1 job for the small lumi and 2 jobs for the over-large lumi
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2 * lumi):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxEdmSize'] = 13000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 3,
                         "ERROR: JobFactory didn't create three jobs")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("RepackMerge-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 3,
                         "ERROR: Job does not process 3 files")

        job = jobGroups[0].jobs[2]
        self.assertEqual(len(job.getFiles()), 1,
                         "ERROR: Job does not process 1 file")

        return

    def test01(self):
        """
        _test01_
        Test max size threshold for single lumi
        small lumi, followed by large lumi
        expect 1 job for small lumi and 1 job for large
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputSize'] = 3000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test02(self):
        """
        _test02_
        Test max event threshold for single lumi
        small lumi, followed by large lumi
        expect 1 job for small lumi and 1 job for large
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100 * lumi)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputEvents'] = 300
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test03(self):
        """
        _test03_
        Test max input files threshold for single lumi
        small lumi, followed by large lumi
        expect 1 job for small lumi and 1 job for large
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(lumi * 2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputFiles'] = 3
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        return

    def test04(self):
        """
        _test04_
        Test max size threshold for multi lumi
        3 same size lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 3]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 3000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputSize'] = 5000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test05(self):
        """
        _test05_
        Test max event threshold for multi lumi
        3 same size lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 3]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 3000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputEvents'] = 500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test06(self):
        """
        _test06_
        Test max input files threshold for multi lumi
        3 same size lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 3]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 3000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test07(self):
        """
        _test07_
        Test over merge
        one small lumi, one large lumi (small below min size,
        large below max size, but both together above max size)
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 3000
        mySplitArgs['maxInputSize'] = 9000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        return

    def test08(self):
        """
        _test08_
        Test under merge (over merge size threshold)
        one small lumi, one large lumi (small below min size,
        large below max size, but both together above max size)
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 3000
        mySplitArgs['maxInputSize'] = 9000
        mySplitArgs['maxOverSize'] = 9500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test09(self):
        """
        _test09_
        Test under merge (over merge event threshold)
        one small lumi, one large lumi (small below min size,
        large below max size, but both together above max size)
        
        This test was changed because maxInputEvents is no longer used.
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000 * lumi * lumi, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 1500
        mySplitArgs['maxInputSize'] = 9000
        mySplitArgs['maxOverSize'] = 9500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.fileset2.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        return

    def test10(self):
        """
        _test10_
        Test merging of multiple lumis with holes in the lumi sequence
        Hole is due to no streamer files for the lumi
        Multi lumi input
        
        It only works with a single hole, as it creates a merged file even when
        it is smaller than minInputSize.

        This test was changed because maxInputEvents is no longer used.
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 4]:
            for i in range(2):
                newFile = File(makeUUID(), size = 1000, events = 100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave = False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription2)

        mySplitArgs['minInputSize'] = 100000
        mySplitArgs['maxInputSize'] = 200000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.insertClosedLumiDAO.execute(binds = { 'RUN' : 1,
                                                   'LUMI' : 3,
                                                   'STREAM' : "A",
                                                   'FILECOUNT' : 0,
                                                   'INSERT_TIME' : self.currentTime,
                                                   'CLOSE_TIME' : self.currentTime },
                                         transaction = False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        return
Example #35
class DBSUploadTest(unittest.TestCase):
    """
    TestCase for DBSUpload module

    """
    def setUp(self):
        """
        _setUp_

        setUp function for unittest

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir(deleteOnDestruction=False)
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(
            package="WMComponent.DBSBuffer.Database",
            logger=myThread.logger,
            dbinterface=myThread.dbi)

        self.buffer3Factory = DAOFactory(package="WMComponent.DBS3Buffer",
                                         logger=myThread.logger,
                                         dbinterface=myThread.dbi)

        locationAction = self.bufferFactory(
            classname="DBSBufferFiles.AddLocation")
        locationAction.execute(siteName="se1.cern.ch")
        locationAction.execute(siteName="se1.fnal.gov")
        locationAction.execute(siteName="malpaquet")
        self.dbsUrl = "https://*****:*****@attr("integration")
    def testBasicUpload(self):
        """
        _testBasicUpload_

        Verify that we can successfully upload to DBS3.  Also verify that the
        uploader correctly handles files parentage when uploading.
        """
        self.dbsApi = DbsApi(url=self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config=config)

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)

        # The algorithm needs to be run twice.  On the first iteration it will
        # create all the blocks and upload one.  On the second iteration it will
        # timeout and upload the second block.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        # Verify the files made it into DBS3.
        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Inject some more parent files and some child files into DBSBuffer.
        # Run the uploader twice, only the parent files should be added to DBS3.
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"],
                        parentFiles + moreParentFiles)

        # Run the uploader another two times to upload the child files.  Verify
        # that the child files were uploaded.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    @attr("integration")
    def testDualUpload(self):
        """
        _testDualUpload_

        Verify that the dual upload mode works correctly.
        """
        self.dbsApi = DbsApi(url=self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config=config)
        dbsUtil = DBSBufferUtil()

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)

        allFiles = parentFiles + moreParentFiles
        allBlocks = []
        for i in range(4):
            DBSBufferDataset(parentFiles[0]["datasetPath"]).create()
            blockName = parentFiles[0]["datasetPath"] + "#" + makeUUID()
            dbsBlock = DBSBlock(blockName,
                                location="malpaquet",
                                das=None,
                                workflow=None)
            dbsBlock.status = "Open"
            dbsBlock.setDataset(parentFiles[0]["datasetPath"], 'data', 'VALID')
            dbsUtil.createBlocks([dbsBlock])
            for file in allFiles[i * 5:(i * 5) + 5]:
                dbsBlock.addFile(file, 'data', 'VALID')
                dbsUtil.setBlockFiles({
                    "block": blockName,
                    "filelfn": file["lfn"]
                })
                if i < 2:
                    dbsBlock.status = "InDBS"
                dbsUtil.updateBlocks([dbsBlock])
            dbsUtil.updateFileStatus([dbsBlock], "InDBS")
            allBlocks.append(dbsBlock)

        DBSBufferDataset(childFiles[0]["datasetPath"]).create()
        blockName = childFiles[0]["datasetPath"] + "#" + makeUUID()
        dbsBlock = DBSBlock(blockName,
                            location="malpaquet",
                            das=None,
                            workflow=None)
        dbsBlock.status = "InDBS"
        dbsBlock.setDataset(childFiles[0]["datasetPath"], 'data', 'VALID')
        dbsUtil.createBlocks([dbsBlock])
        for file in childFiles:
            dbsBlock.addFile(file, 'data', 'VALID')
            dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})

        dbsUtil.updateFileStatus([dbsBlock], "InDBS")

        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Change the status of the rest of the parent blocks so we can upload
        # them and the children.
        for dbsBlock in allBlocks:
            dbsBlock.status = "InDBS"
            dbsUtil.updateBlocks([dbsBlock])

        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"],
                        parentFiles + moreParentFiles)

        # Run the uploader one more time to upload the children.
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    def testCloseSettingsPerWorkflow(self):
        """
        _testCloseSettingsPerWorkflow_

        Test the block closing mechanics in the DBS3 uploader.
        This uses a fake DBS API to avoid reliance on external services.
        """
        # Signal trapExit that we are a friend
        os.environ["DONT_TRAP_EXIT"] = "True"
        try:
            # Monkey patch the imports of DbsApi
            from WMComponent.DBS3Buffer import DBSUploadPoller as MockDBSUploadPoller
            MockDBSUploadPoller.DbsApi = MockDbsApi

            # Set the poller and the dbsUtil for verification
            myThread = threading.currentThread()
            (_, dbsFilePath) = mkstemp(dir=self.testDir)
            self.dbsUrl = dbsFilePath
            config = self.getConfig()
            dbsUploader = MockDBSUploadPoller.DBSUploadPoller(config=config)
            dbsUtil = DBSBufferUtil()

            # First test is event based limits and timeout with no new files.
            # Set the files and workflow
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName,
                                taskPath,
                                MaxWaitTime=2,
                                MaxFiles=100,
                                MaxEvents=150)
            self.createParentFiles(acqEra,
                                   nFiles=20,
                                   workflowName=workflowName,
                                   taskPath=taskPath)

            # The algorithm needs to be run twice.  On the first iteration it will
            # create all the blocks and upload one with less than 150 events.
            # On the second iteration the second block is uploaded.
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            globalFiles = myThread.dbi.processData(
                "SELECT id FROM dbsbuffer_file WHERE status = 'InDBS'"
            )[0].fetchall()
            notUploadedFiles = myThread.dbi.processData(
                "SELECT id FROM dbsbuffer_file WHERE status = 'NOTUPLOADED'"
            )[0].fetchall()
            self.assertEqual(len(globalFiles), 14)
            self.assertEqual(len(notUploadedFiles), 6)
            # Check the fake DBS for data
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 2)
            for block in fakeDBSInfo:
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['file_count'], 7)
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
            time.sleep(3)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 3)
            for block in fakeDBSInfo:
                if block['block']['file_count'] != 6:
                    self.assertEqual(block['block']['file_count'], 7)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Now check the limit by size and timeout with new files
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName,
                                taskPath,
                                MaxWaitTime=2,
                                MaxFiles=5,
                                MaxEvents=200000000)
            self.createParentFiles(acqEra,
                                   nFiles=16,
                                   workflowName=workflowName,
                                   taskPath=taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 6)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertTrue('close_settings' not in block)
                self.assertEqual(block['block']['open_for_writing'], 0)

            # Add more files; they will go into the same block, which will then
            # be closed after the timeout.
            time.sleep(3)
            self.createParentFiles(acqEra,
                                   nFiles=3,
                                   workflowName=workflowName,
                                   taskPath=taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 7)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] < 5:
                        self.assertEqual(block['block']['file_count'], 4)
                    else:
                        self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Finally test size limits
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName,
                                taskPath,
                                MaxWaitTime=1,
                                MaxFiles=500,
                                MaxEvents=200000000,
                                MaxSize=2048)
            self.createParentFiles(acqEra,
                                   nFiles=7,
                                   workflowName=workflowName,
                                   taskPath=taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            time.sleep(2)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()

            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 11)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] != 1:
                        self.assertEqual(block['block']['block_size'], 2048)
                        self.assertEqual(block['block']['file_count'], 2)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
        except Exception as ex:
            self.fail("We failed at some point in the test: %s" % str(ex))
        finally:
            # We don't trust anyone else with _exit
            del os.environ["DONT_TRAP_EXIT"]
        return
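The per-workflow close settings exercised above (MaxWaitTime, MaxFiles, MaxEvents and MaxSize) decide when the uploader stops filling a block. The following is a minimal standalone sketch of that decision, for illustration only: the function name and dictionary keys are hypothetical and this is not the actual DBSUploadPoller code.

import time

def shouldCloseBlock(block, settings, now=None):
    """Illustrative sketch: return True once any close threshold is met.

    `block` is assumed to carry 'create_time', 'file_count', 'events' and
    'size'; `settings` mirrors the per-workflow values injected above.
    """
    now = now or time.time()
    if now - block['create_time'] >= settings['MaxWaitTime']:
        return True   # block timed out
    if block['file_count'] >= settings['MaxFiles']:
        return True   # too many files
    if block['events'] >= settings['MaxEvents']:
        return True   # too many events
    if block['size'] >= settings.get('MaxSize', float('inf')):
        return True   # block too large
    return False

# Mirrors the size-limit case above: with MaxSize=2048 a block closes once
# two ~1 kB files have been added.
settings = {'MaxWaitTime': 1, 'MaxFiles': 500, 'MaxEvents': 200000000, 'MaxSize': 2048}
block = {'create_time': time.time(), 'file_count': 2, 'events': 200, 'size': 2048}
print(shouldCloseBlock(block, settings))   # True, size threshold reached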
Example #36
class StoreResultsTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Initialize the database.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database.
        """
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
        return

    def testStoreResults(self):
        """
        _testStoreResults_

        Create a StoreResults workflow and verify it installs into WMBS
        correctly.
        """
        arguments = StoreResultsWorkloadFactory.getTestArguments()
        arguments.update({'CmsPath': "/uscmst1/prod/sw/cms"})

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", arguments)

        testWMBSHelper = WMBSHelper(testWorkload,
                                    "StoreResults",
                                    "SomeBlock",
                                    cachepath=self.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        testWorkflow = Workflow(name="TestWorkload",
                                task="/TestWorkload/StoreResults")
        testWorkflow.load()

        self.assertEqual(len(testWorkflow.outputMap.keys()), 2,
                         "Error: Wrong number of WF outputs.")

        goldenOutputMods = ["Merged"]
        for goldenOutputMod in goldenOutputMods:
            mergedOutput = testWorkflow.outputMap[goldenOutputMod][0][
                "merged_output_fileset"]
            unmergedOutput = testWorkflow.outputMap[goldenOutputMod][0][
                "output_fileset"]

            mergedOutput.loadData()
            unmergedOutput.loadData()

            self.assertEqual(
                mergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % goldenOutputMod,
                "Error: Merged output fileset is wrong: %s" %
                mergedOutput.name)
            self.assertEqual(
                unmergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % goldenOutputMod,
                "Error: Unmerged output fileset is wrong: %s." %
                unmergedOutput.name)

        logArchOutput = testWorkflow.outputMap["logArchive"][0][
            "merged_output_fileset"]
        unmergedLogArchOutput = testWorkflow.outputMap["logArchive"][0][
            "output_fileset"]
        logArchOutput.loadData()
        unmergedLogArchOutput.loadData()

        self.assertEqual(logArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")
        self.assertEqual(unmergedLogArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")

        topLevelFileset = Fileset(name="TestWorkload-StoreResults-SomeBlock")
        topLevelFileset.loadData()

        procSubscription = Subscription(fileset=topLevelFileset,
                                        workflow=testWorkflow)
        procSubscription.loadData()

        self.assertEqual(procSubscription["type"], "Merge",
                         "Error: Wrong subscription type.")
        self.assertEqual(procSubscription["split_algo"],
                         "ParentlessMergeBySize", "Error: Wrong split algo.")

        return
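The assertions above pin down the fileset naming convention checked here: for StoreResults both the merged and unmerged output filesets of an output module resolve to "/<Workload>/<Task>/merged-<module>", and the log archive fileset to ".../merged-logArchive". A tiny illustrative helper (hypothetical, not part of WMBS) that builds those expected names:

def expectedFilesetNames(workload, task, outputModules):
    """Illustrative sketch: expected fileset names for a StoreResults-style task."""
    base = "/%s/%s" % (workload, task)
    names = dict((mod, "%s/merged-%s" % (base, mod)) for mod in outputModules)
    names["logArchive"] = "%s/merged-logArchive" % base
    return names

# e.g. {'Merged': '/TestWorkload/StoreResults/merged-Merged',
#       'logArchive': '/TestWorkload/StoreResults/merged-logArchive'}
print(expectedFilesetNames("TestWorkload", "StoreResults", ["Merged"]))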
Example #37
class SetupCMSSWPsetTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testDir = self.testInit.generateWorkDir()
        sys.path.insert(0, os.path.join(WMCore.WMBase.getTestBase(),
                                        "WMCore_t/WMRuntime_t/Scripts_t"))

    def tearDown(self):
        sys.path.remove(os.path.join(WMCore.WMBase.getTestBase(), "WMCore_t/WMRuntime_t/Scripts_t"))
        if 'WMTaskSpace' in sys.modules:
            del sys.modules["WMTaskSpace"]
        self.testInit.delWorkDir()
        os.environ.pop("WMAGENT_SITE_CONFIG_OVERRIDE", None)

    def createTestStep(self):
        """
        _createTestStep_

        Create a test step that can be passed to the setup script.

        """
        newStep = WMStep("cmsRun1")
        newStepHelper = CMSSWStepHelper(newStep)
        newStepHelper.setStepType("CMSSW")
        newStepHelper.setGlobalTag("SomeGlobalTag")
        stepTemplate = StepFactory.getStepTemplate("CMSSW")
        stepTemplate(newStep)
        newStep.application.command.configuration = "PSet.py"
        return newStepHelper


    def createTestJob(self):
        """
        _createTestJob_

        Create a test job that has parents for each input file.

        """
        newJob = Job(name = "TestJob")
        newJob.addFile(File(lfn = "/some/file/one",
                            parents = set([File(lfn = "/some/parent/one")])))
        newJob.addFile(File(lfn = "/some/file/two",
                            parents = set([File(lfn = "/some/parent/two")])))
        return newJob

    def loadProcessFromPSet(self):
        """
        _loadProcessFromPSet_

        This requires changing the working directory;
        do so in a safe manner so that the change is encapsulated within this method only.
        """

        currentPath = os.getcwd()
        loadedProcess = None
        try:
            if not os.path.isdir(self.testDir):
                raise RuntimeError("Test directory %s does not exist" % self.testDir)
            os.chdir(self.testDir)
            testFile = "PSet.py"
            pset = imp.load_source('process', testFile)
            loadedProcess = pset.process
        except Exception as ex:
            self.fail("An exception was caught while trying to load the PSet, %s" % str(ex))
        finally:
            os.chdir(currentPath)

        return loadedProcess

    def testPSetFixup(self):
        """
        _testPSetFixup_

        Verify that all necessary parameters are set in the PSet.

        """
        from WMCore.WMRuntime.Scripts.SetupCMSSWPset import SetupCMSSWPset

        if os.environ.get('CMSSW_VERSION', None):
            del os.environ['CMSSW_VERSION']

        setupScript = SetupCMSSWPset()
        setupScript.step = self.createTestStep()
        setupScript.stepSpace = ConfigSection(name = "stepSpace")
        setupScript.stepSpace.location = self.testDir
        setupScript.job = self.createTestJob()
        setupScript()

        fixedPSet = self.loadProcessFromPSet()

        self.assertEqual(len(fixedPSet.source.fileNames.value), 2,
                         "Error: Wrong number of files.")
        self.assertEqual(len(fixedPSet.source.secondaryFileNames.value), 2,
                         "Error: Wrong number of secondary files.")
        self.assertEqual(fixedPSet.source.fileNames.value[0], "/some/file/one",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.fileNames.value[1], "/some/file/two",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.secondaryFileNames.value[0], "/some/parent/one",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.secondaryFileNames.value[1], "/some/parent/two",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.maxEvents.input.value, -1,
                         "Error: Wrong maxEvents.")

    def testEventsPerLumi(self):
        """
        _testEventsPerLumi_
        Verify that you can put in events per lumi in the process.

        """
        from WMCore.WMRuntime.Scripts.SetupCMSSWPset import SetupCMSSWPset

        os.environ['CMSSW_VERSION'] = "CMSSW_7_4_0"

        setupScript = SetupCMSSWPset()
        setupScript.step = self.createTestStep()
        setupScript.step.setEventsPerLumi(500)
        setupScript.stepSpace = ConfigSection(name = "stepSpace")
        setupScript.stepSpace.location = self.testDir
        setupScript.job = self.createTestJob()
        setupScript()

        fixedPSet = self.loadProcessFromPSet()

        self.assertEqual(len(fixedPSet.source.fileNames.value), 2,
                         "Error: Wrong number of files.")
        self.assertEqual(len(fixedPSet.source.secondaryFileNames.value), 2,
                         "Error: Wrong number of secondary files.")
        self.assertEqual(fixedPSet.source.fileNames.value[0], "/some/file/one",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.fileNames.value[1], "/some/file/two",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.secondaryFileNames.value[0], "/some/parent/one",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.secondaryFileNames.value[1], "/some/parent/two",
                         "Error: Wrong input file.")
        self.assertEqual(fixedPSet.source.numberEventsInLuminosityBlock.value,
                         500, "Error: Wrong number of events per luminosity block")
        self.assertEqual(fixedPSet.maxEvents.input.value, -1,
                         "Error: Wrong maxEvents.")

    def testChainedProcesing(self):
        """
        Test for chained CMSSW processing: check the overridden TFC, its values,
        and that the input file(s) are set correctly.

        """
        from WMCore.WMRuntime.Scripts.SetupCMSSWPset import SetupCMSSWPset

        os.environ['CMSSW_VERSION'] = "CMSSW_7_6_0"

        setupScript = SetupCMSSWPset()
        setupScript.step = self.createTestStep()
        setupScript.stepSpace = ConfigSection(name = "stepSpace")
        setupScript.stepSpace.location = self.testDir
        setupScript.job = self.createTestJob()
        setupScript.step.setupChainedProcessing("my_first_step", "my_input_module")
        setupScript()

        # test if the overridden TFC is right
        self.assertTrue(hasattr(setupScript.step.data.application, "overrideCatalog"),
                        "Error: overridden TFC was not set")
        tfc = loadTFC(setupScript.step.data.application.overrideCatalog)
        inputFile = "../my_first_step/my_input_module.root"
        self.assertEqual(tfc.matchPFN("direct", inputFile), inputFile)
        self.assertEqual(tfc.matchLFN("direct", inputFile), inputFile)

        self.assertEqual(setupScript.process.source.fileNames.value, [inputFile])


    def testPileupSetup(self):
        """
        Test the pileup setting.

        reference (setupScript.process instance):
        in test/python/WMCore_t/WMRuntime_t/Scripts_t/WMTaskSpace/cmsRun1/PSet.py

        """
        try:
            from dbs.apis.dbsClient import DbsApi
        except ImportError as ex:
            raise nose.SkipTest

        # This is a modified and shortened version of
        # WMCore/test/python/WMCore_t/Misc_t/site-local-config.xml.
        # Since the dataset name in question (below) is only present at
        # storm-fe-cms.cr.cnaf.infn.it, we need to make the test think that is its local SE.
        siteLocalConfigContent = \
        """
<site-local-config>
    <site name="-SOME-SITE-NAME-">
        <event-data>
            <catalog url="trivialcatalog_file:/uscmst1/prod/sw/cms/SITECONF/T1_US_FNAL/PhEDEx/storage.xml?protocol=dcap"/>
        </event-data>
        <local-stage-out>
            <!-- original cmssrm.fnal.gov -->
            <phedex-node value="T2_CH_CERN"/>
            <command value="test-copy"/>
            <catalog url="trivialcatalog_file:/uscmst1/prod/sw/cms/SITECONF/T1_US_FNAL/PhEDEx/storage.xml?protocol=dcap"/>
        </local-stage-out>
        <calib-data>
            <frontier-connect>
                <load balance="proxies"/>
                <proxy url="http://cmsfrontier1.fnal.gov:3128"/>
                <proxy url="http://cmsfrontier2.fnal.gov:3128"/>
            </frontier-connect>
        </calib-data>
    </site>
</site-local-config>
"""
        siteLocalConfig = os.path.join(self.testDir, "test-site-local-config.xml")
        f = open(siteLocalConfig, 'w')
        f.write(siteLocalConfigContent)
        f.close()

        from WMCore.WMRuntime.Scripts.SetupCMSSWPset import SetupCMSSWPset
        setupScript = SetupCMSSWPset()
        setupScript.step = self.createTestStep()
        setupScript.stepSpace = ConfigSection(name = "stepSpace")
        setupScript.stepSpace.location = os.path.join(self.testDir, "cmsRun1")
        setupScript.job = self.createTestJob()
        # Define the pileup configuration.
        # Although the implementation accepts any pileup type, only the "data"
        # and "mc" types are eventually considered and lead to modifications
        # of the job input files.
        pileupConfig = {"data": ["/Mu/PenguinsPenguinsEverywhere-SingleMu-HorriblyJaundicedYellowEyedPenginsSearchingForCarrots-v31/RECO"],
                        "mc": ["/Mu/PenguinsPenguinsEverywhere-SingleMu-HorriblyJaundicedYellowEyedPenginsSearchingForCarrots-v31/RECO"]}
        dbsUrl = "https://cmsweb.cern.ch/dbs/prod/global/DBSReader"
        setupScript.step.setupPileup(pileupConfig, dbsUrl)
        # SetupCMSSWPset pileup handling consults SiteLocalConfig to determine
        # the StorageElement (SE) name the job is running on.
        # SiteLocalConfig loads the site-local-config.xml file from the location
        # given by an environment variable; if the variable is not defined yet, set it.
        # Obviously, if "WMAGENT_SITE_CONFIG_OVERRIDE" is already set here, the above
        # trick with the SE name is not effective.
        if not os.getenv("WMAGENT_SITE_CONFIG_OVERRIDE", None):
            os.environ["WMAGENT_SITE_CONFIG_OVERRIDE"] = siteLocalConfig
        # find out local site name from the testing local site config,
        # will be needed later
        siteConfig = loadSiteLocalConfig()
        seLocalName = siteConfig.localStageOut["phedex-node"]
        print("Running on site '%s', local SE name: '%s'" % (siteConfig.siteName, seLocalName))

        # before calling the script, SetupCMSSWPset will try to load JSON
        # pileup configuration file, need to create it in self.testDir
        fetcher = PileupFetcher()
        fetcher.setWorkingDirectory(self.testDir)
        fetcher._createPileupConfigFile(setupScript.step, fakeSites=['T1_US_FNAL'])

        setupScript()

        # now test all modifications carried out in SetupCMSSWPset.__call__
        # which will also test that CMSSWStepHelper.setupPileup run correctly
        mixModules, dataMixModules = setupScript._getPileupMixingModules()

        # load in the pileup configuration in the form of dict which
        # PileupFetcher previously saved in a JSON file
        pileupDict = setupScript._getPileupConfigFromJson()

        # get the sub dict for particular pileup type
        # for pileupDict structure description - see PileupFetcher._queryDbsAndGetPileupConfig
        for pileupType, modules in zip(("data", "mc"), (dataMixModules, mixModules)):
            # A KeyError here means the pileupConfig above is not correct;
            # it needs to contain both of these pileup types.
            d = pileupDict[pileupType]
            self._mixingModulesInputFilesTest(modules, d, seLocalName)


    def _mixingModulesInputFilesTest(self, modules, pileupSubDict, seLocalName):
        """
        pileupSubDict - contains only the dictionary for a particular pileup type

        """
        # consider only locally available files
        filesInConfigDict = []
        for v in pileupSubDict.values():
            if seLocalName in v["phedexNodeNames"]:
                filesInConfigDict.extend(v["FileList"])

        for m in modules:
            inputTypeAttrib = getattr(m, "input", None) or getattr(m, "secsource", None)
            fileNames = inputTypeAttrib.fileNames.value
            if fileNames is None:
                fileNames = []
            msg = ("Pileup configuration file list '%s' and mixing modules input "
                   "filelist '%s' are not identical." % (filesInConfigDict, fileNames))
            self.assertEqual(sorted(filesInConfigDict), sorted(fileNames), msg)
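_mixingModulesInputFilesTest above compares the mixing modules' file lists against the subset of pileup files hosted at the local PhEDEx node. Below is a minimal standalone sketch of that filtering, assuming the same dictionary shape used above (block name -> {"phedexNodeNames": [...], "FileList": [...]}); the helper name and the sample data are hypothetical.

def localPileupFiles(pileupSubDict, localPNN):
    """Illustrative sketch: collect pileup files available at the local PNN."""
    files = []
    for blockInfo in pileupSubDict.values():
        if localPNN in blockInfo["phedexNodeNames"]:
            files.extend(blockInfo["FileList"])
    return sorted(files)

pileupSubDict = {
    "/Mu/Whatever/RECO#block1": {"phedexNodeNames": ["T2_CH_CERN"], "FileList": ["/store/a.root"]},
    "/Mu/Whatever/RECO#block2": {"phedexNodeNames": ["T1_US_FNAL"], "FileList": ["/store/b.root"]},
}
print(localPileupFiles(pileupSubDict, "T2_CH_CERN"))   # ['/store/a.root']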
Example #38
class ExpressMergeTest(unittest.TestCase):
    """
    _ExpressMergeTest_

    Test for ExpressMerge job splitter
    """
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules=["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 'SomePNN')
                                    """,
                                 transaction=False)

        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 'SomePNN2')
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'TIME': int(time.time()),
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in range(1, 5):
            insertLumiDAO.execute(binds={
                'RUN': 1,
                'LUMI': lumi
            },
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "Express"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        fileset1 = Fileset(name="TestFileset1")
        self.fileset2 = Fileset(name="TestFileset2")
        fileset1.load()
        self.fileset2.create()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow2 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow2",
                             task="Test")
        workflow1.create()
        workflow2.create()

        self.subscription1 = Subscription(fileset=fileset1,
                                          workflow=workflow1,
                                          split_algo="Express",
                                          type="Express")
        self.subscription2 = Subscription(fileset=self.fileset2,
                                          workflow=workflow2,
                                          split_algo="ExpressMerge",
                                          type="ExpressMerge")
        self.subscription1.create()
        self.subscription2.create()

        myThread.dbi.processData("""INSERT INTO wmbs_workflow_output
                                    (WORKFLOW_ID, OUTPUT_IDENTIFIER, OUTPUT_FILESET)
                                    VALUES (%d, 'SOMEOUTPUT', %d)
                                    """ % (workflow1.id, self.fileset2.id),
                                 transaction=False)

        # keep for later
        self.insertSplitLumisDAO = daoFactory(
            classname="JobSplitting.InsertSplitLumis")

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['maxInputSize'] = 2 * 1024 * 1024 * 1024
        self.splitArgs['maxInputFiles'] = 500
        self.splitArgs['maxLatency'] = 15 * 23

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def deleteSplitLumis(self):
        """
        _deleteSplitLumis_

        """
        myThread = threading.currentThread()

        myThread.dbi.processData("""DELETE FROM lumi_section_split_active
                                    """,
                                 transaction=False)

        return

    def test00(self):
        """
        _test00_

        Test that the job name prefix feature works
        Test latency trigger (wait and 0)

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxLatency'] = 0
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("ExpressMerge-"),
                        "ERROR: Job has wrong name")

        return

    def test01(self):
        """
        _test01_

        Test size and event triggers for single lumis (they are ignored)
        Test latency trigger (timed out)

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputSize'] = 1
        mySplitArgs['maxInputFiles'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return

    def test02(self):
        """
        _test02_

        Test input files threshold on multi lumis

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputFiles'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test03(self):
        """
        _test03_

        Test input size threshold on multi lumis

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputSize'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test04(self):
        """
        _test04_

        Test multi lumis express merges

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return

    def test05(self):
        """
        _test05_

        Test multi lumis express merges with holes

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 4]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test06(self):
        """
        _test06_

        Test active split lumis

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        self.insertSplitLumisDAO.execute(binds={
            'SUB': self.subscription1['id'],
            'LUMI': 1
        })

        mySplitArgs['maxLatency'] = 0
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.deleteSplitLumis()

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return
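The ExpressMerge tests above exercise three triggers: maxLatency (age of the input), and, across lumi boundaries, maxInputFiles and maxInputSize. A rough standalone sketch of that decision follows, for illustration only; it ignores the per-lumi bookkeeping and the active-split-lumi gating the real splitter does, and the names are hypothetical.

import time

def readyToMerge(files, splitArgs, now=None):
    """Illustrative sketch: decide whether accumulated files should be merged."""
    if not files:
        return False
    now = now or time.time()
    oldest = min(f['insert_time'] for f in files)
    if now - oldest >= splitArgs['maxLatency']:
        return True   # latency trigger, as in test00/test01
    totalSize = sum(f['size'] for f in files)
    if len(files) > splitArgs['maxInputFiles'] or totalSize > splitArgs['maxInputSize']:
        return True   # file-count / size trigger, as in test02/test03
    return False

splitArgs = {'maxLatency': 0, 'maxInputFiles': 500, 'maxInputSize': 2 * 1024 * 1024 * 1024}
files = [{'size': 1000, 'insert_time': time.time()} for _ in range(2)]
print(readyToMerge(files, splitArgs))   # True, maxLatency of 0 has already expired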
Example #39
class LocationsTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_
        
        Setup the database and logging connection.  Try to create all of the
        WMBS tables.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)
        return
                                                                
    def tearDown(self):
        """
        _tearDown_
        
        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()
        return

    def testCreateDeleteList(self):
        """
        _testCreateDeleteList_

        Test the creation, listing and deletion of locations in WMBS.
        """
        goldenLocations = ["goodse.cern.ch", "goodse.fnal.gov"]
        
        myThread = threading.currentThread()        
        daoFactory = DAOFactory(package="WMCore.WMBS", logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationNew = daoFactory(classname = "Locations.New")
        for location in goldenLocations:
            # The following is intentional, I want to test that inserting the
            # same location multiple times does not cause problems.
            locationNew.execute(siteName = location, jobSlots = 300)
            locationNew.execute(siteName = location, jobSlots = 300)
        
        locationNew.execute(siteName = "empty_site")
        goldenLocations.append("empty_site")

        locationList = daoFactory(classname = "Locations.List")
        currentLocations = locationList.execute()
        for location in currentLocations:
            assert location[1] in goldenLocations, \
                   "ERROR: Unknown location was returned"

            if location[1] == "empty_site":
                assert location[2] == 0, \
                    "ERROR: Site has wrong number of job slots."
            else:
                assert location[2] == 300, \
                    "ERROR: Site has wrong number of job slots."

            goldenLocations.remove(location[1])

        assert len(goldenLocations) == 0, \
               "ERROR: Some locations are missing..."
        
        locationDelete = daoFactory(classname = "Locations.Delete")
        locationDelete.execute(siteName = "goodse.fnal.gov")
        locationDelete.execute(siteName = "goodse.cern.ch")

        currentLocations = locationList.execute()
        assert len(currentLocations) == 1, \
            "ERROR: Not all locations were deleted"
        assert currentLocations[0][1] == "empty_site", \
            "ERROR: The wrong sites were deleted."

        return

    def testListSites(self):
        """
        _testListSites_

        Test the ability to list all sites in the database.
        """
        myThread = threading.currentThread()        
        daoFactory = DAOFactory(package="WMCore.WMBS", logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationNew = daoFactory(classname = "Locations.New")

        locationNew.execute("Satsuma")
        locationNew.execute("Choshu")
        locationNew.execute("Tosa")

        listSites = daoFactory(classname = "Locations.ListSites")
        sites     = listSites.execute()

        self.assertEqual("Satsuma" in sites, True)
        self.assertEqual("Choshu" in sites, True)
        self.assertEqual("Tosa" in sites, True)

    def testListSitesTransaction(self):
        """
        _testListSitesTransaction_

        Verify that selects behave appropriately when dealing with transactions.
        """
        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="WMCore.WMBS", logger = myThread.logger,
                                dbinterface = myThread.dbi)
        
        myThread.transaction.begin()

        localTransaction = Transaction(myThread.dbi)
        localTransaction.begin()
        
        locationNew = daoFactory(classname = "Locations.New")

        locationNew.execute("Satsuma", conn = myThread.transaction.conn, transaction = True)
        locationNew.execute("Choshu", conn = myThread.transaction.conn, transaction = True)

        listSites = daoFactory(classname = "Locations.ListSites")
        nonTransSites = listSites.execute(conn = localTransaction.conn, transaction = True)
        transSites = listSites.execute(conn = myThread.transaction.conn, transaction = True)

        assert len(nonTransSites) == 0, \
               "Error: Wrong number of sites in non transaction list."
        assert len(transSites) == 2, \
               "Error: Wrong number of sites in transaction list."
        assert "Satsuma" in transSites, \
               "Error: Site missing in transaction list."
        assert "Choshu" in transSites, \
               "Error: Site missing in transaction list."

        localTransaction.commit()
        myThread.transaction.commit()
        return

    def testJobSlots(self):
        """
        _testJobSlots_
        
        Test our ability to set jobSlots
        """

        myThread = threading.currentThread()        
        daoFactory = DAOFactory(package="WMCore.WMBS", logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationNew = daoFactory(classname = "Locations.New")

        locationNew.execute("Satsuma")
        locationNew.execute("Choshu")
        locationNew.execute("Tosa")

        setSlots = daoFactory(classname = "Locations.SetJobSlots")
        setSlots.execute(siteName = 'Satsuma', jobSlots = 1868)
        setSlots.execute(siteName = 'Choshu',  jobSlots = 1868)
        setSlots.execute(siteName = 'Tosa',    jobSlots = 1868)

        locationList = daoFactory(classname = "Locations.List")
        currentLocations = locationList.execute()

        for location in currentLocations:
            self.assertEqual(location[2], 1868)

        return



    def testGetSiteInfo(self):
        """
        _testGetSiteInfo_
        
        Test our ability to retrieve ce, se names, etc.
        """

        myThread = threading.currentThread()        
        daoFactory = DAOFactory(package="WMCore.WMBS", logger = myThread.logger,
                                dbinterface = myThread.dbi)


        locationNew = daoFactory(classname = "Locations.New")

        locationNew.execute(siteName = "Satsuma", ceName = "Satsuma", seName = "Satsuma", jobSlots = 10)
        locationNew.execute(siteName = "Choshu", ceName = "Choshu", seName = "Choshu", jobSlots = 10)
        locationNew.execute(siteName = "Tosa", ceName = "Tosa", seName = "Choshu", jobSlots = 10)


        locationInfo = daoFactory(classname = "Locations.GetSiteInfo")

        result = locationInfo.execute(siteName = "Choshu")

        self.assertEqual(result[0]['ce_name'], 'Choshu')
        self.assertEqual(result[0]['se_name'], 'Choshu')
        self.assertEqual(result[0]['site_name'], 'Choshu')
        self.assertEqual(result[0]['job_slots'], 10)

        return
Example #40
class WorkflowManagerTest(unittest.TestCase):
    """
    TestCase for TestWorkflowManager module
    """

    _maxMessage = 10

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all needed
        WMBS tables.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = \
                     ['WMCore.Agent.Database',
                      'WMComponent.WorkflowManager.Database',
                      'WMCore.ThreadPool',
                      'WMCore.MsgService',
                      'WMCore.WMBS'],
                    useDefault = False)

        return

    def tearDown(self):
        """
        _tearDown_

        Database deletion
        """
        self.testInit.clearDatabase()

        return

    def getConfig(self):
        """
        _getConfig_

        Get defaults WorkflowManager parameters
        """

        return self.testInit.getConfiguration(
                    os.path.join(WMCore.WMInit.getWMBASE(), \
           'src/python/WMComponent/WorkflowManager/DefaultConfig.py'))


    def testA(self):
        """
        _testA_

        Handle AddWorkflowToManage events
        """
        myThread = threading.currentThread()
        config = self.getConfig()

        testWorkflowManager = WorkflowManager(config)
        testWorkflowManager.prepareToStart()

        for i in xrange(0, WorkflowManagerTest._maxMessage):

            workflow = Workflow(spec = "testSpec.xml", owner = "riahi",
                                name = "testWorkflow" + str(i), task = "testTask")
            workflow.create()

            for j in xrange(0, 3):
                workflowManagerdict = {'payload': {'WorkflowId': workflow.id,
                                                   'FilesetMatch': 'FILESET_' + str(j),
                                                   'SplitAlgo': 'NO SPLITALGO',
                                                   'Type': 'NO TYPE'}}
                testWorkflowManager.handleMessage(type = 'AddWorkflowToManage',
                                                  payload = workflowManagerdict)

        time.sleep(30)

        myThread.workerThreadManager.terminateWorkers()

        while threading.activeCount() > 1:
            print('Currently: '+str(threading.activeCount())+\
                ' Threads. Wait until all our threads have finished')
            time.sleep(1)
Example #41
class CouchTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        # mock generator instance to communicate some configuration values
        self.generator = utils.AlertGeneratorMock(self.config)
        self.testName = self.id().split('.')[-1]


    def tearDown(self):
        self.testInit.delWorkDir()
        self.generator = None


    def testAlertGeneratorCouchDbSizePollerBasic(self):
        config = getConfig("/tmp")
        try:
            poller = CouchDbSizePoller(config.AlertGenerator.couchDbSizePoller, self.generator)
        except Exception as ex:
            self.fail("%s: exception: %s" % (self.testName, ex))
        poller.check() # -> on real system dir may result in permission denied
        poller._dbDirectory = "/dev"
        poller.check() # -> OK

        # test failing during set up
        poller = CouchDbSizePoller(config.AlertGenerator.couchDbSizePoller, self.generator)
        poller._query = "nonsense query"
        # this will fail on the above query
        self.assertRaises(Exception, poller._getDbDir)
        poller.check()


    def testAlertGeneratorCouchDbSizePollerSoftThreshold(self):
        self.config.AlertGenerator.couchDbSizePoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchDbSizePoller.soft = 5
        self.config.AlertGenerator.couchDbSizePoller.critical = 10
        self.config.AlertGenerator.couchDbSizePoller.pollInterval = 0.2
        ti = utils.TestInput() # see attributes comments at the class
        ti.pollerClass = CouchDbSizePoller
        ti.config = self.config.AlertGenerator.couchDbSizePoller
        ti.thresholdToTest = self.config.AlertGenerator.couchDbSizePoller.soft
        ti.level = self.config.AlertProcessor.soft.level
        ti.expected = 1
        ti.thresholdDiff = 4
        ti.testCase = self
        utils.doGenericValueBasedPolling(ti)


    def testAlertGeneratorCouchDbSizePollerCriticalThreshold(self):
        self.config.AlertGenerator.couchDbSizePoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchDbSizePoller.soft = 5
        self.config.AlertGenerator.couchDbSizePoller.critical = 10
        self.config.AlertGenerator.couchDbSizePoller.pollInterval = 0.2
        ti = utils.TestInput() # see attributes comments at the class
        ti.pollerClass = CouchDbSizePoller
        ti.config = self.config.AlertGenerator.couchDbSizePoller
        ti.thresholdToTest = self.config.AlertGenerator.couchDbSizePoller.critical
        ti.level = self.config.AlertProcessor.critical.level
        ti.expected = 1
        ti.thresholdDiff = 1
        ti.testCase = self
        utils.doGenericValueBasedPolling(ti)


    def testAlertGeneratorCouchDbSizePollerNoAlert(self):
        self.config.AlertGenerator.couchDbSizePoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchDbSizePoller.soft = 5
        self.config.AlertGenerator.couchDbSizePoller.critical = 10
        self.config.AlertGenerator.couchDbSizePoller.pollInterval = 0.2
        ti = utils.TestInput() # see attributes comments at the class
        ti.pollerClass = CouchDbSizePoller
        ti.config = self.config.AlertGenerator.couchDbSizePoller
        ti.thresholdToTest = self.config.AlertGenerator.couchDbSizePoller.soft - 3
        ti.expected = 0
        ti.thresholdDiff = 2
        ti.testCase = self
        utils.doGenericValueBasedPolling(ti)


    def testAlertGeneratorCouchPollerBasic(self):
        self.config.AlertGenerator.section_("bogusCouchPoller")
        self.config.AlertGenerator.bogusCouchPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.bogusCouchPoller.soft = 1000
        self.config.AlertGenerator.bogusCouchPoller.critical = 2000
        self.config.AlertGenerator.bogusCouchPoller.pollInterval = 0.2
        self.config.AlertGenerator.bogusCouchPoller.period = 1
        try:
            poller = CouchPoller(self.config.AlertGenerator.bogusCouchPoller,
                                 self.generator)
        except Exception as ex:
            self.fail("%s: exception: %s" % (self.testName, ex))
        # this class would not have defined polling sample function, give it one
        poller.sample = lambda proc: float(12)
        poller.check()
        # assuming CouchDb server is running, check that 1 sensible measurement value was collected
        self.assertEqual(len(poller._measurements), 1)
        self.assertTrue(isinstance(poller._measurements[0], float))
        # test handling of a non-existing process
        CouchPoller._getProcessPID = lambda inst: 1212121212
        self.assertRaises(Exception, CouchPoller,
                          self.config.AlertGenerator.bogusCouchPoller, self.generator)


    def testAlertGeneratorCouchCPUPollerSoftThreshold(self):
        self.config.AlertGenerator.couchCPUPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchCPUPoller.soft = 70
        self.config.AlertGenerator.couchCPUPoller.critical = 80
        self.config.AlertGenerator.couchCPUPoller.pollInterval = 0.2
        self.config.AlertGenerator.couchCPUPoller.period = 1
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchCPUPoller
        ti.config = self.config.AlertGenerator.couchCPUPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchCPUPoller.soft
        ti.level = self.config.AlertProcessor.soft.level
        ti.expected = 1
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericPeriodAndProcessPolling(ti)


    def testAlertGeneratorCouchCPUPollerCriticalThreshold(self):
        self.config.AlertGenerator.couchCPUPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchCPUPoller.soft = 70
        self.config.AlertGenerator.couchCPUPoller.critical = 80
        self.config.AlertGenerator.couchCPUPoller.pollInterval = 0.2
        self.config.AlertGenerator.couchCPUPoller.period = 1
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchCPUPoller
        ti.config = self.config.AlertGenerator.couchCPUPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchCPUPoller.critical
        ti.level = self.config.AlertProcessor.critical.level
        ti.expected = 1
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericPeriodAndProcessPolling(ti)


    def testAlertGeneratorCouchCPUPollerNoAlert(self):
        self.config.AlertGenerator.couchCPUPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchCPUPoller.soft = 70
        self.config.AlertGenerator.couchCPUPoller.critical = 80
        self.config.AlertGenerator.couchCPUPoller.pollInterval = 0.2
        self.config.AlertGenerator.couchCPUPoller.period = 1
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchCPUPoller
        ti.config = self.config.AlertGenerator.couchCPUPoller
        # lower the threshold so that the alert is never generated
        ti.thresholdToTest = self.config.AlertGenerator.couchCPUPoller.soft - 20
        ti.level = 0
        ti.expected = 0
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericPeriodAndProcessPolling(ti)


    def testAlertGeneratorCouchMemoryPollerSoftThreshold(self):
        self.config.AlertGenerator.couchMemPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchMemPoller.soft = 70
        self.config.AlertGenerator.couchMemPoller.critical = 80
        self.config.AlertGenerator.couchMemPoller.pollInterval = 0.2
        self.config.AlertGenerator.couchMemPoller.period = 1
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchMemoryPoller
        ti.config = self.config.AlertGenerator.couchMemPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchMemPoller.soft
        ti.level = self.config.AlertProcessor.soft.level
        ti.expected = 1
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericPeriodAndProcessPolling(ti)


    def testAlertGeneratorCouchMemoryPollerCriticalThreshold(self):
        self.config.AlertGenerator.couchMemPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchMemPoller.soft = 70
        self.config.AlertGenerator.couchMemPoller.critical = 80
        self.config.AlertGenerator.couchMemPoller.pollInterval = 0.2
        self.config.AlertGenerator.couchMemPoller.period = 1
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchMemoryPoller
        ti.config = self.config.AlertGenerator.couchMemPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchMemPoller.critical
        ti.level = self.config.AlertProcessor.critical.level
        ti.expected = 1
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericPeriodAndProcessPolling(ti)


    def testAlertGeneratorCouchMemoryPollerNoAlert(self):
        self.config.AlertGenerator.couchMemPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchMemPoller.soft = 70
        self.config.AlertGenerator.couchMemPoller.critical = 80
        self.config.AlertGenerator.couchMemPoller.pollInterval = 0.2
        self.config.AlertGenerator.couchMemPoller.period = 1
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchMemoryPoller
        ti.config = self.config.AlertGenerator.couchMemPoller
        # lower the threshold so that the alert is never generated
        ti.thresholdToTest = self.config.AlertGenerator.couchMemPoller.soft - 20
        ti.level = 0
        ti.expected = 0
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericPeriodAndProcessPolling(ti)


    def testAlertGeneratorCouchErrorsPollerBasic(self):
        try:
            poller = CouchErrorsPoller(self.config.AlertGenerator.couchErrorsPoller, self.generator)
        except Exception as ex:
            self.fail("Exception, reason: %s" % ex)

        # even a single value to observe shall be turned into the appropriate iterable
        obs = self.config.AlertGenerator.couchErrorsPoller.observables
        self.config.AlertGenerator.couchErrorsPoller.observables = 400
        try:
            poller = CouchErrorsPoller(self.config.AlertGenerator.couchErrorsPoller, self.generator)
        except Exception as ex:
            self.fail("Exception, reason: %s" % ex)
        #self.assertTrue(isinstance(obs, (types.ListType, types.TupleType)))
        self.assertTrue(isinstance(self.config.AlertGenerator.couchErrorsPoller.observables,
                                   (types.ListType, types.TupleType)))

        # test the return value for a nonsensical HTTP status code
        res = poller.sample("40000")
        self.assertFalse(res)
        # test a real, meaningful HTTP status code
        res = poller.sample("200")
        # on a freshly started couch, this status code may not exist in the
        # stats table, so despite being a correct and meaningful HTTP status code, the
        # query may still return None, hence don't assume any particular response
        #self.assertTrue(isinstance(res, types.IntType))
        poller.check()
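        # The poller presumably coerces a scalar 'observables' setting into an
        # iterable before it is used; a minimal sketch of that idea (an assumption,
        # not the actual WMCore implementation):
        #
        #     def normalizeObservables(value):
        #         return value if isinstance(value, (list, tuple)) else (value, )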


    def testAlertGeneratorCouchErrorsPollerSoftThreshold(self):
        self.config.AlertGenerator.couchErrorsPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchErrorsPoller.soft = 100
        self.config.AlertGenerator.couchErrorsPoller.critical = 200
        # expect one generated alert for each observable value
        self.config.AlertGenerator.couchErrorsPoller.observables = (5, 6, 7)
        self.config.AlertGenerator.couchErrorsPoller.pollInterval = 0.2
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchErrorsPoller
        ti.config = self.config.AlertGenerator.couchErrorsPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchErrorsPoller.soft
        ti.level = self.config.AlertProcessor.soft.level
        ti.expected = len(self.config.AlertGenerator.couchErrorsPoller.observables)
        ti.thresholdDiff = 10
        ti.testCase = self
        utils.doGenericValueBasedPolling(ti)


    def testAlertGeneratorCouchErrorsPollerCriticalThreshold(self):
        self.config.AlertGenerator.couchErrorsPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchErrorsPoller.soft = 100
        self.config.AlertGenerator.couchErrorsPoller.critical = 200
        # expect one generated alert for each observable value
        self.config.AlertGenerator.couchErrorsPoller.observables = (5, 6, 7)
        self.config.AlertGenerator.couchErrorsPoller.pollInterval = 0.2
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchErrorsPoller
        ti.config = self.config.AlertGenerator.couchErrorsPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchErrorsPoller.critical
        ti.level = self.config.AlertProcessor.critical.level
        ti.expected = len(self.config.AlertGenerator.couchErrorsPoller.observables)
        ti.thresholdDiff = 50
        ti.testCase = self
        utils.doGenericValueBasedPolling(ti)


    def testAlertGeneratorCouchErrorsPollerNoAlert(self):
        self.config.AlertGenerator.couchErrorsPoller.couchURL = os.getenv("COUCHURL", None)
        self.config.AlertGenerator.couchErrorsPoller.soft = 100
        self.config.AlertGenerator.couchErrorsPoller.critical = 200
        # expect one generated alert for each observable value
        self.config.AlertGenerator.couchErrorsPoller.observables = (5, 6, 7)
        self.config.AlertGenerator.couchErrorsPoller.pollInterval = 0.2
        ti = utils.TestInput() # see the attribute comments in the TestInput class
        ti.pollerClass = CouchErrorsPoller
        ti.config = self.config.AlertGenerator.couchErrorsPoller
        ti.thresholdToTest = self.config.AlertGenerator.couchErrorsPoller.soft - 30
        ti.expected = 0
        ti.thresholdDiff = 20
        ti.testCase = self
        utils.doGenericValueBasedPolling(ti)
Example #42
class DBSBufferFileTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        DBSBuffer tables.  Also add some dummy locations.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.testInit.clearDatabase(modules = ["WMComponent.DBSBuffer.Database"])
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        self.daoFactory2 = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                      logger = myThread.logger,
                                      dbinterface = myThread.dbi)

        locationAction = self.daoFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")        

        
    def tearDown(self):        
        """
        _tearDown_
        
        Drop all the DBSBuffer tables.
        """
        self.testInit.clearDatabase()

    def testCreateDeleteExists(self):
        """
        _testCreateDeleteExists_

        Test the create(), delete() and exists() methods of the file class
        by creating and deleting a file.  The exists() method will be
        called before and after creation and after deletion.
        """
        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")

        assert testFile.exists() == False, \
               "ERROR: File exists before it was created"

        testFile.addRun(Run(1, *[45]))
        testFile.create()

        assert testFile.exists() > 0, \
               "ERROR: File does not exist after it was created"

        testFile.delete()

        assert testFile.exists() == False, \
               "ERROR: File exists after it has been deleted"
        return

    def testCreateTransaction(self):
        """
        _testCreateTransaction_
        
        Begin a transaction and then create a file in the database.  Afterwards,
        rollback the transaction.  Use the File class's exists() method to
        verify that the file doesn't exist before it was created, exists
        after it was created and doesn't exist after the transaction was rolled
        back.
        """
        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        
        assert testFile.exists() == False, \
               "ERROR: File exists before it was created"
        testFile.addRun(Run(1, *[45]))
        testFile.create()
        
        assert testFile.exists() > 0, \
               "ERROR: File does not exist after it was created"
        
        myThread.transaction.rollback()

        assert testFile.exists() == False, \
               "ERROR: File exists after transaction was rolled back."
        return    
     
    def testDeleteTransaction(self):
        """
        _testDeleteTransaction_
        
        Create a file and commit it to the database.  Start a new transaction
        and delete the file.  Rollback the transaction after the file has been
        deleted.  Use the file class's exists() method to verify that the file
        does not exist after it has been deleted but does exist after the
        transaction is rolled back.
        """
        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        
        assert testFile.exists() == False, \
               "ERROR: File exists before it was created"
        
        testFile.addRun(Run(1, *[45]))
        testFile.create()
        
        assert testFile.exists() > 0, \
               "ERROR: File does not exist after it was created"
        
        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFile.delete()
        
        assert testFile.exists() == False, \
               "ERROR: File exists after it has been deleted"
        
        myThread.transaction.rollback()
        
        assert testFile.exists() > 0, \
               "ERROR: File does not exist after transaction was rolled back."
        return

    def testGetParentLFNs(self):
        """
        _testGetParentLFNs_
        
        Create three files and set them to be parents of a fourth file.  Check
        to make sure that getParentLFNs() on the child file returns the correct
        LFNs.
        """
        testFileParentA = DBSBufferFile(lfn = "/this/is/a/parent/lfnA", size = 1024,
                                        events = 20)
        testFileParentA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileParentA.addRun(Run(1, *[45]))
        
        testFileParentB = DBSBufferFile(lfn = "/this/is/a/parent/lfnB", size = 1024,
                                        events = 20)
        testFileParentB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileParentB.addRun(Run(1, *[45]))
        
        testFileParentC = DBSBufferFile(lfn = "/this/is/a/parent/lfnC", size = 1024,
                                        events = 20)
        testFileParentC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileParentC.addRun(Run( 1, *[45]))
        
        testFileParentA.create()
        testFileParentB.create()
        testFileParentC.create()

        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.addRun(Run( 1, *[45]))
        testFile.create()
        
        testFile.addParents([testFileParentA["lfn"], testFileParentB["lfn"],
                             testFileParentC["lfn"]])
        
        parentLFNs = testFile.getParentLFNs()

        assert len(parentLFNs) == 3, \
               "ERROR: Child does not have the right number of parents"

        goldenLFNs = ["/this/is/a/parent/lfnA",
                      "/this/is/a/parent/lfnB",
                      "/this/is/a/parent/lfnC"]
        for parentLFN in parentLFNs:
            assert parentLFN in goldenLFNs, \
                   "ERROR: Unknown parent lfn"
            goldenLFNs.remove(parentLFN)
                   
        testFile.delete()
        testFileParentA.delete()
        testFileParentB.delete()
        testFileParentC.delete()
        return
    
    def testLoad(self):
        """
        _testLoad_
        
        Test the loading of file meta data using the ID of a file and the
        LFN of a file.
        """
        checksums = {"adler32": "adler32", "cksum": "cksum", "md5": "md5"}
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                                  checksums = checksums)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()
                                                        
        testFileB = DBSBufferFile(lfn = testFileA["lfn"])
        testFileB.load()
        testFileC = DBSBufferFile(id = testFileA["id"])
        testFileC.load()

        assert testFileA == testFileB, \
               "ERROR: File load by LFN didn't work"

        assert testFileA == testFileC, \
               "ERROR: File load by ID didn't work"

        assert type(testFileB["id"]) == int or type(testFileB["id"]) == long, \
               "ERROR: File id is not an integer type."
        assert type(testFileB["size"]) == int or type(testFileB["size"]) == long, \
               "ERROR: File size is not an integer type."
        assert type(testFileB["events"]) == int or type(testFileB["events"]) == long, \
               "ERROR: File events is not an integer type."
        
        assert type(testFileC["id"]) == int or type(testFileC["id"]) == long, \
               "ERROR: File id is not an integer type."
        assert type(testFileC["size"]) == int or type(testFileC["size"]) == long, \
               "ERROR: File size is not an integer type."
        assert type(testFileC["events"]) == int or type(testFileC["events"]) == long, \
               "ERROR: File events is not an integer type."

        testFileA.delete()
        return

    def testAddChild(self):
        """
        _testAddChild_
        
        Add a child to some parent files and make sure that all the parentage
        information is loaded/stored correctly from the database.
        """
        testFileParentA = DBSBufferFile(lfn = "/this/is/a/parent/lfnA", size = 1024,
                                        events = 20)
        testFileParentA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        
        testFileParentA.addRun(Run( 1, *[45]))
        testFileParentB = DBSBufferFile(lfn = "/this/is/a/parent/lfnB", size = 1024,
                                        events = 20)
        testFileParentB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileParentB.addRun(Run( 1, *[45]))
        testFileParentA.create()
        testFileParentB.create()
        
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        
        testFileParentA.addChildren("/this/is/a/lfn")
        testFileParentB.addChildren("/this/is/a/lfn")
        
        testFileB = DBSBufferFile(id = testFileA["id"])
        testFileB.load(parentage = 1)
        
        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)
            
        assert len(goldenFiles) == 0, \
               "ERROR: Some parents are missing"
        return

    def testAddChildTransaction(self):
        """
        _testAddChildTransaction_

        Add a child to some parent files and make sure that all the parentage
        information is loaded/stored correctly from the database.  Rollback the
        addition of one of the children and then verify that it does in fact only
        have one parent.
        """
        testFileParentA = DBSBufferFile(lfn = "/this/is/a/parent/lfnA", size = 1024,
                              events = 20)
        testFileParentA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileParentA.addRun(Run( 1, *[45]))
        
        testFileParentB = DBSBufferFile(lfn = "/this/is/a/parent/lfnB", size = 1024,
                              events = 20)
        testFileParentB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                     appFam = "RECO", psetHash = "GIBBERISH",
                                     configContent = "MOREGIBBERISH")
        testFileParentB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileParentB.addRun(Run( 1, *[45]))
        testFileParentA.create()
        testFileParentB.create()

        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testFileParentA.addChildren("/this/is/a/lfn")

        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFileParentB.addChildren("/this/is/a/lfn")

        testFileB = DBSBufferFile(id = testFileA["id"])
        testFileB.load(parentage = 1)

        goldenFiles = [testFileParentA, testFileParentB]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"

        myThread.transaction.rollback()
        testFileB.load(parentage = 1)

        goldenFiles = [testFileParentA]
        for parentFile in testFileB["parents"]:
            assert parentFile in goldenFiles, \
                   "ERROR: Unknown parent file"
            goldenFiles.remove(parentFile)

        assert len(goldenFiles) == 0, \
              "ERROR: Some parents are missing"
        
        return
    
    def testSetLocation(self):
        """
        _testSetLocation_

        Create a file and add a couple locations.  Load the file from the
        database to make sure that the locations were set correctly.
        """
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        
        testFileA.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"],
                              immediateSave = False)

        testFileB = DBSBufferFile(id = testFileA["id"])
        testFileB.load()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"    
        return

    def testSetLocationTransaction(self):
        """
        _testSetLocationTransaction_

        Create a file at specific locations and commit everything to the
        database.  Reload the file from the database and verify that the
        locations are correct.  Rollback the database transaction and once
        again reload the file.  Verify that the original locations are back.
        """
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()
        
        testFileA.setLocation(["se1.fnal.gov"])

        myThread = threading.currentThread()
        myThread.transaction.begin()
        
        testFileA.setLocation(["se1.cern.ch"])
        testFileA.setLocation(["bunkse1.fnal.gov", "bunkse1.cern.ch"],
                              immediateSave = False)

        testFileB = DBSBufferFile(id = testFileA["id"])
        testFileB.load()

        goldenLocations = ["se1.fnal.gov", "se1.cern.ch"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"

        myThread.transaction.rollback()
        testFileB.load()

        goldenLocations = ["se1.fnal.gov"]

        for location in testFileB["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)

        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"
        return    

    def testLocationsConstructor(self):
        """
        _testLocationsConstructor_

        Test to make sure that locations passed into the DBSBufferFile() constructor
        are saved to and loaded from the database correctly.  Also test to make
        sure that the class behaves well when the location is passed in as a
        single string instead of a set.
        """
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                                  locations = set(["se1.fnal.gov"]))
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        testFileB = DBSBufferFile(lfn = "/this/is/a/lfn2", size = 1024, events = 10,
                                  locations = "se1.fnal.gov")
        testFileB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileB.addRun(Run( 1, *[45]))
        testFileB.create()        

        testFileC = DBSBufferFile(id = testFileA["id"])
        testFileC.load()

        goldenLocations = ["se1.fnal.gov"]
        for location in testFileC["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)
            
        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"

        testFileC = DBSBufferFile(id = testFileB["id"])
        testFileC.load()

        goldenLocations = ["se1.fnal.gov"]
        for location in testFileC["locations"]:
            assert location in goldenLocations, \
                   "ERROR: Unknown file location"
            goldenLocations.remove(location)
            
        assert len(goldenLocations) == 0, \
              "ERROR: Some locations are missing"        
        return

    def testAddRunSet(self):
        """
        _testAddRunSet_

        Test the ability to add run and lumi information to a file.
        """
        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                                 locations = "se1.fnal.gov")
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        
        testFile.create()
        runSet = set()
        runSet.add(Run( 1, *[45]))
        runSet.add(Run( 2, *[67, 68]))
        testFile.addRunSet(runSet)
        
        assert (runSet - testFile["runs"]) == set(), \
            "Error: addRunSet is not updating set correctly"

    def testXSetBlock(self):
        """
        _testSetBlock_

        Verify that the [Set|Get]Block DAOs work correctly.
        """
        myThread = threading.currentThread()
        uploadFactory = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)

        createAction = uploadFactory(classname = "SetBlockStatus")
        createAction.execute(block = "someblockname", locations = ["se1.cern.ch"])

        setBlockAction = self.daoFactory(classname = "DBSBufferFiles.SetBlock")
        getBlockAction = self.daoFactory(classname = "DBSBufferFiles.GetBlock")        

        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10,
                                 locations = "se1.fnal.gov")
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        
        testFile.create()

        setBlockAction.execute(lfn = testFile["lfn"], blockName = "someblockname")
        blockName = getBlockAction.execute(lfn = testFile["lfn"])

        assert blockName[0][0] == "someblockname", \
               "Error: Incorrect block returned: %s" % blockName[0][0]
        return

    def testCountFilesDAO(self):
        """
        _testCountFilesDAO_

        Verify that the CountFiles DAO object functions correctly.
        """
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                                  locations = "se1.fnal.gov")
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()

        testFileB = DBSBufferFile(lfn = "/this/is/a/lfnB", size = 1024, events = 10,
                                  locations = "se1.fnal.gov")
        testFileB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileB.create()

        testFileC = DBSBufferFile(lfn = "/this/is/a/lfnC", size = 1024, events = 10,
                                  locations = "se1.fnal.gov")
        testFileC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileC.create()                

        countAction = self.daoFactory(classname = "CountFiles")

        assert countAction.execute() == 3, \
               "Error: Wrong number of files counted in DBS Buffer."

        return

    def testAddParents(self):
        """
        _testAddParents_

        Verify that the addParents() method works correctly even if the parents
        do not already exist in the database.
        """
        myThread = threading.currentThread()
        
        testFile = DBSBufferFile(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                                 locations = "se1.fnal.gov")
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFile.create()

        testParent = DBSBufferFile(lfn = "/this/is/a/lfnB", size = 1024, events = 10,
                                   locations = "se1.fnal.gov")
        testParent.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                appFam = "RECO", psetHash = "GIBBERISH",
                                configContent = "MOREGIBBERISH")
        testParent.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RAW")
        testParent.create()

        goldenLFNs = ["lfn1", "lfn2", "lfn3", "/this/is/a/lfnB"]
        testFile.addParents(goldenLFNs)

        verifyFile = DBSBufferFile(id = testFile["id"])
        verifyFile.load(parentage = 1)
        parentLFNs = verifyFile.getParentLFNs()

        for parentLFN in parentLFNs:
            self.assertTrue(parentLFN in goldenLFNs, "Error: unknown lfn %s" % parentLFN)
            goldenLFNs.remove(parentLFN)

        self.assertEqual(len(goldenLFNs), 0, "Error: missing LFNs...")
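        # addParents() presumably creates the previously unknown parent LFNs on the
        # fly and files them under a placeholder ("bogus") dataset that is flagged
        # as already present in DBS; the raw SQL below checks exactly that.  (This
        # is an assumption read off the queries, not taken from the addParents() docs.)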
        
        # Check that the bogus dataset is listed as inDBS
        sqlCommand = """SELECT in_dbs FROM dbsbuffer_algo_dataset_assoc das
                          INNER JOIN dbsbuffer_dataset ds ON das.dataset_id = ds.id
                          WHERE ds.path = 'bogus'"""

        status = myThread.dbi.processData(sqlCommand)[0].fetchall()[0][0]

        self.assertEqual(status, 1)


        # Now make sure the dummy files are listed as being in DBS
        sqlCommand = """SELECT status FROM dbsbuffer_file df
                          INNER JOIN dbsbuffer_algo_dataset_assoc das ON das.id = df.dataset_algo
                          INNER JOIN dbsbuffer_dataset ds ON das.dataset_id = ds.id
                          WHERE ds.path = '/bogus/dataset/path' """

        status = myThread.dbi.processData(sqlCommand)[0].fetchall()

        for entry in status:
            self.assertEqual(entry, ('AlreadyInDBS',))

        return

    def testGetChildrenDAO(self):
        """
        _testGetChildrenDAO_

        Verify that the GetChildren DAO correctly returns the LFNs of a file's
        children.
        """
        testFileChildA = DBSBufferFile(lfn = "/this/is/a/child/lfnA", size = 1024,
                                        events = 20)
        testFileChildA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildB = DBSBufferFile(lfn = "/this/is/a/child/lfnB", size = 1024,
                                        events = 20)
        testFileChildB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileChildC = DBSBufferFile(lfn = "/this/is/a/child/lfnC", size = 1024,
                                        events = 20)
        testFileChildC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        
        testFileChildA.create()
        testFileChildB.create()
        testFileChildC.create()

        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFile.create()

        testFileChildA.addParents([testFile["lfn"]])
        testFileChildB.addParents([testFile["lfn"]])
        testFileChildC.addParents([testFile["lfn"]])        

        getChildrenAction = self.daoFactory(classname = "DBSBufferFiles.GetChildren")
        childLFNs = getChildrenAction.execute(testFile["lfn"])
        
        assert len(childLFNs) == 3, \
               "ERROR: Parent does not have the right number of children."

        goldenLFNs = ["/this/is/a/child/lfnA",
                      "/this/is/a/child/lfnB",
                      "/this/is/a/child/lfnC"]
        for childLFN in childLFNs:
            assert childLFN in goldenLFNs, \
                   "ERROR: Unknown child lfn"
            goldenLFNs.remove(childLFN)
                   
        return

    def testGetParentStatusDAO(self):
        """
        _testGetParentStatusDAO_

        Verify that the GetParentStatus DAO correctly returns the status of a
        file's parents.
        """
        testFileChild = DBSBufferFile(lfn = "/this/is/a/child/lfnA", size = 1024,
                                        events = 20)
        testFileChild.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChild.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChild.create()

        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFile.create()

        testFileChild.addParents([testFile["lfn"]])

        getStatusAction = self.daoFactory(classname = "DBSBufferFiles.GetParentStatus")
        parentStatus = getStatusAction.execute(testFileChild["lfn"])

        assert len(parentStatus) == 1, \
               "ERROR: Wrong number of statuses returned."
        assert parentStatus[0] == "NOTUPLOADED", \
               "ERROR: Wrong status returned."

        return

    def testSetLocationByLFN(self):
        """
        _testSetLocationByLFN_

        """

        myThread = threading.currentThread()

        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileA.addRun(Run( 1, *[45]))
        testFileA.create()

        setLocationAction = self.daoFactory(classname = "DBSBufferFiles.SetLocationByLFN")
        setLocationAction.execute(binds = {'lfn': "/this/is/a/lfn", 'sename': 'se1.cern.ch'})

        testFileB = DBSBufferFile(id = testFileA["id"])
        testFileB.load()

        self.assertEqual(testFileB['locations'], set(['se1.cern.ch']))
        
        return


    def testAddCKSumByLFN(self):
        """
        _testAddCKSumByLFN_

        """

        
        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.create()

        checksums = {"adler32": "adler32", "cksum": "cksum", "md5": "md5"}
        setCksumAction = self.daoFactory(classname = "DBSBufferFiles.AddChecksumByLFN")
        binds = [{'lfn': "/this/is/a/lfn", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/lfn", 'cktype': 'cksum', 'cksum': 101}]
        setCksumAction.execute(bulkList = binds)

        testFileB = DBSBufferFile(id = testFileA["id"])
        testFileB.load()

        self.assertEqual(testFileB['checksums'], {'adler32': '201', 'cksum': '101'})

        return



    def testBulkLoad(self):
        """
        _testBulkLoad_

        Can we load in bulk?
        """


        addToBuffer = DBSBufferUtil()

        bulkLoad = self.daoFactory(classname = "DBSBufferFiles.LoadBulkFilesByID")


        testFileChildA = DBSBufferFile(lfn = "/this/is/a/child/lfnA", size = 1024,
                                        events = 20)
        testFileChildA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildB = DBSBufferFile(lfn = "/this/is/a/child/lfnB", size = 1024,
                                        events = 20)
        testFileChildB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileChildC = DBSBufferFile(lfn = "/this/is/a/child/lfnC", size = 1024,
                                        events = 20)
        testFileChildC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        
        testFileChildA.create()
        testFileChildB.create()
        testFileChildC.create()

        testFileChildA.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileChildB.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileChildC.setLocation(["se1.fnal.gov", "se1.cern.ch"])

        runSet = set()
        runSet.add(Run( 1, *[45]))
        runSet.add(Run( 2, *[67, 68]))
        testFileChildA.addRunSet(runSet)
        testFileChildB.addRunSet(runSet)
        testFileChildC.addRunSet(runSet)


        testFileChildA.save()
        testFileChildB.save()
        testFileChildC.save()

        setCksumAction = self.daoFactory(classname = "DBSBufferFiles.AddChecksumByLFN")
        binds = [{'lfn': "/this/is/a/child/lfnA", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/child/lfnA", 'cktype': 'cksum', 'cksum': 101},
                 {'lfn': "/this/is/a/child/lfnB", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/child/lfnB", 'cktype': 'cksum', 'cksum': 101},
                 {'lfn': "/this/is/a/child/lfnC", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/child/lfnC", 'cktype': 'cksum', 'cksum': 101}]
        setCksumAction.execute(bulkList = binds)        

        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFile.create()

        testFileChildA.addParents([testFile["lfn"]])
        testFileChildB.addParents([testFile["lfn"]])
        testFileChildC.addParents([testFile["lfn"]])


        binds = [{'id': testFileChildA.exists()},
                 {'id': testFileChildB.exists()},
                 {'id': testFileChildC.exists()}]

        listOfFiles = addToBuffer.loadDBSBufferFilesBulk(fileObjs = binds)

        #print listOfFiles


        compareList = ['locations', 'psetHash', 'configContent', 'appName',
                       'appVer', 'appFam', 'events', 'datasetPath', 'runs']


        for f in listOfFiles:
            self.assertTrue(f['lfn'] in ["/this/is/a/child/lfnA", "/this/is/a/child/lfnB",
                                         "/this/is/a/child/lfnC"],
                            "Unknown file in loaded results")
            self.assertEqual(f['checksums'], {'adler32': '201', 'cksum': '101'})
            for parent in f['parents']:
                self.assertEqual(parent['lfn'], testFile['lfn'])
            for key in compareList:
                self.assertEqual(f[key], testFileChildA[key])


    def testProperties(self):
        """
        _testProperties_

        Test the additional tags that DBSBuffer transfers from the workload to DBS
        """


        testFileA = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024, events = 10)
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileA.setValidStatus(validStatus = "VALID")
        testFileA.setProcessingVer(ver = "ProcVer")
        testFileA.setAcquisitionEra(era = "AcqEra")
        testFileA.setGlobalTag(globalTag = "GlobalTag")
        testFileA.setDatasetParent(datasetParent = "Parent")
        testFileA.create()

        # There are no accessors for these things because load is never called
        action = self.daoFactory2(classname = "LoadInfoFromDAS")
        das    = action.execute(ids = [1])[0]
        self.assertEqual(das['Parent'], 'Parent')
        self.assertEqual(das['GlobalTag'], 'GlobalTag')
        self.assertEqual(das['ValidStatus'], 'VALID')
Example #43
class ConditionTest(unittest.TestCase):
    """
    _ExpressTest_

    Test for Express job splitter
    """

    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)

        wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS",
                                    logger = logging,
                                    dbinterface = myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE')
                                    """, transaction = False)

        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'TIME' : int(time.time()),
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)

        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 1 },
                              transaction = False)

        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
                                transaction = False)

        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
        insertStreamerDAO.execute(binds = { 'RUN' : 1,
                                            'LUMI' : 1,
                                            'STREAM' : "Express",
                                            'TIME' : int(time.time()),
                                            'LFN' : "/streamer",
                                            'FILESIZE' : 0,
                                            'EVENTS' : 0 },
                                  transaction = False)

        insertPromptCalibrationDAO = daoFactory(classname = "RunConfig.InsertPromptCalibration")
        insertPromptCalibrationDAO.execute( { 'RUN' : 1,
                                              'STREAM' : "Express" },
                                            transaction = False)

        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset1.create()

        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow1.create()

        self.subscription1  = Subscription(fileset = self.fileset1,
                                           workflow = workflow1,
                                           split_algo = "Condition",
                                           type = "Condition")
        self.subscription1.create()

        # set parentage chain and sqlite fileset
        alcaRecoFile = File("/alcareco", size = 0, events = 0)
        alcaRecoFile.addRun(Run(1, *[1]))
        alcaRecoFile.setLocation("SomeSE", immediateSave = False)
        alcaRecoFile.create()
        alcaPromptFile = File("/alcaprompt", size = 0, events = 0)
        alcaPromptFile.addRun(Run(1, *[1]))
        alcaPromptFile.setLocation("SomeSE", immediateSave = False)
        alcaPromptFile.create()
        sqliteFile = File("/sqlite", size = 0, events = 0)
        sqliteFile.create()
        self.fileset1.addFile(sqliteFile)
        self.fileset1.commit()

        results = myThread.dbi.processData("""SELECT lfn FROM wmbs_file_details
                                              """,
                                           transaction = False)[0].fetchall()

        setParentageDAO = wmbsDaoFactory(classname = "Files.SetParentage")
        setParentageDAO.execute(binds = [ { 'parent' : "/streamer",
                                            'child' : "/alcareco" },
                                          { 'parent' : "/alcareco",
                                            'child' : "/alcaprompt" },
                                          { 'parent' : "/alcaprompt",
                                            'child' : "/sqlite" } ],
                                transaction = False)
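        # the parentage chain now reads: /streamer -> /alcareco -> /alcaprompt -> /sqlite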

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['runNumber'] = 1
        self.splitArgs['streamName'] = "Express"

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def isPromptCalibFinished(self):
        """
        _isPromptCalibFinished_

        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT finished
                                             FROM prompt_calib
                                             """,
                                          transaction = False)[0].fetchall()[0][0]

        return result

    def countPromptCalibFiles(self):
        """
        _countPromptCalibFiles_

        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT COUNT(*)
                                             FROM prompt_calib_file
                                             """,
                                          transaction = False)[0].fetchall()[0][0]

        return result

    def test00(self):
        """
        _test00_

        Make sure the job splitter behaves correctly.

        Just make sure the job splitter does nothing
        when the fileset is open and populates t0ast
        data structures when it's closed. In the latter
        case all input files should be marked as
        acquired without creating any jobs.

        """
        mySplitArgs = self.splitArgs.copy()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 0,
                         "ERROR: there should be no prompt_calib_file")

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.assertEqual(self.isPromptCalibFinished(), 1,
                         "ERROR: prompt_calib should be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        return
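
For orientation, the splitting pattern exercised here (and in the FileBased examples below) always follows the same three steps. A minimal sketch, assuming an existing WMBS subscription; the SplitterFactory import path is the usual WMCore one, but treat it as an assumption:

from WMCore.JobSplitting.SplitterFactory import SplitterFactory

def splitSubscription(subscription, **splitArgs):
    # Ask the factory for the splitter bound to this subscription's
    # split_algo, then run it with algorithm-specific arguments such as
    # files_per_job or, for the Tier-0 case above, runNumber and streamName.
    splitter = SplitterFactory()
    jobFactory = splitter(package = "WMCore.WMBS", subscription = subscription)
    return jobFactory(**splitArgs)
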
Example #44
class ReceiverTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.addr = "tcp://127.0.0.1:5557"
        self.ctrl = "tcp://127.0.0.1:5559"
        # simple printer Alert messages handler
        self.printer = lambda x: sys.stdout.write("printer handler: '%s'\n" %
                                                  str(x))

    def tearDown(self):
        pass

    def _getSenderChannels(self):
        context = zmq.Context()
        # set up a channel to send work
        workChann = context.socket(zmq.PUSH)
        workChann.connect(self.addr)
        # set up control channel
        contChann = context.socket(zmq.PUB)
        contChann.connect(self.ctrl)
        return workChann, contChann

    def testReceiverShutdownByMessage(self):
        # start a Receiver
        rec = Receiver(self.addr, self.printer, self.ctrl)
        rec.startReceiver()  # non blocking call

        workChann, contChann = self._getSenderChannels()

        # send some messages to the receiver and shut it eventually
        contChann.send_json(RegisterMsg("Receiver_t"))
        workChann.send_json(Alert(Type="Alert", Level=10))
        contChann.send_json(UnregisterMsg("Receiver_t"))
        # terminate the Receiver
        contChann.send_json(ShutdownMsg())

        # wait until the Receiver is properly shut
        # this will not be necessary when shutting down by a call
        while rec.isReady():
            time.sleep(0.1)

    def testReceiverShutdownByCall(self):
        # start a Receiver
        rec = Receiver(self.addr, self.printer, self.ctrl)
        rec.startReceiver()  # non blocking call

        workChann, contChann = self._getSenderChannels()

        # send some messages to the receiver and shut it eventually
        contChann.send_json(RegisterMsg("Receiver_t"))
        workChann.send_json(Alert(Type="Alert", Level=20))
        contChann.send_json(UnregisterMsg("Receiver_t"))

        # now messages are sent so shutdown the Receiver by a convenience
        # call, should block until the Receiver finishes, don't have to wait
        rec.shutdown()

    def testReceiverBuffering(self):
        """
        Test sending alerts that are buffered into a queue.

        """
        # alerts received will be added to the queue
        alertList = []
        handler = lambda x: alertList.append(x)
        rec = Receiver(self.addr, handler, self.ctrl)
        rec.startReceiver()  # non blocking call

        numAlertMsgs = 7
        thread = AlertsSender(self.addr, self.ctrl, numAlertMsgs)
        # thread will send numAlertMsgs and eventually shut the receiver
        # down by a shutdown control message
        thread.start()

        # wait here until the Receiver is not shut
        while rec.isReady():
            time.sleep(0.2)

        # the worker sends a shutdown message and execution resumes here;
        # check the content of the queue - it should hold numAlertMsgs alerts
        self.assertEqual(len(alertList), numAlertMsgs)
        for alert in alertList:
            self.failUnless(alert.has_key("Type"))
            self.assertEqual(alert["Type"], "Alert")
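
The message flow driven by these Receiver tests can be condensed into one helper; this is only a sketch, reusing the zmq channel setup and the RegisterMsg/Alert/UnregisterMsg/ShutdownMsg classes already imported by the test module (the helper name is ours):

def sendAlertsAndShutdown(addr, ctrl, alerts):
    # Mirrors the flow above: register with the Receiver, push Alert
    # messages over the work channel, unregister, then ask it to shut down.
    context = zmq.Context()
    workChann = context.socket(zmq.PUSH)
    workChann.connect(addr)
    contChann = context.socket(zmq.PUB)
    contChann.connect(ctrl)
    contChann.send_json(RegisterMsg("Receiver_t"))
    for alert in alerts:
        workChann.send_json(alert)
    contChann.send_json(UnregisterMsg("Receiver_t"))
    contChann.send_json(ShutdownMsg())
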
Example #45
class FileBasedTest(unittest.TestCase):
    """
    _FileBasedTest_

    Test file based job splitting.
    """

    def setUp(self):
        """
        _setUp_

        Create three subscriptions: one that contains a single file, one that
        contains multiple files (each with a parent and a distinct run) and
        one whose files are spread over multiple sites.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)
        
        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)
        
        locationAction = daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "site1", seName = "somese.cern.ch")
        locationAction.execute(siteName = "site2", seName = "otherse.cern.ch")
        
        self.multipleFileFileset = Fileset(name = "TestFileset1")
        self.multipleFileFileset.create()
        parentFile = File('/parent/lfn/', size = 1000, events = 100, 
                          locations = set(["somese.cern.ch"])) 
        parentFile.create() 
        for i in range(10):
            newFile = File(makeUUID(), size = 1000, events = 100,
                           locations = set(["somese.cern.ch"]))
            newFile.addRun(Run(i, *[45]))
            newFile.create()
            newFile.addParent(lfn = parentFile['lfn']) 
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        self.singleFileFileset = Fileset(name = "TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name", size = 1000, events = 100,
                       locations = set(["somese.cern.ch"]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()


        self.multipleSiteFileset = Fileset(name = "TestFileset3")
        self.multipleSiteFileset.create()
        for i in range(5):
            newFile = File(makeUUID(), size = 1000, events = 100,
                           locations = set(["somese.cern.ch"]))
            newFile.create()
            self.multipleSiteFileset.addFile(newFile)
        for i in range(5):
            newFile = File(makeUUID(), size = 1000, events = 100,
                           locations = set(["otherse.cern.ch", "somese.cern.ch"]))
            newFile.create()
            self.multipleSiteFileset.addFile(newFile)
        self.multipleSiteFileset.commit()

        testWorkflow = Workflow(spec = "spec.xml", owner = "Steve",
                                name = "wf001", task="Test" )
        testWorkflow.create()
        self.multipleFileSubscription = Subscription(fileset = self.multipleFileFileset,
                                                     workflow = testWorkflow,
                                                     split_algo = "FileBased",
                                                     type = "Processing")
        self.multipleFileSubscription.create()
        self.singleFileSubscription = Subscription(fileset = self.singleFileFileset,
                                                   workflow = testWorkflow,
                                                   split_algo = "FileBased",
                                                   type = "Processing")
        self.singleFileSubscription.create()

        self.multipleSiteSubscription = Subscription(fileset = self.multipleSiteFileset,
                                                     workflow = testWorkflow,
                                                     split_algo = "FileBased",
                                                     type = "Processing")
        self.multipleSiteSubscription.create()
        return
    
    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase()
        return

    def createLargeFileBlock(self):
        """
        _createLargeFileBlock_
        
        Creates a large group of files for testing
        """
        testFileset = Fileset(name = "TestFilesetX")
        testFileset.create()
        for i in range(5000):
            newFile = File(makeUUID(), size = 1000, events = 100,
                           locations = set(["somese.cern.ch"]))
            newFile.create()
            testFileset.addFile(newFile)
        testFileset.commit()
            
        testWorkflow = Workflow(spec = "spec.xml", owner = "mnorman",
                                name = "wf003", task="Test" )
        testWorkflow.create()

        largeSubscription = Subscription(fileset = testFileset,
                                         workflow = testWorkflow,
                                         split_algo = "FileBased",
                                         type = "Processing")
        largeSubscription.create()

        return largeSubscription

    def testExactFiles(self):
        """
        _testExactFiles_

        Test file based job splitting when the number of files per job is
        exactly the same as the number of files in the input fileset.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.singleFileSubscription)

        jobGroups = jobFactory(files_per_job = 1)


        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)
        job = jobGroups[0].jobs.pop()
        self.assertEqual(job.getFiles(type = "lfn"), ["/some/file/name"])

        return

    def testMoreFiles(self):
        """
        _testMoreFiles_

        Test file based job splitting when the number of files per job is
        greater than the number of files in the input fileset.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.singleFileSubscription)
        
        jobGroups = jobFactory(files_per_job = 10)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)
        job = jobGroups[0].jobs.pop()
        self.assertEqual(job.getFiles(type = "lfn"), ["/some/file/name"])

        return
        


    def test2FileSplit(self):
        """
        _test2FileSplit_

        Test file based job splitting when the number of files per job is
        2, this should result in five jobs.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.multipleFileSubscription)
        
        jobGroups = jobFactory(files_per_job = 2)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 5)

        fileList = []
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 2)
            for file in job.getFiles(type = "lfn"):
                fileList.append(file)
        self.assertEqual(len(fileList), 10)
        
        return

    def test3FileSplit(self):
        """
        _test3FileSplit_

        Test file based job splitting when the number of files per job is
        3, this should result in four jobs.
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.multipleFileSubscription)
        
        jobGroups = jobFactory(files_per_job = 3)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 4)
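        # Worked arithmetic: 10 files at 3 per job gives ceil(10 / 3) = 4 jobs;
        # three jobs carry 3 files each and the last carries the single
        # remaining file, hence the [3, 1] check below.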

        fileList = []
        for job in jobGroups[0].jobs:
            assert len(job.getFiles()) in [3, 1], "ERROR: Job contains incorrect number of files"
            for file in job.getFiles(type = "lfn"):
                fileList.append(file)
        self.assertEqual(len(fileList), 10)
        
        return


    def testLocationSplit(self):
        """
        _testLocationSplit_

        Test that the FileBased algorithm understands that files at separate
        sites cannot be in the same jobGroup (this is the current standard).
        With five single-site files and five dual-site files this should
        produce two job groups.
        """
        myThread = threading.currentThread()

        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.multipleSiteSubscription)

        jobGroups = jobFactory(files_per_job = 10)

        self.assertEqual(len(jobGroups), 2)
        self.assertEqual(len(jobGroups[0].jobs), 1)

        fileList = []
        self.assertEqual(len(jobGroups[1].jobs[0].getFiles()), 5)

        
        return
    
    def testLimit(self):
        """
        _testLimit_
        
        Test what happens when you limit the number of files loaded per pass.
        Each pass then handles a single file, so one call creates one
        jobGroup with one job containing one file (the limit arguments
        control this behaviour).
        """
        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.multipleFileSubscription)
                              
        
        jobGroups = jobFactory(files_per_job = 10, limit_file_loading = True,
                               file_load_limit = 1)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 1)

        return

    def testRespectRunBoundaries(self):
        """
        _testRespectRunBoundaries_

        Test that the splitter respects run boundaries. Every input file in
        setUp carries a different run number, so even with files_per_job = 10
        the result should be ten single-file jobs.
        """

        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.multipleFileSubscription)

        jobGroups = jobFactory(files_per_job = 10, respect_run_boundaries = True)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 10)

        return

    def test_getParents(self): 
        """ 
        _getParents_ 
        
        Check that we can do the same as the TwoFileBased 
        """ 
        splitter = SplitterFactory() 
        jobFactory = splitter(package = "WMCore.WMBS", 
                              subscription = self.multipleFileSubscription) 
        
        jobGroups = jobFactory(files_per_job = 2, 
                               include_parents  = True) 
        
        self.assertEqual(len(jobGroups), 1) 
        self.assertEqual(len(jobGroups[0].jobs), 5) 
        
        fileList = [] 
        for job in jobGroups[0].jobs: 
            self.assertEqual(len(job.getFiles()), 2) 
            for file in job.getFiles(type = "lfn"): 
                fileList.append(file) 
        self.assertEqual(len(fileList), 10) 
        
        for j in jobGroups[0].jobs: 
            for f in j['input_files']: 
                self.assertEqual(len(f['parents']), 1) 
                self.assertEqual(list(f['parents'])[0]['lfn'], '/parent/lfn/') 
                
        return 



    def testZ_randomCrapForGenerators(self):
        """
        Either this works, and all other tests are obsolete, or it doesn't and they aren't.
        Either way, don't screw around with this.
        """

        def runCode(self, jobFactory):

            func = self.crazyAssFunction(jobFactory = jobFactory, file_load_limit = 500)

            

            startTime = time.time()
            goFlag    = True
            while goFlag:
                try:
                    res = func.next()
                    self.jobGroups.extend(res)
                except StopIteration:
                    goFlag = False
                    
            stopTime  = time.time()
            return self.jobGroups

        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS",
                              subscription = self.multipleSiteSubscription)

        jobFactory.open()
        jobGroups = []

        a = self.crazyAssFunction(jobFactory = jobFactory, file_load_limit = 2)

        for x in range(7):
            try:
                res = a.next()
                jobGroups.extend(res)
            except StopIteration:
                continue

        jobFactory.close()

        self.assertEqual(len(jobGroups), 6)
        for group in jobGroups:
            self.assertTrue(len(group.jobs) in [1, 2])
            for job in group.jobs:
                self.assertTrue(len(job['input_files']) in (1,2))
                for file in job['input_files']:
                    self.assertTrue(file['locations'] in [set(['somese.cern.ch']),
                                                              set(['otherse.cern.ch',
                                                                   'somese.cern.ch'])])



        self.jobGroups = []

        subscript = self.createLargeFileBlock()

        splitter = SplitterFactory()
        jobFactory = splitter(package = "WMCore.WMBS", subscription = subscript)

        jobFactory.open()

        runCode(self, jobFactory)
        #cProfile.runctx("runCode(self, jobFactory)", globals(), locals(), "coroutine.stats")

        jobGroups = self.jobGroups
        
        self.assertEqual(len(jobGroups), 10)
        for group in jobGroups:
            self.assertEqual(len(group.jobs), 500)
            self.assertTrue(group.exists() > 0)


        jobFactory.close()
        return

    def crazyAssFunction(self, jobFactory, file_load_limit = 1):
        groups = ['test']
        while groups != []:
            groups = jobFactory(files_per_job = 1, file_load_limit = file_load_limit)
            yield groups
Example #46
class WMAgentTest(unittest.TestCase):
    """
    _WMAgentTest_

    Global unittest for all WMAgent components
    """

    # This is an integration test
    __integration__ = "Any old bollocks"

    sites = ['T2_US_Florida', 'T2_US_UCSD', 'T2_TW_Taiwan', 'T1_CH_CERN']
    components = ['JobCreator', 'JobSubmitter', 'JobTracker',
                  'JobAccountant', 'JobArchiver', 'TaskArchiver',
                  'RetryManager', 'ErrorHandler']

    def setUp(self):
        """
        _setUp_

        Set up vital components
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS",'WMCore.MsgService',
                                                 'WMCore.ResourceControl', 'WMCore.ThreadPool',
                                                 'WMCore.Agent.Database'],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package = "WMCore.WMBS",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)



        locationAction = self.daoFactory(classname = "Locations.New")
        pendingSlots  = self.daoFactory(classname = "Locations.SetPendingSlots")


        for site in self.sites:
            locationAction.execute(siteName = site, seName = 'se.%s' % (site), ceName = site)
            pendingSlots.execute(siteName = site, pendingSlots = 1000)


        #Create sites in resourceControl
        resourceControl = ResourceControl()
        for site in self.sites:
            resourceControl.insertSite(siteName = site, seName = 'se.%s' % (site), ceName = site)
            resourceControl.insertThreshold(siteName = site, taskType = 'Processing', \
                                            maxSlots = 10000, pendingSlots = 10000)


        self.testDir = self.testInit.generateWorkDir()


        # Set heartbeat
        for component in self.components:
            heartbeatAPI = HeartbeatAPI(component)
            heartbeatAPI.registerComponent()




        return


    def tearDown(self):
        """
        _tearDown_

        Tear down everything and go home.
        """

        self.testInit.clearDatabase()

        self.testInit.delWorkDir()

        return




    def createTestWorkload(self, workloadName = 'Test', emulator = True):
        """
        _createTestWorkload_

        Creates a test workload for us to run on, holding the basic necessities.
        """


        workload = testWorkload("TestWorkload")
        rereco = workload.getTask("ReReco")


        taskMaker = TaskMaker(workload, os.path.join(self.testDir, 'workloadTest'))
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        workload.save(workloadName)

        return workload




    def getConfig(self):
        """
        _getConfig_

        This is the global test configuration object
        """



        config = Configuration()

        config.component_("Agent")
        config.Agent.WMSpecDirectory = self.testDir
        config.Agent.agentName       = 'testAgent'
        config.Agent.componentName   = 'test'


        # First the general stuff
        config.section_("General")
        config.General.workDir = os.getenv("TESTDIR", self.testDir)

        # Now the CoreDatabase information
        # This should be the dialect, dburl, etc

        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket     = os.getenv("DBSOCK")



        # JobCreator
        config.component_("JobCreator")
        config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'
        config.JobCreator.logLevel  = 'DEBUG'
        config.JobCreator.maxThreads                = 1
        config.JobCreator.UpdateFromResourceControl = True
        config.JobCreator.pollInterval              = 10
        config.JobCreator.jobCacheDir               = self.testDir
        config.JobCreator.defaultJobType            = 'processing' #Type of jobs that we run, used for resource control
        config.JobCreator.workerThreads             = 2
        config.JobCreator.componentDir              = os.path.join(os.getcwd(), 'Components')



        # JobSubmitter
        config.component_("JobSubmitter")
        config.JobSubmitter.namespace     = 'WMComponent.JobSubmitter.JobSubmitter'
        config.JobSubmitter.logLevel      = 'INFO'
        config.JobSubmitter.maxThreads    = 1
        config.JobSubmitter.pollInterval  = 10
        config.JobSubmitter.pluginName    = 'CondorGlobusPlugin'
        config.JobSubmitter.pluginDir     = 'JobSubmitter.Plugins'
        config.JobSubmitter.submitDir     = os.path.join(self.testDir, 'submit')
        config.JobSubmitter.submitNode    = os.getenv("HOSTNAME", 'badtest.fnal.gov')
        config.JobSubmitter.submitScript  = os.path.join(getWMBASE(),
                                                         'test/python/WMComponent_t/JobSubmitter_t',
                                                         'submit.sh')
        config.JobSubmitter.componentDir  = os.path.join(os.getcwd(), 'Components')
        config.JobSubmitter.workerThreads = 2
        config.JobSubmitter.jobsPerWorker = 200




        # JobTracker
        config.component_("JobTracker")
        config.JobTracker.logLevel      = 'DEBUG'
        config.JobTracker.pollInterval  = 10
        config.JobTracker.trackerName   = 'CondorTracker'
        config.JobTracker.pluginDir     = 'WMComponent.JobTracker.Plugins'
        config.JobTracker.componentDir  = os.path.join(os.getcwd(), 'Components')
        config.JobTracker.runTimeLimit  = 7776000 #Jobs expire after 90 days
        config.JobTracker.idleTimeLimit = 7776000
        config.JobTracker.heldTimeLimit = 7776000
        config.JobTracker.unknTimeLimit = 7776000



        # JobAccountant
        config.component_("JobAccountant")
        config.JobAccountant.pollInterval = 60
        config.JobAccountant.componentDir = os.path.join(os.getcwd(), 'Components')
        config.JobAccountant.logLevel     = 'INFO'



        # JobArchiver
        config.component_("JobArchiver")
        config.JobArchiver.pollInterval          = 60
        config.JobArchiver.logLevel              = 'INFO'
        config.JobArchiver.logDir                = os.path.join(self.testDir, 'logs')
        config.JobArchiver.componentDir          = os.path.join(os.getcwd(), 'Components')
        config.JobArchiver.numberOfJobsToCluster = 1000



        # Task Archiver
        config.component_("TaskArchiver")
        config.TaskArchiver.componentDir    = self.testInit.generateWorkDir()
        config.TaskArchiver.WorkQueueParams = {}
        config.TaskArchiver.pollInterval    = 60
        config.TaskArchiver.logLevel        = 'INFO'
        config.TaskArchiver.timeOut         = 0



        # JobStateMachine
        config.component_('JobStateMachine')
        config.JobStateMachine.couchurl        = os.getenv('COUCHURL',
                                                           'mnorman:[email protected]:5984')
        config.JobStateMachine.couchDBName     = "mnorman_test"


        # Needed, because this is a test
        os.makedirs(config.JobSubmitter.submitDir)


        return config



    def createFileCollection(self, name, nSubs, nFiles, workflowURL = 'test', site = None):
        """
        _createFileCollection_

        Create a collection of files for splitting into jobs
        """

        myThread = threading.currentThread()

        testWorkflow = Workflow(spec = workflowURL, owner = "mnorman",
                                name = name, task="/TestWorkload/ReReco")
        testWorkflow.create()

        for sub in range(nSubs):

            nameStr = '%s-%i' % (name, sub)

            testFileset = Fileset(name = nameStr)
            testFileset.create()

            for f in range(nFiles):
                # pick a random site
                if not site:
                    tmpSite = 'se.%s' % (random.choice(self.sites))
                else:
                    tmpSite = 'se.%s' % (site)
                testFile = File(lfn = "/lfn/%s/%i" % (nameStr, f), size = 1024, events = 10)
                testFile.setLocation(tmpSite)
                testFile.create()
                testFileset.addFile(testFile)

            testFileset.commit()
            testFileset.markOpen(isOpen = 0)
            testSubscription = Subscription(fileset = testFileset,
                                            workflow = testWorkflow,
                                            type = "Processing",
                                            split_algo = "FileBased")
            testSubscription.create()


        return



    def createReports(self, jobs, retryCount = 0):
        """
        _createReports_

        Create some dummy job reports for each job
        """


        report = Report()
        report.addStep('testStep', 0)

        for job in jobs:
            #reportPath = os.path.join(job['cache_dir'], 'Report.%i.pkl' % (retryCount))
            reportPath = job['fwjr_path']
            if os.path.exists(reportPath):
                os.remove(reportPath)
            report.save(reportPath)


        return



    def testA_StraightThrough(self):
        """
        _StraightThrough_

        Just run everything straight through without any variations
        """
        # Do pre-submit job check
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, 0, "User currently has %i running jobs.  Test will not continue" % (nRunning))

        myThread = threading.currentThread()
        workload = self.createTestWorkload()
        config   = self.getConfig()


        name         = 'WMAgent_Test1'
        site         = self.sites[0]
        nSubs        = 5
        nFiles       = 10
        workloadPath = os.path.join(self.testDir, 'workloadTest',
                                    'TestWorkload', 'WMSandbox',
                                    'WMWorkload.pkl')

        # Create a collection of files
        self.createFileCollection(name = name, nSubs = nSubs,
                                  nFiles = nFiles,
                                  workflowURL = workloadPath,
                                  site = site)



        ############################################################
        # Test the JobCreator


        config.Agent.componentName = 'JobCreator'
        testJobCreator = JobCreatorPoller(config = config)

        testJobCreator.algorithm()
        time.sleep(5)


        # Did all jobs get created?
        getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
        result = getJobsAction.execute(state = 'Created', jobType = "Processing")
        self.assertEqual(len(result), nSubs*nFiles)


        # Count database objects
        result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
        self.assertEqual(len(result), nSubs * nFiles)


        # Find the test directory
        testDirectory = os.path.join(self.testDir, 'TestWorkload', 'ReReco')
        self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))
        self.assertTrue(len(os.listdir(testDirectory)) <= 20)

        groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')

        # First job should be in here
        self.assertTrue('job_1' in os.listdir(groupDirectory))
        jobFile = os.path.join(groupDirectory, 'job_1', 'job.pkl')
        self.assertTrue(os.path.isfile(jobFile))
        f = open(jobFile, 'r')
        job = cPickle.load(f)
        f.close()


        self.assertEqual(job['workflow'], name)
        self.assertEqual(len(job['input_files']), 1)
        self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')










        ###############################################################
        # Now test the JobSubmitter

        config.Agent.componentName = 'JobSubmitter'
        testJobSubmitter = JobSubmitterPoller(config = config)


        testJobSubmitter.algorithm()


        # Check that jobs are in the right state
        result = getJobsAction.execute(state = 'Created', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)



        # Check assigned locations
        getLocationAction = self.daoFactory(classname = "Jobs.GetLocation")
        for id in result:
            loc = getLocationAction.execute(jobid = id)
            self.assertEqual(loc, [[site]])


        # Check to make sure we have running jobs
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, nFiles * nSubs)


        #################################################################
        # Now the JobTracker


        config.Agent.componentName = 'JobTracker'
        testJobTracker = JobTrackerPoller(config = config)
        testJobTracker.setup()

        testJobTracker.algorithm()

        # Running the algo without removing the jobs should do nothing
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)


        condorRM()
        time.sleep(1)

        # All jobs gone?
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, 0)


        testJobTracker.algorithm()
        time.sleep(5)

        # With the jobs gone from condor, the tracker should mark them complete
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)




        #################################################################
        # Now the JobAccountant

        # First you need to load all jobs


        self.getFWJRAction = self.daoFactory(classname = "Jobs.GetFWJRByState")
        completeJobs       = self.getFWJRAction.execute(state = "complete")


        # Create reports for all jobs
        self.createReports(jobs = completeJobs, retryCount = 0)






        config.Agent.componentName = 'JobAccountant'
        testJobAccountant = JobAccountantPoller(config = config)
        testJobAccountant.setup()


        # It should do something with the jobs
        testJobAccountant.algorithm()


        # All the jobs should be done now
        result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Success', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)



        #######################################################################
        # Now the JobArchiver


        config.Agent.componentName = 'JobArchiver'
        testJobArchiver = JobArchiverPoller(config = config)


        testJobArchiver.algorithm()

        # All the jobs should be cleaned up
        result = getJobsAction.execute(state = 'Success', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)


        logDir = os.path.join(self.testDir, 'logs')

        for job in completeJobs:
            self.assertFalse(os.path.exists(job['fwjr_path']))
            jobFolder = 'JobCluster_%i' \
                    % (int(job['id']/config.JobArchiver.numberOfJobsToCluster))
            jobPath = os.path.join(logDir, jobFolder, 'Job_%i.tar' %(job['id']))
            self.assertTrue(os.path.isfile(jobPath))
            self.assertTrue(os.path.getsize(jobPath) > 0)




        ###########################################################################
        # Now the TaskArchiver


        config.Agent.componentName = 'TaskArchiver'
        testTaskArchiver = TaskArchiverPoller(config = config)


        testTaskArchiver.algorithm()


        result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
        self.assertEqual(len(result), 0)


        for jdict in completeJobs:
            job = Job(id = jdict['id'])
            self.assertFalse(job.exists())





        if os.path.isdir('testDir'):
            shutil.rmtree('testDir')
        shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'testDir'))




        return
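
Stripped of the assertions, the straight-through flow above repeats the same pattern once per component. A condensed sketch with a hypothetical helper name; the poller classes are the ones this test already imports:

def runAgentChain(config, pollerSequence):
    # pollerSequence is an ordered list of (componentName, PollerClass) pairs,
    # e.g. JobCreatorPoller, JobSubmitterPoller, JobTrackerPoller,
    # JobAccountantPoller, JobArchiverPoller and TaskArchiverPoller.
    # Running them in this order moves jobs through Created -> Executing ->
    # Complete -> Success -> Cleanout, as asserted in the test above.
    for componentName, PollerClass in pollerSequence:
        config.Agent.componentName = componentName
        poller = PollerClass(config = config)
        if hasattr(poller, "setup"):
            # JobTracker and JobAccountant need an explicit setup() call
            poller.setup()
        poller.algorithm()
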
Example #47
class DBSUploadTest(unittest.TestCase):
    """
    TestCase for DBSUpload module

    """
    def setUp(self):
        """
        _setUp_

        setUp function for unittest

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)
        self.testDir = self.testInit.generateWorkDir(deleteOnDestruction = False)
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                        logger = myThread.logger,
                                        dbinterface = myThread.dbi)

        self.buffer3Factory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                         logger = myThread.logger,
                                         dbinterface = myThread.dbi)

        locationAction = self.bufferFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")
        locationAction.execute(siteName = "malpaquet")
        self.dbsUrl = "https://*****:*****"

    @attr("integration")
    def testBasicUpload(self):
        """
        _testBasicUpload_

        Verify that we can successfully upload to DBS3.  Also verify that the
        uploader correctly handles files parentage when uploading.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config = config)

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)

        # The algorithm needs to be run twice.  On the first iteration it will
        # create all the blocks and upload one.  On the second iteration it will
        # timeout and upload the second block.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        # Verify the files made it into DBS3.
        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Inject some more parent files and some child files into DBSBuffer.
        # Run the uploader twice, only the parent files should be added to DBS3.
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"],
                        parentFiles + moreParentFiles)

        # Run the uploader another two times to upload the child files.  Verify
        # that the child files were uploaded.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    @attr("integration")
    def testDualUpload(self):
        """
        _testDualUpload_

        Verify that the dual upload mode works correctly.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig(dbs3UploadOnly = True)
        dbsUploader = DBSUploadPoller(config = config)
        dbsUtil = DBSBufferUtil()

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)

        allFiles = parentFiles + moreParentFiles
        allBlocks = []
        for i in range(4):
            blockName = parentFiles[0]["datasetPath"] + "#" + makeUUID()
            dbsBlock = DBSBlock(blockName, "malpaquet", 1)
            dbsBlock.status = "Open"                
            dbsUtil.createBlocks([dbsBlock])
            for file in allFiles[i * 5 : (i * 5) + 5]:
                dbsBlock.addFile(file)
                dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})
                if i < 2:
                    dbsBlock.status = "InDBS"
                dbsUtil.updateBlocks([dbsBlock])
            dbsUtil.updateFileStatus([dbsBlock], "InDBS")
            allBlocks.append(dbsBlock)            

        blockName = childFiles[0]["datasetPath"] + "#" + makeUUID()
        dbsBlock = DBSBlock(blockName, "malpaquet", 1)
        dbsBlock.status = "InDBS"
        dbsUtil.createBlocks([dbsBlock])
        for file in childFiles:
            dbsBlock.addFile(file)
            dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})

        dbsUtil.updateFileStatus([dbsBlock], "InDBS")

        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Change the status of the rest of the parent blocks so we can upload
        # them and the children.
        for dbsBlock in allBlocks:
            dbsBlock.status = "InDBS"            
            dbsUtil.updateBlocks([dbsBlock])

        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles + moreParentFiles)

        # Run the uploader one more time to upload the children.
        dbsUploader.algorithm()
        time.sleep(5)        
    
        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    def testCloseSettingsPerWorkflow(self):
        """
        _testCloseSettingsPerWorkflow_

        Test the block closing mechanics in the DBS3 uploader,
        this uses a fake dbs api to avoid reliance on external services.
        """
        # Signal trapExit that we are a friend
        os.environ["DONT_TRAP_EXIT"] = "True"
        try:
            # Monkey patch the imports of DbsApi
            from WMComponent.DBS3Buffer import DBSUploadPoller as MockDBSUploadPoller
            MockDBSUploadPoller.DbsApi = MockDbsApi
    
            # Set the poller and the dbsUtil for verification
            myThread = threading.currentThread()
            (_, dbsFilePath) = mkstemp(dir = self.testDir)
            self.dbsUrl = dbsFilePath
            config = self.getConfig()
            dbsUploader = MockDBSUploadPoller.DBSUploadPoller(config = config)
            dbsUtil = DBSBufferUtil()
    
            # First test is event based limits and timeout with no new files.
            # Set the files and workflow
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 100,
                                MaxEvents = 150)
            self.createParentFiles(acqEra, nFiles = 20,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
    
            # The algorithm needs to be run twice.  On the first iteration it will
            # create all the blocks and upload one with less than 150 events.
            # On the second iteration the second block is uploaded.
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            globalFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'InDBS'")[0].fetchall()
            notUploadedFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'NOTUPLOADED'")[0].fetchall()
            self.assertEqual(len(globalFiles), 14)
            self.assertEqual(len(notUploadedFiles), 6)
            # Check the fake DBS for data
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 2)
            for block in fakeDBSInfo:
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['file_count'], 7)
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
            time.sleep(3)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 3)
            for block in fakeDBSInfo:
                if block['block']['file_count'] != 6:
                    self.assertEqual(block['block']['file_count'], 7)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Now check the limit by size and timeout with new files
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 5,
                                MaxEvents = 200000000)
            self.createParentFiles(acqEra, nFiles = 16,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 6)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertTrue('close_settings' not in block)
                self.assertEqual(block['block']['open_for_writing'], 0)
    
            # Put more files, they will go into the same block and then it will be closed
            # after timeout
            time.sleep(3)
            self.createParentFiles(acqEra, nFiles = 3,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 7)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] < 5:
                        self.assertEqual(block['block']['file_count'], 4)
                    else:
                        self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Finally test size limits
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 1, MaxFiles = 500,
                                MaxEvents = 200000000, MaxSize = 2048)
            self.createParentFiles(acqEra, nFiles = 7,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            time.sleep(2)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
    
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 11)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] != 1:
                        self.assertEqual(block['block']['block_size'], 2048)
                        self.assertEqual(block['block']['file_count'], 2)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
        except Exception, ex:
            self.fail("We failed at some point in the test: %s" % str(ex))
        finally:
            # We don't trust anyone else with _exit
            del os.environ["DONT_TRAP_EXIT"]
        return
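
The per-workflow close settings exercised above amount to four thresholds. The following is a simplified restatement of the rule the assertions check, not WMCore's actual implementation; the dictionary keys are illustrative:

def blockShouldClose(block, settings, now):
    # A block is closed as soon as any workflow-level limit is hit:
    # too many files, too many events, too large, or open for longer
    # than MaxWaitTime seconds since creation.
    return (block["file_count"] >= settings["MaxFiles"] or
            block["events"] >= settings["MaxEvents"] or
            block["size"] >= settings["MaxSize"] or
            now - block["creation_time"] > settings["MaxWaitTime"])
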
Example #48
File: Base_t.py  Project: tsarangi/WMCore
class BaseTest(unittest.TestCase):
    """
    Some methods of this class are made static and are used from
    other test cases.

    """
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        self.testComponentDaemonXml = "/tmp/TestComponent/Daemon.xml"

    def tearDown(self):
        self.testInit.delWorkDir()
        self.generator = None
        # if the directory and file "/tmp/TestComponent/Daemon.xml" still
        # exist after the ComponentsPoller test, delete them
        d = os.path.dirname(self.testComponentDaemonXml)
        if os.path.exists(d):
            shutil.rmtree(d)

    def testSenderReceiverBasic(self):
        sender = Sender(self.config.Alert.address,
                        self.config.Alert.controlAddr, self.__class__.__name__)
        handler, receiver = utils.setUpReceiver(self.config.Alert.address,
                                                self.config.Alert.controlAddr)
        a = Alert(Component=inspect.stack()[0][3])
        sender(a)
        while len(handler.queue) == 0:
            time.sleep(0.5)
            print "%s waiting for alert to arrive" % inspect.stack()[0][3]
        receiver.shutdown()
        self.assertEqual(len(handler.queue), 1)
        self.assertEqual(handler.queue[0]["Component"], inspect.stack()[0][3])

    def testProcessDetailBasic(self):
        pid = os.getpid()
        name = inspect.stack()[0][3]  # test name
        pd = ProcessDetail(pid, name)
        self.assertEqual(pd.pid, pid)
        self.assertEqual(pd.name, name)
        self.assertEqual(pd.proc.pid, pid)
        numChildren = len(psutil.Process(pid).get_children())
        self.assertEqual(len(pd.children), numChildren)
        self.assertEqual(len(pd.allProcs), 1 + numChildren)
        d = pd.getDetails()
        self.assertEqual(d["pid"], pid)
        self.assertEqual(d["component"], name)
        self.assertEqual(d["numChildrenProcesses"], numChildren)
        pd.refresh()

    def testMeasurementsBasic(self):
        numMes = 10
        mes = Measurements(numMes)
        self.assertEqual(mes._numOfMeasurements, numMes)
        self.assertEqual(len(mes), 0)
        mes.append(20)
        self.assertEqual(len(mes), 1)
        self.assertEqual(mes[0], 20)
        mes.append(30)
        self.assertEqual(mes[1], 30)
        mes.clear()
        self.assertEqual(len(mes), 0)
        self.assertEqual(mes._numOfMeasurements, numMes)

    def testBasePollerBasic(self):
        config = getConfig("/tmp")
        # create a nonsense config section; we just need a bunch of values defined
        config.AlertGenerator.section_("bogusPoller")
        config.AlertGenerator.bogusPoller.soft = 5  # [percent]
        config.AlertGenerator.bogusPoller.critical = 50  # [percent]
        config.AlertGenerator.bogusPoller.pollInterval = 2  # [second]
        config.AlertGenerator.bogusPoller.period = 10

        generator = utils.AlertGeneratorMock(config)
        poller = BasePoller(config.AlertGenerator.bogusPoller, generator)
        # define dummy check method
        poller.check = lambda: 1 + 1
        poller.start()
        # poller now runs
        time.sleep(config.AlertGenerator.bogusPoller.pollInterval * 2)
        poller.terminate()
        while poller.is_alive():
            time.sleep(0.2)
            print "%s waiting for test poller to terminate" % inspect.stack(
            )[0][3]

    def testBasePollerHandleFailedPolling(self):
        config = getConfig("/tmp")
        # create a nonsense config section; we just need a bunch of values defined
        config.AlertGenerator.section_("bogusPoller")
        config.AlertGenerator.bogusPoller.soft = 5  # [percent]
        config.AlertGenerator.bogusPoller.critical = 50  # [percent]
        config.AlertGenerator.bogusPoller.pollInterval = 2  # [second]
        config.AlertGenerator.bogusPoller.period = 10

        generator = utils.AlertGeneratorMock(config)
        poller = BasePoller(config.AlertGenerator.bogusPoller, generator)
        ex = Exception("test exception")

        class Sender(object):
            def __call__(self, alert):
                self.alert = alert

        poller.sender = Sender()
        poller._handleFailedPolling(ex)
        self.assertEqual(poller.sender.alert["Source"], "BasePoller")

    def testPeriodPollerOnRealProcess(self):
        config = getConfig("/tmp")
        config.component_("AlertProcessor")
        config.AlertProcessor.section_("critical")
        config.AlertProcessor.section_("soft")
        config.AlertProcessor.critical.level = 5
        config.AlertProcessor.soft.level = 0
        config.component_("AlertGenerator")
        config.AlertGenerator.section_("bogusPoller")
        config.AlertGenerator.bogusPoller.soft = 5  # [percent]
        config.AlertGenerator.bogusPoller.critical = 50  # [percent]
        config.AlertGenerator.bogusPoller.pollInterval = 0.2  # [second]
        # period during which measurements are collected before evaluating for
        # possible alert triggering
        config.AlertGenerator.bogusPoller.period = 1

        generator = utils.AlertGeneratorMock(config)
        poller = PeriodPoller(config.AlertGenerator.bogusPoller, generator)
        poller.sender = utils.SenderMock()
        # get CPU usage percentage, it's like measuring CPU usage of a real
        # component, so use the appropriate poller's method for that
        # (PeriodPoller itself is higher-level class so it doesn't define
        # a method to provide sampling data)
        poller.sample = lambda processDetail: ComponentsCPUPoller.sample(
            processDetail)

        # get own pid
        pid = os.getpid()
        name = inspect.stack()[0][3]  # test name
        pd = ProcessDetail(pid, name)
        # need to repeat sampling required number of measurements
        numOfMeasurements = int(config.AlertGenerator.bogusPoller.period /
                                config.AlertGenerator.bogusPoller.pollInterval)
        mes = Measurements(numOfMeasurements)
        self.assertEqual(len(mes), 0)
        for i in range(mes._numOfMeasurements):
            poller.check(pd, mes)

        # since the whole measurement cycle was done, values should have been nulled
        self.assertEqual(len(mes), 0)

    def testPeriodPollerCalculationPredefinedInput(self):
        config = getConfig("/tmp")
        config.component_("AlertProcessor")
        config.AlertProcessor.section_("critical")
        config.AlertProcessor.section_("soft")
        config.AlertProcessor.critical.level = 5
        config.AlertProcessor.soft.level = 0
        config.component_("AlertGenerator")
        config.AlertGenerator.section_("bogusPoller")
        # put some threshold numbers, just need to check output calculation
        # from check() method
        config.AlertGenerator.bogusPoller.soft = 5  # [percent]
        config.AlertGenerator.bogusPoller.critical = 50  # [percent]
        config.AlertGenerator.bogusPoller.pollInterval = 0.2  # [second]
        config.AlertGenerator.bogusPoller.period = 1

        generator = utils.AlertGeneratorMock(config)
        poller = PeriodPoller(config.AlertGenerator.bogusPoller, generator)
        # since poller may trigger an alert, give it mock sender
        poller.sender = utils.SenderMock()
        # provide sample method with predefined input, float
        predefInput = 10.12
        poller.sample = lambda processDetail: predefInput

        processDetail = None
        numOfMeasurements = int(config.AlertGenerator.bogusPoller.period /
                                config.AlertGenerator.bogusPoller.pollInterval)
        mes = Measurements(numOfMeasurements)
        for i in range(mes._numOfMeasurements):
            poller.check(processDetail, mes)

        # the above loop runs 5 times, so 5 x predefInput values are evaluated;
        # their average (10.12) exceeds the soft threshold and triggers a soft alert
        self.assertEqual(len(poller.sender.queue), 1)
        a = poller.sender.queue[0]

        self.assertEqual(a["Component"], generator.__class__.__name__)
        self.assertEqual(a["Source"], poller.__class__.__name__)
        d = a["Details"]
        self.assertEqual(d["threshold"],
                         "%s%%" % config.AlertGenerator.bogusPoller.soft)
        self.assertEqual(d["numMeasurements"], mes._numOfMeasurements)
        self.assertEqual(d["period"], config.AlertGenerator.bogusPoller.period)
        self.assertEqual(d["average"], "%s%%" % predefInput)
        # since the whole measurement cycle was done, values should have been nulled
        self.assertEqual(len(mes), 0)
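
As a quick check of the averaging arithmetic the last test relies on (the numbers are copied from the configuration and predefInput above):

pollInterval, period = 0.2, 1
numOfMeasurements = int(period / pollInterval)    # 5 samples per cycle
samples = [10.12] * numOfMeasurements             # predefInput from the test
average = sum(samples) / len(samples)             # 10.12
soft, critical = 5, 50                            # percent thresholds
assert soft <= average < critical                 # -> one soft-level alert is sent
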
Example #49
class RuntimeTest(unittest.TestCase):
    """
    _RuntimeTest_

    A unittest to test the WMRuntime/WMSpec/Storage/etc tree
    """


    # This is an integration test
    __integration__ = "Any old bollocks"


    def setUp(self):
        """
        Basic setUp

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()


        self.testDir = self.testInit.generateWorkDir()

        # Random variables
        self.workloadDir = None
        self.unpackDir   = None
        self.initialDir  = os.getcwd()
        self.origPath    = sys.path


        # Create some dirs
        os.makedirs(os.path.join(self.testDir, 'packages'))

        return


    def tearDown(self):
        """
        _tearDown_
        
        Remove any references you put directly into the modules
        """

        self.testInit.delWorkDir()

        # Clean up imports
        if 'WMSandbox' in sys.modules.keys():
            del sys.modules['WMSandbox']
        if 'WMSandbox.JobIndex' in sys.modules.keys():
            del sys.modules['WMSandbox.JobIndex']
        

        return


    def createTestWorkload(self, workloadName = 'Test', emulator = True):
        """
        _createTestWorkload_

        Creates a test workload for us to run on, holding the basic necessities.
        """

        workloadDir = os.path.join(self.testDir, workloadName)

        #arguments = getTestArguments()

        #workload = rerecoWorkload("Tier1ReReco", arguments)
        #rereco = workload.getTask("ReReco")

        workload = testWorkload(emulation = emulator)
        rereco = workload.getTask("ReReco")

        # Set environment and site-local-config
        siteConfigPath = os.path.join(workloadDir, 'SITECONF/local/JobConfig/')
        if not os.path.exists(siteConfigPath):
            os.makedirs(siteConfigPath)
        shutil.copy('site-local-config.xml', siteConfigPath)
        environment = rereco.data.section_('environment')
        environment.CMS_PATH = workloadDir

        taskMaker = TaskMaker(workload, workloadDir)
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        workload.save(workloadName)

        return workload



    def unpackComponents(self, workload):
        """
        Run the unpacker to build the directories
        IMPORTANT NOTE:
          This is not how we do things on the worker node
          On the worker node we do not run multiple tasks
          So here we create multiple tasks in different directories
          To mimic running on multiple systems

        """

        listOfTasks = getListOfTasks(workload = workload)

        self.unpackDir = os.path.join(self.testDir, 'unpack')

        if not os.path.exists(self.unpackDir):
            os.makedirs(self.unpackDir)

        os.chdir(self.unpackDir)

        sandbox  = workload.data.sandbox

        for task in listOfTasks:
            # We have to create a directory, unpack in it, and then get out
            taskName = task.name()
            taskDir = os.path.join(self.unpackDir, taskName)
            if not os.path.exists(taskDir):
                # Well then we have to make it
                os.makedirs(taskDir)
            os.chdir(taskDir)
            # Now that we're here, run the unpacker

            package  = os.path.join(self.testDir, 'packages', '%sJobPackage.pkl' % (taskName))
            jobIndex = 1

            RunUnpacker(sandbox = sandbox, package = package,
                        jobIndex = jobIndex, jobname = taskName)

            # And go back to where we started
            os.chdir(self.unpackDir)


        os.chdir(self.initialDir)

        return


    def createWMBSComponents(self, workload):
        """
        Create the WMBS Components for this job

        """

        listOfTasks = []
        listOfSubs  = []

        rerecoTask  = None
        
        for primeTask in workload.taskIterator():
            # There should only be one prime task, and it should be the rerecoTask
            rerecoTask = primeTask
            for task in primeTask.taskIterator():
                listOfTasks.append(task)

        for task in listOfTasks:
            fileset = self.getFileset()
            sub = self.createSubscriptions(task = task,
                                           fileset = fileset)
            #listOfSubs.append(sub)

        

        return



    def createSubscriptions(self, task, fileset):
        """
        Create a subscription based on a task


        """
        type = task.taskType()
        work = task.makeWorkflow()
        
        sub = Subscription(fileset = fileset,
                           workflow = work,
                           split_algo = "FileBased",
                           type = type)

        package = self.createWMBSJobs(subscription = sub,
                                      task = task)        

        packName = os.path.join(self.testDir, 'packages',
                                '%sJobPackage.pkl' %(task.name()))
        package.save(packName)

        return sub




    def createWMBSJobs(self, subscription, task):
        """
        Create the jobs for WMBS Components
        Send a subscription/task, get back a package.

        """

        splitter = SplitterFactory()
        jobfactory = splitter(subscription = subscription,
                              package = "WMCore.DataStructs",
                              generators = makeGenerators(task))
        params = task.jobSplittingParameters()
        jobGroups = jobfactory(**params)

        jobID = 1
        package = JobPackage()
        for group in jobGroups:
            for job in group.jobs:
                job['id'] = jobID
                jobID += 1
                package[job['id']] = job

        return package


    

    def getFileset(self):
        """
        Get a fileset based on the task

        """

        fileset = Fileset(name = 'Merge%s' %(type))


        for i in range(0, random.randint(15,25)):
            # Use the testDir to generate a random lfn
            inpFile = File(lfn = "%s/%s.root" %(self.testDir, makeUUID()),
                           size = random.randint(200000, 1000000),
                           events = random.randint(1000,2000) )
            inpFile.setLocation('Megiddo')
            fileset.addFile(inpFile)


        return fileset



    def runJobs(self, workload):
        """
        This might actually run the job.  Who knows?


        """
        listOfTasks = []

        for primeTask in workload.taskIterator():
            listOfTasks.append(primeTask)
            # Only run primeTasks for now


        for task in listOfTasks:
            jobName = task.name()
            taskDir = os.path.join(self.unpackDir, jobName, 'job')
            os.chdir(taskDir)
            sys.path.append(taskDir)

            # Scream, run around in panic, blow up machine
            print "About to run jobs"
            print taskDir
            miniStartup(dir = taskDir)
            

            # When exiting, go back to where you started
            os.chdir(self.initialDir)
            sys.path.remove(taskDir)
            
        return





    def testA_CreateWorkload(self):
        """
        _CreateWorkload_
        
        Create a workload
        Unpack the workload
        Check for consistency
        """

        workloadName = 'basicWorkload'
        workload     = self.createTestWorkload(workloadName = workloadName)

        self.createWMBSComponents(workload = workload)

        taskNames = []
        for task in getListOfTasks(workload = workload):
            taskNames.append(task.name())

        workloadPath  = os.path.join(self.testDir, workloadName, "TestWorkload")
        siteConfigDir = os.path.join(self.testDir, workloadName, 'SITECONF/local/JobConfig/')


        # Pre-run checks

        # Does it have the right directories?
        dirList = os.listdir(workloadPath)
        self.assertEqual(dirList, ['WMSandbox', 'TestWorkload-Sandbox.tar.bz2'])
        dirList = os.listdir(os.path.join(workloadPath, 'WMSandbox'))
        for taskName in taskNames:
            self.assertTrue(taskName in dirList)

        # Do we have job packages
        for task in taskNames:
            self.assertTrue('%sJobPackage.pkl' % (task) in os.listdir(os.path.join(self.testDir, 'packages')))


        # Does it have the SITECONF?
        self.assertTrue('site-local-config.xml' in os.listdir(siteConfigDir))



        # Now actually see if you can unpack it.
        self.unpackComponents(workload = workload)


        # Check for proper unpacking
        # Check that the task has the right directories,
        # and that the PSetTweaks and WMSandbox directories
        # have the right contents
        taskContents = ['WMSandbox', 'WMCore', 'PSetTweaks']
        PSetContents = ['PSetTweak.pyc', 'CVS', 'PSetTweak.py',
                        '__init__.pyc', 'WMTweak.py', '__init__.py']
        taskSandbox  = ['JobPackage.pcl', 'JobIndex.py', '__init__.py', 'WMWorkload.pkl']
        taskSandbox.extend(taskNames)  # Should have a directory for each task

        for task in taskNames:
            self.assertTrue(task in os.listdir(os.path.join(self.testDir, 'unpack')))
            taskDir = os.path.join(self.testDir, 'unpack', task, 'job')
            self.assertTrue(os.path.isdir(taskDir))
            # list.sort() sorts in place and returns None, so compare sorted copies
            self.assertEqual(sorted(os.listdir(taskDir)), sorted(taskContents))
            self.assertEqual(sorted(os.listdir(os.path.join(taskDir, 'WMSandbox'))),
                             sorted(taskSandbox))
            self.assertEqual(sorted(os.listdir(os.path.join(taskDir, 'PSetTweaks'))),
                             sorted(PSetContents))


        # And we're done.
        # Assume if we got this far everything is good
        

        # At the end, copy the directory
        #if os.path.exists('tmpDir'):
        #    shutil.rmtree('tmpDir')
        #shutil.copytree(self.testDir, 'tmpDir')

        return


    def testB_EmulatorTest(self):
        """
        _EmulatorTest_
        
        This is where things get scary.  We need to not only unpack the job,
        but also ascertain whether it can run locally in emulator mode.

        This requires...uh...emulator emulation.
        """


        # Assume all this works, because we tested it in testA
        workloadName = 'basicWorkload'
        workload     = self.createTestWorkload(workloadName = workloadName)

        self.createWMBSComponents(workload = workload)

        self.unpackComponents(workload = workload)


        self.runJobs(workload = workload)

        # Check the report
        taskDir = os.path.join(self.testDir, 'unpack/ReReco/job/WMTaskSpace')
        report = Report()
        report.load(os.path.join(taskDir, 'Report.0.pkl'))
        cmsReport = report.data.cmsRun1



        # Now validate the report
        self.assertEqual(report.data.ceName, socket.gethostname())
        self.assertEqual(report.data.seName, 'cmssrm.fnal.gov')
        self.assertEqual(report.data.siteName, 'T1_US_FNAL')
        self.assertEqual(report.data.hostName, socket.gethostname())
        self.assertTrue(report.data.completed)

        # Should have status 0 (emulator job)
        self.assertEqual(cmsReport.status, 0)

        # Should have one output module
        self.assertEqual(cmsReport.outputModules, ['TestOutputModule'])

        # It should have one file for input and output
        self.assertEqual(cmsReport.input.PoolSource.files.fileCount, 1)
        self.assertEqual(cmsReport.output.TestOutputModule.files.fileCount, 1)

        # So, um, I guess we're done


        # At the end, copy the directory
        #if os.path.exists('tmpDir'):
        #    shutil.rmtree('tmpDir')
        #shutil.copytree(self.testDir, 'tmpDir')

        return
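# The same Report API exercised in testB_EmulatorTest can also be used on its
# own to inspect a pickled framework job report.  A minimal sketch; the import
# path and the pickle location below are assumptions made for illustration,
# not taken from the test above.
from WMCore.FwkJobReport.Report import Report

def printReportSummary(reportPath="WMTaskSpace/Report.0.pkl"):
    # load the pickled report and print a few of the fields the test checks
    report = Report()
    report.load(reportPath)
    cmsReport = report.data.cmsRun1
    print cmsReport.status, cmsReport.outputModules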
class SiblingProcessingBasedTest(unittest.TestCase):
    """
    _SiblingProcessingBasedTest_

    Test SiblingProcessing job splitting.
    """
    def setUp(self):
        """
        _setUp_

        Setup the database connections and schema.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationAction = daofactory(classname = "Locations.New")
        locationAction.execute("T2_CH_CERN", pnn = "T2_CH_CERN")
        locationAction.execute("T1_US_FNAL", pnn = "T1_US_FNAL_Disk")

        self.testFilesetA = Fileset(name = "FilesetA")
        self.testFilesetA.create()
        self.testFilesetB = Fileset(name = "FilesetB")
        self.testFilesetB.create()

        self.testFileA = File("testFileA", size = 1000, events = 100,
                              locations = set(["T2_CH_CERN"]))
        self.testFileA.create()
        self.testFileB = File("testFileB", size = 1000, events = 100,
                              locations = set(["T2_CH_CERN"]))
        self.testFileB.create()
        self.testFileC = File("testFileC", size = 1000, events = 100,
                              locations = set(["T2_CH_CERN"]))
        self.testFileC.create()

        self.testFilesetA.addFile(self.testFileA)
        self.testFilesetA.addFile(self.testFileB)
        self.testFilesetA.addFile(self.testFileC)
        self.testFilesetA.commit()

        self.testFileD = File("testFileD", size = 1000, events = 100,
                              locations = set(["T2_CH_CERN"]))
        self.testFileD.create()
        self.testFileE = File("testFileE", size = 1000, events = 100,
                              locations = set(["T2_CH_CERN"]))
        self.testFileE.create()
        self.testFileF = File("testFileF", size = 1000, events = 100,
                              locations = set(["T2_CH_CERN"]))
        self.testFileF.create()

        self.testFilesetB.addFile(self.testFileD)
        self.testFilesetB.addFile(self.testFileE)
        self.testFilesetB.addFile(self.testFileF)
        self.testFilesetB.commit()

        testWorkflowA = Workflow(spec = "specA.xml", owner = "Steve",
                                 name = "wfA", task = "Test")
        testWorkflowA.create()
        testWorkflowB = Workflow(spec = "specB.xml", owner = "Steve",
                                 name = "wfB", task = "Test")
        testWorkflowB.create()
        testWorkflowC = Workflow(spec = "specC.xml", owner = "Steve",
                                 name = "wfC", task = "Test")
        testWorkflowC.create()
        testWorkflowD = Workflow(spec = "specD.xml", owner = "Steve",
                                 name = "wfD", task = "Test")
        testWorkflowD.create()

        self.testSubscriptionA = Subscription(fileset = self.testFilesetA,
                                              workflow = testWorkflowA,
                                              split_algo = "FileBased",
                                              type = "Processing")
        self.testSubscriptionA.create()
        self.testSubscriptionB = Subscription(fileset = self.testFilesetB,
                                              workflow = testWorkflowB,
                                              split_algo = "FileBased",
                                              type = "Processing")
        self.testSubscriptionB.create()
        self.testSubscriptionC = Subscription(fileset = self.testFilesetB,
                                              workflow = testWorkflowC,
                                              split_algo = "FileBased",
                                              type = "Processing")
        self.testSubscriptionC.create()
        self.testSubscriptionD = Subscription(fileset = self.testFilesetB,
                                              workflow = testWorkflowD,
                                              split_algo = "FileBased",
                                              type = "Processing")
        self.testSubscriptionD.create()

        deleteWorkflow = Workflow(spec = "specE.xml", owner = "Steve",
                                  name = "wfE", task = "Test")
        deleteWorkflow.create()

        self.deleteSubscriptionA = Subscription(fileset = self.testFilesetA,
                                                workflow = deleteWorkflow,
                                                split_algo = "SiblingProcessingBased",
                                                type = "Cleanup")
        self.deleteSubscriptionA.create()
        self.deleteSubscriptionB = Subscription(fileset = self.testFilesetB,
                                                workflow = deleteWorkflow,
                                                split_algo = "SiblingProcessingBased",
                                                type = "Cleanup")
        self.deleteSubscriptionB.create()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase()
        return

    def testSiblingProcessing(self):
        """
        _testSiblingProcessing_

        Verify that the sibling processing split works correctly dealing with
        failed files and acquiring files correctly.
        """
        splitter = SplitterFactory()
        deleteFactoryA = splitter(package = "WMCore.WMBS",
                                  subscription = self.deleteSubscriptionA)
        deleteFactoryB = splitter(package = "WMCore.WMBS",
                                  subscription = self.deleteSubscriptionB)

        result = deleteFactoryA()

        assert len(result) == 0, \
               "Error: No jobs should be returned."

        result = deleteFactoryB()

        assert len(result) == 0, \
               "Error: No jobs should be returned."

        self.testSubscriptionA.completeFiles(self.testFileA)

        result = deleteFactoryA(files_per_job = 1)

        assert len(result) == 1, \
               "Error: Only one jobgroup should be returned."
        assert len(result[0].jobs) == 1, \
               "Error: There should only be one job in the jobgroup."
        assert result[0].jobs[0]["possiblePSN"] == set(["T2_CH_CERN"]), \
               "Error: possiblePSN is wrong."
        assert len(result[0].jobs[0]["input_files"]) == 1, \
               "Error: Job should only have one input file."
        assert result[0].jobs[0]["input_files"][0]["lfn"] == "testFileA", \
               "Error: Input file for job is wrong."

        result = deleteFactoryB(files_per_job = 1)

        assert len(result) == 0, \
               "Error: Second subscription should have no jobs."

        result = deleteFactoryA(files_per_job = 1)

        assert len(result) == 0, \
               "Error: No jobs should have been created."

        self.testSubscriptionB.completeFiles(self.testFileD)
        self.testSubscriptionC.failFiles(self.testFileD)

        result = deleteFactoryA(files_per_job = 1)

        assert len(result) == 0, \
               "Error: No jobs should have been created."

        result = deleteFactoryB(files_per_job = 1)

        assert len(result) == 0, \
               "Error: No jobs should have been created."

        self.testSubscriptionD.failFiles(self.testFileD)

        result = deleteFactoryA(files_per_job = 1)

        assert len(result) == 0, \
               "Error: No jobs should have been created."

        result = deleteFactoryB(files_per_job = 1)

        assert len(result) == 0, \
               "Error: No job groups should have been created."

        self.testSubscriptionB.completeFiles([self.testFileE, self.testFileF])
        self.testSubscriptionC.completeFiles([self.testFileE, self.testFileF])
        self.testSubscriptionD.completeFiles([self.testFileE, self.testFileF])

        result = deleteFactoryB(files_per_job = 10)

        assert len(result) == 0, \
               "Error: No jobs should have been created."

        self.testFilesetB.markOpen(False)

        result = deleteFactoryB(files_per_job = 10)

        assert len(result) == 1, \
               "Error: One jobgroup should have been returned."
        assert len(result[0].jobs) == 1, \
               "Error: There should only be one job in the jobgroup."
        assert len(result[0].jobs[0]["input_files"]) == 2, \
               "Error: Job should only have one input file."

        lfns = [result[0].jobs[0]["input_files"][0]["lfn"], result[0].jobs[0]["input_files"][1]["lfn"]]

        assert "testFileE" in lfns, \
               "Error: TestFileE missing from job input."
        assert "testFileF" in lfns, \
               "Error: TestFileF missing from job input."

        self.assertEqual(len(self.deleteSubscriptionB.availableFiles()), 0,
                         "Error: There should be no available files.")

        completeFiles = self.deleteSubscriptionB.filesOfStatus("Completed")
        self.assertEqual(len(completeFiles), 1,
                         "Error: There should only be one complete file.")
        self.assertEqual(list(completeFiles)[0]["lfn"], "testFileD",
                         "Error: Test file D should be complete.")

        return

    def testMultipleLocations(self):
        """
        _testMultipleLocations_

        Verify that the sibling processing based algorithm doesn't create jobs
        that run over files at multiple sites.
        """
        testFile1 = File("testFile1", size = 1000, events = 100,
                         locations = set(["T1_US_FNAL_Disk"]))
        testFile1.create()
        testFile2 = File("testFile2", size = 1000, events = 100,
                         locations = set(["T1_US_FNAL_Disk"]))
        testFile2.create()
        testFile3 = File("testFile3", size = 1000, events = 100,
                         locations = set(["T1_US_FNAL_Disk"]))
        testFile3.create()

        self.testFilesetA.addFile(testFile1)
        self.testFilesetA.addFile(testFile2)
        self.testFilesetA.addFile(testFile3)
        self.testFilesetA.commit()
        self.testFilesetA.markOpen(False)

        self.testSubscriptionA.completeFiles([testFile1, testFile2, testFile3])
        self.testSubscriptionA.completeFiles([self.testFileA, self.testFileB, self.testFileC])

        splitter = SplitterFactory()
        deleteFactoryA = splitter(package = "WMCore.WMBS",
                                  subscription = self.deleteSubscriptionA)

        result = deleteFactoryA(files_per_job = 50)

        assert len(result) == 2, \
               "Error: Wrong number of jobgroups returned."

        goldenFilesA = ["testFileA", "testFileB", "testFileC"]
        goldenFilesB = ["testFile1", "testFile2", "testFile3"]

        for jobGroup in result:
            assert len(jobGroup.jobs) == 1, \
                   "Error: Wrong number of jobs in jobgroup."
            assert len(jobGroup.jobs[0]["input_files"]) == 3, \
                   "Error: Wrong number of input files in job."

            jobSite = jobGroup.jobs[0]["possiblePSN"]

            assert (jobSite == set(["T2_CH_CERN"])
                    or jobSite == set(["T1_US_FNAL"])), \
                    "Error: Wrong site for job."

            if jobSite == set(["T2_CH_CERN"]):
                goldenFiles = goldenFilesA
            else:
                goldenFiles = goldenFilesB

            for jobFile in jobGroup.jobs[0]["input_files"]:
                goldenFiles.remove(jobFile["lfn"])

            assert len(goldenFiles) == 0,  \
                   "Error: Files are missing."

        return

    def testLargeNumberOfFiles(self):
        """
        _testLargeNumberOfFiles_

        Setup a subscription with 500 files and verify that the splitting algo
        works correctly.
        """
        testWorkflowA = Workflow(spec = "specA.xml", owner = "Steve",
                                 name = "wfA", task = "Test")
        testWorkflowA.create()
        testWorkflowB = Workflow(spec = "specB.xml", owner = "Steve",
                                 name = "wfB", task = "Test")
        testWorkflowB.create()

        testFileset = Fileset(name = "TestFileset")
        testFileset.create()

        allFiles = []
        for i in range(500):
            testFile = File(str(i), size = 1000, events = 100,
                            locations = set(["T2_CH_CERN"]))
            testFile.create()
            allFiles.append(testFile)
            testFileset.addFile(testFile)
        testFileset.commit()

        testSubscriptionA = Subscription(fileset = testFileset,
                                         workflow = testWorkflowA,
                                         split_algo = "FileBased",
                                         type = "Processing")
        testSubscriptionA.create()
        testSubscriptionB = Subscription(fileset = testFileset,
                                         workflow = testWorkflowB,
                                         split_algo = "SiblingProcessingBased",
                                         type = "Processing")
        testSubscriptionB.create()

        testSubscriptionA.completeFiles(allFiles)

        splitter = SplitterFactory()
        deleteFactoryA = splitter(package = "WMCore.WMBS",
                                  subscription = testSubscriptionB)

        result = deleteFactoryA(files_per_job = 50)
        self.assertEqual(len(result), 1,
                         "Error: Wrong number of job groups returned.")
        self.assertEqual(len(result[0].jobs), 10,
                         "Error: Wrong number of jobs returned.")

        return

    def testFilesWithoutOtherSubscriptions(self):
        """
        _testFilesWithoutOtherSubscriptions_

        Test the case where files exist only in the delete subscription,
        which can happen if cleanup of the other subscriptions is fast.

        """
        testWorkflowA = Workflow(spec = "specA.xml", owner = "Steve",
                                 name = "wfA", task = "Test")
        testWorkflowA.create()

        testFileset = Fileset(name = "TestFileset")
        testFileset.create()

        allFiles = []
        for i in range(500):
            testFile = File(str(i), size = 1000, events = 100,
                            locations = set(["T2_CH_CERN"]))
            testFile.create()
            allFiles.append(testFile)
            testFileset.addFile(testFile)
        testFileset.commit()

        testSubscriptionA = Subscription(fileset = testFileset,
                                         workflow = testWorkflowA,
                                         split_algo = "SiblingProcessingBased",
                                         type = "Processing")
        testSubscriptionA.create()

        splitter = SplitterFactory()
        deleteFactoryA = splitter(package = "WMCore.WMBS",
                                  subscription = testSubscriptionA)

        result = deleteFactoryA(files_per_job = 50)
        self.assertEqual(len(result), 1,
                         "Error: Wrong number of job groups returned.")
        self.assertEqual(len(result[0].jobs), 10,
                         "Error: Wrong number of jobs returned.")

        return
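# A rough sketch of the availability rule the SiblingProcessingBased tests
# above exercise: a file is handed to the cleanup subscription only once every
# sibling subscription on the same fileset has finished with it (completed or
# failed), and the resulting delete jobs never mix files from different
# locations.  This illustrates the expected behaviour and is not the actual
# WMCore splitting code.
def deletableFilesByLocation(files, siblingStatus):
    # files: list of dicts with "lfn" and "location" keys
    # siblingStatus: {lfn: list of that file's statuses in the sibling subscriptions}
    done = ("Completed", "Failed")
    byLocation = {}
    for f in files:
        if all(status in done for status in siblingStatus.get(f["lfn"], [])):
            byLocation.setdefault(f["location"], []).append(f["lfn"])
    return byLocation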
Example #51
class ConditionTest(unittest.TestCase):
    """
    _ConditionTest_

    Test for the Condition job splitter
    """
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules=["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        wmbsDaoFactory = DAOFactory(package="WMCore.WMBS",
                                    logger=logging,
                                    dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_pnn
                                    (location, pnn)
                                    VALUES (1, 'SomePNN')
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'TIME': int(time.time()),
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        insertLumiDAO.execute(binds={'RUN': 1, 'LUMI': 1}, transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "Express"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        insertStreamerDAO = daoFactory(classname="RunConfig.InsertStreamer")
        insertStreamerDAO.execute(binds={
            'RUN': 1,
            'LUMI': 1,
            'STREAM': "Express",
            'TIME': int(time.time()),
            'LFN': "/streamer",
            'FILESIZE': 0,
            'EVENTS': 0
        },
                                  transaction=False)

        insertPromptCalibrationDAO = daoFactory(
            classname="RunConfig.InsertPromptCalibration")
        insertPromptCalibrationDAO.execute({
            'RUN': 1,
            'STREAM': "Express"
        },
                                           transaction=False)

        self.fileset1 = Fileset(name="TestFileset1")
        self.fileset1.create()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset=self.fileset1,
                                          workflow=workflow1,
                                          split_algo="Condition",
                                          type="Condition")
        self.subscription1.create()

        # set parentage chain and sqlite fileset
        alcaRecoFile = File("/alcareco", size=0, events=0)
        alcaRecoFile.addRun(Run(1, *[1]))
        alcaRecoFile.setLocation("SomePNN", immediateSave=False)
        alcaRecoFile.create()
        alcaPromptFile = File("/alcaprompt", size=0, events=0)
        alcaPromptFile.addRun(Run(1, *[1]))
        alcaPromptFile.setLocation("SomePNN", immediateSave=False)
        alcaPromptFile.create()
        sqliteFile = File("/sqlite", size=0, events=0)
        sqliteFile.create()
        self.fileset1.addFile(sqliteFile)
        self.fileset1.commit()

        results = myThread.dbi.processData("""SELECT lfn FROM wmbs_file_details
                                              """,
                                           transaction=False)[0].fetchall()

        setParentageDAO = wmbsDaoFactory(classname="Files.SetParentage")
        setParentageDAO.execute(binds=[{
            'parent': "/streamer",
            'child': "/alcareco"
        }, {
            'parent': "/alcareco",
            'child': "/alcaprompt"
        }, {
            'parent': "/alcaprompt",
            'child': "/sqlite"
        }],
                                transaction=False)

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['runNumber'] = 1
        self.splitArgs['streamName'] = "Express"

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def isPromptCalibFinished(self):
        """
        _isPromptCalibFinished_

        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT finished
                                              FROM prompt_calib
                                              """,
                                          transaction=False)[0].fetchall()[0][0]

        return result

    def countPromptCalibFiles(self):
        """
        _countPromptCalibFiles_

        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT COUNT(*)
                                              FROM prompt_calib_file
                                              """,
                                          transaction=False)[0].fetchall()[0][0]

        return result

    def test00(self):
        """
        _test00_

        Make sure the job splitter behaves correctly.

        Just make sure the job splitter does nothing
        when the fileset is open and populates t0ast
        data structures when it's closed. In the latter
        case all input files should be marked as
        acquired without creating a job as well.

        """
        mySplitArgs = self.splitArgs.copy()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 0,
                         "ERROR: there should be no prompt_calib_file")

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.assertEqual(self.isPromptCalibFinished(), 1,
                         "ERROR: prompt_calib should be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        return
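# A minimal sketch of the gating that test00 checks, stated as an assumption
# about the Condition splitter rather than the actual T0 code: input files are
# recorded (and acquired) on every call without ever producing job groups, and
# the prompt_calib record is only flagged as finished once the fileset has
# been closed.
def conditionSplit(filesetOpen, inputFiles, promptCalib):
    # promptCalib: dict with a "files" set and a "finished" flag
    for lfn in inputFiles:
        promptCalib["files"].add(lfn)
    if not filesetOpen:
        promptCalib["finished"] = True
    return []  # the splitter never returns job groups here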
Example #52
class DashboardInterfaceTest(unittest.TestCase):
    """
    Test for the dashboard interface and its monitoring interaction

    Well, once I've written them it will be
    """


    def setUp(self):
        """
        Basically, do nothing

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()


        self.testDir = self.testInit.generateWorkDir()

        return


    def tearDown(self):
        """
        Clean up the test directory

        """

        self.testInit.delWorkDir()

        return


    def createWorkload(self):
        """
        Create a workload in order to test things

        """
        generator = WMSpecGenerator()
        workload = generator.createReRecoSpec("Tier1ReReco")
        return workload

    def createTestJob(self):
        """
        Create a test job to pass to the DashboardInterface

        """

        job = Job(name = "ThisIsASillyName")

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10)
        testFileA.addRun(Run(1, *[45]))
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
        testFileB.addRun(Run(1, *[46]))

        job.addFile(testFileA)
        job.addFile(testFileB)

        job['id'] = 1

        return job


    def createReport(self, outcome = 0):
        """
        Create a test report

        """

        jobReport = Report()
        jobReport.addStep('cmsRun1')
        jobReport.setStepStartTime(stepName = 'cmsRun1')
        jobReport.setStepStopTime(stepName = 'cmsRun1')
        if outcome:
            jobReport.addError('cmsRun1', 200, 'FakeError', 'FakeError')

        return jobReport


    def setupJobEnvironment(self, name = 'test'):
        """
        _setupJobEnvironment_

        Make some sort of environment in which to run tests
        """

        os.environ['WMAGENT_SITE_CONFIG_OVERRIDE'] = os.path.join(getTestBase(),
                                            "WMCore_t/Storage_t",
                                            "T1_US_FNAL_SiteLocalConfig.xml")
        return

    def testASuccessfulJobMonitoring(self):
        """
        _testASuccessfulJobMonitoring_

        Check that the data packets make sense when a job completes successfully
        """

        # Get the necessary objects
        name     = 'testA'
        job      = self.createTestJob()
        workload = self.createWorkload()
        task     = workload.getTask(taskName = "DataProcessing")
        report   = self.createReport()

        # Fill the job environment
        self.setupJobEnvironment(name = name)

        # Instantiate DBInfo
        dbInfo   = DashboardInfo(job = job, task = task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName = "cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step = step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        #Do a second step
        step = task.getStep(stepName = "cmsRun1")

        #Do the step start (It's not the first step)
        data = dbInfo.stepStart(step = step.data)
        self.assertEqual(data['jobStart'], None)
        self.assertEqual(data['2_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['2_ExeEnd'], step.name())
        self.assertEqual(data['2_ExeExitCode'], 0)
        self.assertTrue(data['2_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 2)

        # End the job!
        data = dbInfo.jobEnd()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'], "")

        return


    def testAFailedJobMonitoring(self):
        """
        _TestAFailedJobMonitoring_

        Simulate a job that completes but fails, check that the data sent is
        correct
        """

        # Get the necessary objects
        name     = 'testB'
        job      = self.createTestJob()
        workload = self.createWorkload()
        task     = workload.getTask(taskName = "DataProcessing")
        report   = self.createReport(outcome = 1)

        # Fill the job environment
        self.setupJobEnvironment(name = name)

        # Instantiate DBInfo
        dbInfo   = DashboardInfo(job = job, task = task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName = "cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step = step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertNotEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        # End the job!
        data = dbInfo.jobEnd()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertNotEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'].find('cmsRun1'), -1)

        return

    def testAKilledJobMonitoring(self):
        """
        _TestAKilledJobMonitoring_

        Simulate a job that is killed check that the data sent is
        correct
        """

        # Get the necessary objects
        name     = 'testC'
        job      = self.createTestJob()
        workload = self.createWorkload()
        task     = workload.getTask(taskName = "DataProcessing")
        report   = self.createReport(outcome = 1)

        # Fill the job environment
        self.setupJobEnvironment(name = name)

        # Instantiate DBInfo
        dbInfo   = DashboardInfo(job = job, task = task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName = "cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step = step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertNotEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)

        # Kill the job!
        data = dbInfo.jobKilled()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertNotEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'].find('killed'), -1)

        return

    @attr('integration')
    def testGetDN(self):
        """
        _testGetDN_

        Checks that we can get a DN
        """
        dn = getUserProxyDN()
        if 'X509_USER_PROXY' in os.environ:
            self.assertNotEqual(dn, None, 'Error: This should get a DN, if you have set one')
        else:
            self.assertEqual(dn, None, 'Error: There is no proxy in the environment, it should not get one')
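# The '1_ExeStart' / '2_ExeEnd' keys asserted above look like per-step fields
# prefixed with a running step counter.  A tiny sketch of that naming
# convention, given as an assumption for illustration rather than the
# DashboardInfo implementation:
def stepFieldName(counter, field):
    # e.g. stepFieldName(1, "ExeStart") -> "1_ExeStart"
    return "%d_%s" % (counter, field)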
Example #53
class BaseTest(unittest.TestCase):
    """
    Some methods of this class are made static and are used from 
    other test cases.
    
    """
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        self.testComponentDaemonXml = "/tmp/TestComponent/Daemon.xml" 
        
        
    def tearDown(self):       
        self.testInit.delWorkDir()
        self.generator = None
        # if the directory and file "/tmp/TestComponent/Daemon.xml" still exist
        # after the ComponentsPoller test, delete them
        d = os.path.dirname(self.testComponentDaemonXml)
        if os.path.exists(d):
            shutil.rmtree(d)
                    

    def testSenderReceiverBasic(self):
        sender = Sender(self.config.Alert.address,
                        self.__class__.__name__,
                        self.config.Alert.controlAddr)
        handler, receiver = utils.setUpReceiver(self.config.Alert.address,
                                                self.config.Alert.controlAddr)
        a = Alert(Component = inspect.stack()[0][3])
        sender(a)
        while len(handler.queue) == 0:
            time.sleep(0.5)
            print "%s waiting for alert to arrive" % inspect.stack()[0][3]
        receiver.shutdown()
        self.assertEqual(len(handler.queue), 1)
        self.assertEqual(handler.queue[0]["Component"], inspect.stack()[0][3])
        
        
    def testProcessDetailBasic(self):
        pid = os.getpid()
        name = inspect.stack()[0][3] # test name
        pd = ProcessDetail(pid, name)
        self.assertEqual(pd.pid, pid)
        self.assertEqual(pd.name, name)
        self.assertEqual(pd.proc.pid, pid)
        numChildren = len(psutil.Process(pid).get_children())
        self.assertEqual(len(pd.children), numChildren)
        self.assertEqual(len(pd.allProcs), 1 + numChildren)
        d = pd.getDetails()
        self.assertEqual(d["pid"], pid)
        self.assertEqual(d["component"], name)
        self.assertEqual(d["numChildrenProcesses"], numChildren)
                
        
    def testMeasurementsBasic(self):
        numMes = 10
        mes = Measurements(numMes)
        self.assertEqual(mes._numOfMeasurements, numMes)
        self.assertEqual(len(mes), 0)
        mes.append(20)
        self.assertEqual(len(mes), 1)
        self.assertEqual(mes[0], 20)
        mes.append(30)
        self.assertEqual(mes[1], 30)
        mes.clear()
        self.assertEqual(len(mes), 0)
        self.assertEqual(mes._numOfMeasurements, numMes)
        

    def testBasePollerBasic(self):
        config = getConfig("/tmp")
        # create some nonsense config section, just need a bunch of values defined
        config.AlertGenerator.section_("bogusPoller")
        config.AlertGenerator.bogusPoller.soft = 5 # [percent]
        config.AlertGenerator.bogusPoller.critical = 50 # [percent] 
        config.AlertGenerator.bogusPoller.pollInterval = 2  # [second]
        config.AlertGenerator.bogusPoller.period = 10
        
        generator = utils.AlertGeneratorMock(config)
        poller = BasePoller(config.AlertGenerator.bogusPoller, generator)
        # define dummy check method
        poller.check = lambda: 1+1
        poller.start()
        # poller now runs
        time.sleep(config.AlertGenerator.bogusPoller.pollInterval * 2)
        poller.terminate()
        while poller.is_alive():
            time.sleep(0.2)
            print "%s waiting for test poller to terminate" % inspect.stack()[0][3]
            
        
    def testPeriodPollerOnRealProcess(self):
        config = getConfig("/tmp")
        config.component_("AlertProcessor")
        config.AlertProcessor.section_("critical")
        config.AlertProcessor.section_("soft")
        config.AlertProcessor.critical.level = 5
        config.AlertProcessor.soft.level = 0        
        config.component_("AlertGenerator")
        config.AlertGenerator.section_("bogusPoller")
        config.AlertGenerator.bogusPoller.soft = 5 # [percent]
        config.AlertGenerator.bogusPoller.critical = 50 # [percent] 
        config.AlertGenerator.bogusPoller.pollInterval = 0.2  # [second]
        # period during which measurements are collected before evaluating for
        # possible alert triggering
        config.AlertGenerator.bogusPoller.period = 1
        
        generator = utils.AlertGeneratorMock(config)
        poller = PeriodPoller(config.AlertGenerator.bogusPoller, generator)
        poller.sender = utils.SenderMock()
        # get CPU usage percentage, it's like measuring CPU usage of a real
        # component, so use the appropriate poller's method for that
        # (PeriodPoller itself is a higher-level class so it doesn't define
        # a method to provide sampling data)
        poller.sample = lambda processDetail: ComponentsCPUPoller.sample(processDetail)
        
        # get own pid
        pid = os.getpid()
        name = inspect.stack()[0][3] # test name
        pd = ProcessDetail(pid, name)
        # need to repeat sampling for the required number of measurements
        numOfMeasurements = int(config.AlertGenerator.bogusPoller.period / 
                                config.AlertGenerator.bogusPoller.pollInterval)
        mes = Measurements(numOfMeasurements)
        self.assertEqual(len(mes), 0)
        for i in range(mes._numOfMeasurements):
            poller.check(pd, mes)
                    
        # since the whole measurement cycle was done, values should have been nulled
        self.assertEqual(len(mes), 0)
        
        
    def testPeriodPollerCalculationPredefinedInput(self):
        config = getConfig("/tmp")
        config.component_("AlertProcessor")
        config.AlertProcessor.section_("critical")
        config.AlertProcessor.section_("soft")
        config.AlertProcessor.critical.level = 5
        config.AlertProcessor.soft.level = 0        
        config.component_("AlertGenerator")
        config.AlertGenerator.section_("bogusPoller")
        # put some threshold numbers, just need to check output calculation
        # from check() method
        config.AlertGenerator.bogusPoller.soft = 5 # [percent]
        config.AlertGenerator.bogusPoller.critical = 50 # [percent] 
        config.AlertGenerator.bogusPoller.pollInterval = 0.2  # [second]
        config.AlertGenerator.bogusPoller.period = 1
        
        generator = utils.AlertGeneratorMock(config)
        poller = PeriodPoller(config.AlertGenerator.bogusPoller, generator)
        # since poller may trigger an alert, give it mock sender
        poller.sender = utils.SenderMock()
        # provide sample method with predefined input, float
        predefInput = 10.12
        poller.sample = lambda processDetail: predefInput
        
        processDetail = None
        numOfMeasurements = int(config.AlertGenerator.bogusPoller.period / 
                                config.AlertGenerator.bogusPoller.pollInterval)
        mes = Measurements(numOfMeasurements)
        for i in range(mes._numOfMeasurements):
            poller.check(processDetail, mes)
            
        # the above loop should have run 5 times, reaching the evaluation of
        # 5 x predefInput values; the average ends up equal to predefInput,
        # which should trigger the soft threshold
        self.assertEqual(len(poller.sender.queue), 1)
        a = poller.sender.queue[0]
        
        self.assertEqual(a["Component"], generator.__class__.__name__)
        self.assertEqual(a["Source"], poller.__class__.__name__)
        d = a["Details"]
        self.assertEqual(d["threshold"], "%s%%" % config.AlertGenerator.bogusPoller.soft)
        self.assertEqual(d["numMeasurements"], mes._numOfMeasurements)
        self.assertEqual(d["period"], config.AlertGenerator.bogusPoller.period)
        self.assertEqual(d["average"], "%s%%" % predefInput)
        # since the whole measurement cycle was done, values should have been nulled
        self.assertEqual(len(mes), 0)
Example #54
class ParentlessMergeBySizeTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Boiler plate DB setup.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        self.testInit.clearDatabase()
        return

    def stuffWMBS(self):
        """
        _stuffWMBS_

        Insert some dummy jobs, jobgroups, filesets, files and subscriptions
        into WMBS to test job creation.  Three completed job groups each
        containing several files are injected.  Another incomplete job group is
        also injected.  Also files are added to the "Mergeable" subscription as
        well as to the output fileset for their jobgroups.
        """
        locationAction = self.daoFactory(classname="Locations.New")
        locationAction.execute(siteName="s1", seName="somese.cern.ch")
        locationAction.execute(siteName="s1", seName="somese2.cern.ch")

        changeStateDAO = self.daoFactory(classname="Jobs.ChangeState")

        self.mergeFileset = Fileset(name="mergeFileset")
        self.mergeFileset.create()
        self.bogusFileset = Fileset(name="bogusFileset")
        self.bogusFileset.create()

        mergeWorkflow = Workflow(name="mergeWorkflow",
                                 spec="bunk2",
                                 owner="Steve",
                                 task="Test")
        mergeWorkflow.create()
        markWorkflow = self.daoFactory(
            classname="Workflow.MarkInjectedWorkflows")
        markWorkflow.execute(names=[mergeWorkflow.name], injected=True)

        self.mergeSubscription = Subscription(
            fileset=self.mergeFileset,
            workflow=mergeWorkflow,
            split_algo="ParentlessMergeBySize")
        self.mergeSubscription.create()
        self.bogusSubscription = Subscription(
            fileset=self.bogusFileset,
            workflow=mergeWorkflow,
            split_algo="ParentlessMergeBySize")

        file1 = File(lfn="file1",
                     size=1024,
                     events=1024,
                     first_event=0,
                     locations=set(["somese.cern.ch"]))
        file1.addRun(Run(1, *[45]))
        file1.create()
        file2 = File(lfn="file2",
                     size=1024,
                     events=1024,
                     first_event=1024,
                     locations=set(["somese.cern.ch"]))
        file2.addRun(Run(1, *[45]))
        file2.create()
        file3 = File(lfn="file3",
                     size=1024,
                     events=1024,
                     first_event=2048,
                     locations=set(["somese.cern.ch"]))
        file3.addRun(Run(1, *[45]))
        file3.create()
        file4 = File(lfn="file4",
                     size=1024,
                     events=1024,
                     first_event=3072,
                     locations=set(["somese.cern.ch"]))
        file4.addRun(Run(1, *[45]))
        file4.create()

        fileA = File(lfn="fileA",
                     size=1024,
                     events=1024,
                     first_event=0,
                     locations=set(["somese.cern.ch"]))
        fileA.addRun(Run(1, *[46]))
        fileA.create()
        fileB = File(lfn="fileB",
                     size=1024,
                     events=1024,
                     first_event=1024,
                     locations=set(["somese.cern.ch"]))
        fileB.addRun(Run(1, *[46]))
        fileB.create()
        fileC = File(lfn="fileC",
                     size=1024,
                     events=1024,
                     first_event=2048,
                     locations=set(["somese.cern.ch"]))
        fileC.addRun(Run(1, *[46]))
        fileC.create()

        fileI = File(lfn="fileI",
                     size=1024,
                     events=1024,
                     first_event=0,
                     locations=set(["somese.cern.ch"]))
        fileI.addRun(Run(2, *[46]))
        fileI.create()
        fileII = File(lfn="fileII",
                      size=1024,
                      events=1024,
                      first_event=1024,
                      locations=set(["somese.cern.ch"]))
        fileII.addRun(Run(2, *[46]))
        fileII.create()
        fileIII = File(lfn="fileIII",
                       size=1024,
                       events=102400,
                       first_event=2048,
                       locations=set(["somese.cern.ch"]))
        fileIII.addRun(Run(2, *[46]))
        fileIII.create()
        fileIV = File(lfn="fileIV",
                      size=102400,
                      events=1024,
                      first_event=3072,
                      locations=set(["somese.cern.ch"]))
        fileIV.addRun(Run(2, *[46]))
        fileIV.create()

        for file in [
                file1, file2, file3, file4, fileA, fileB, fileC, fileI, fileII,
                fileIII, fileIV
        ]:
            self.mergeFileset.addFile(file)
            self.bogusFileset.addFile(file)

        self.mergeFileset.commit()
        self.bogusFileset.commit()

        return

    def testMinMergeSize1(self):
        """
        _testMinMergeSize1_

        Set the minimum merge size to be 200,000 bytes, which is more than the
        sum of all file sizes in the WMBS instance.  Verify that no merge jobs
        will be produced.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000000,
                            max_merge_events=200000000)

        assert len(result) == 0, \
               "ERROR: No job groups should be returned."

        return

    def testMinMergeSize1a(self):
        """
        _testMinMergeSize1a_

        Set the minimum merge size to be 200,000 bytes, which is more than the
        sum of all file sizes in the WMBS instance, and mark the fileset as
        closed.  Verify that one job containing all files is pushed out.
        """
        self.stuffWMBS()
        self.mergeFileset.markOpen(False)

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000,
                            max_merge_events=2000000)
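        # The files are still below min_merge_size, but because the fileset is
        # now closed everything is flushed out as a single job.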

        assert len(result) == 1, \
               "ERROR: More than one JobGroup returned: %s" % len(result)

        assert len(result[0].jobs) == 1, \
               "Error: One job should have been returned: %s" % len(result[0].jobs)

        goldenFiles = [
            "file1", "file2", "file3", "file4", "fileA", "fileB", "fileC",
            "fileI", "fileII", "fileIII", "fileIV"
        ]

        jobFiles = result[0].jobs[0].getFiles()

        currentRun = 0
        currentLumi = 0
        currentEvent = 0
        for file in jobFiles:
            file.loadData()
            assert file["lfn"] in goldenFiles, \
                   "Error: Unknown file: %s" % file["lfn"]
            self.assertTrue(
                file["locations"] == set(["somese.cern.ch",
                                          "somese2.cern.ch"]),
                "Error: File is missing a location.")
            goldenFiles.remove(file["lfn"])

            fileRun = list(file["runs"])[0].run
            fileLumi = min(list(file["runs"])[0])
            fileEvent = file["first_event"]

            if currentRun == 0:
                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent
                continue

            assert fileRun >= currentRun, \
                   "ERROR: Files not sorted by run."

            if fileRun == currentRun:
                assert fileLumi >= currentLumi, \
                       "ERROR: Files not ordered by lumi"

            if fileLumi == currentLumi:
                assert fileEvent >= currentEvent, \
                       "ERROR: Files not ordered by first event"

            currentRun = fileRun
            currentLumi = fileLumi
            currentEvent = fileEvent

        return

    def testMaxMergeSize(self):
        """
        _testMaxMergeSize_

        Set the maximum merge size to be 100000 bytes.  Verify that two merge
        jobs are created, one for the one large file and another for the rest of
        the files.  Verify that each merge job contains the expected files and
        that we merge across runs.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=100000,
                            max_merge_events=200000)
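        # fileIV alone is 102400 bytes and exceeds max_merge_size, so it is
        # split into its own job; the remaining ten files total only 10240
        # bytes and fit together in the second job.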

        assert len(result) == 1, \
               "ERROR: More than one JobGroup returned: %s" % result

        assert len(result[0].jobs) == 2, \
               "ERROR: Two jobs should have been returned."

        goldenFilesA = [
            "file1", "file2", "file3", "file4", "fileA", "fileB", "fileC",
            "fileI", "fileII", "fileIII"
        ]
        goldenFilesB = ["fileIV"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()

            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for file in jobFiles:
                assert file["lfn"] in goldenFiles, \
                       "Error: Unknown file in merge jobs."
                assert file["locations"] == set(["somese.cern.ch"]), \
                       "Error: File is missing a location."

                goldenFiles.remove(file["lfn"])

            fileRun = list(file["runs"])[0].run
            fileLumi = min(list(file["runs"])[0])
            fileEvent = file["first_event"]

            if currentRun == 0:
                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent
                continue

            assert fileRun >= currentRun, \
                   "ERROR: Files not sorted by run."

            if fileRun == currentRun:
                assert fileLumi >= currentLumi, \
                       "ERROR: Files not ordered by lumi"

                if fileLumi == currentLumi:
                    assert fileEvent >= currentEvent, \
                           "ERROR: Files not ordered by first event"

            currentRun = fileRun
            currentLumi = fileLumi
            currentEvent = fileEvent

        assert len(goldenFilesA) == 0 and len(goldenFilesB) == 0, \
               "ERROR: Files missing from merge jobs."

        return

    def testMaxEvents(self):
        """
        _testMaxEvents_

        Verify that the max_merge_events parameter works and that we correctly
        merge across runs.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=20000000,
                            max_merge_events=100000)
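        # fileIII alone carries 102400 events and exceeds max_merge_events, so
        # it gets its own job; the other ten files total 10240 events and are
        # merged together, spanning both runs.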

        assert len(result) == 1, \
               "ERROR: More than one JobGroup returned: %s" % result

        assert len(result[0].jobs) == 2, \
               "ERROR: Two jobs should have been returned: %s" % len(result[0].jobs)

        goldenFilesA = [
            "file1", "file2", "file3", "file4", "fileA", "fileB", "fileC",
            "fileI", "fileII", "fileIV"
        ]
        goldenFilesB = ["fileIII"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()

            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for file in jobFiles:
                assert file["lfn"] in goldenFiles, \
                       "Error: Unknown file in merge jobs."
                assert file["locations"] == set(["somese.cern.ch"]), \
                       "Error: File is missing a location: %s" % file["locations"]

                goldenFiles.remove(file["lfn"])

                fileRun = list(file["runs"])[0].run
                fileLumi = min(list(file["runs"])[0])
                fileEvent = file["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                assert fileRun >= currentRun, \
                       "ERROR: Files not sorted by run: %s, %s" % (fileRun, currentRun)

                if fileRun == currentRun:
                    assert fileLumi >= currentLumi, \
                           "ERROR: Files not ordered by lumi"

                    if fileLumi == currentLumi:
                        assert fileEvent >= currentEvent, \
                               "ERROR: Files not ordered by first event"

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        assert len(goldenFilesA) == 0 and len(goldenFilesB) == 0, \
               "ERROR: Files missing from merge jobs."

        return

    def testMinMergeSize1aNoRunMerge(self):
        """
        _testMinMergeSize1aNoRunMerge_

        Set the minimum merge size to be 200,000 bytes, which is more than the
        sum of all file sizes in the WMBS instance, and mark the fileset as
        closed.  Verify that two jobs are pushed out and that we don't merge
        across run boundaries.
        """
        self.stuffWMBS()
        self.mergeFileset.markOpen(False)

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000,
                            max_merge_events=2000000,
                            merge_across_runs=False)
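        # With merge_across_runs=False the closed fileset is flushed as one job
        # per run: file1-file4 plus fileA-fileC (run 1) in one job, and
        # fileI-fileIV (run 2) in the other.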

        assert len(result) == 1, \
               "ERROR: More than one JobGroup returned: %s" % len(result)

        assert len(result[0].jobs) == 2, \
               "Error: Two jobs should have been returned: %s" % len(result[0].jobs)

        goldenFilesA = [
            "file1", "file2", "file3", "file4", "fileA", "fileB", "fileC"
        ]
        goldenFilesB = ["fileI", "fileII", "fileIII", "fileIV"]
        goldenFilesA.sort()
        goldenFilesB.sort()

        for job in result[0].jobs:
            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            jobLFNs = []

            for file in job.getFiles():
                file.loadData()
                jobLFNs.append(file["lfn"])
                self.assertTrue(
                    file["locations"] == set(
                        ["somese.cern.ch", "somese2.cern.ch"]),
                    "Error: File is missing a location.")

                fileRun = list(file["runs"])[0].run
                fileLumi = min(list(file["runs"])[0])
                fileEvent = file["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                assert fileRun >= currentRun, \
                       "ERROR: Files not sorted by run."

                if fileRun == currentRun:
                    assert fileLumi >= currentLumi, \
                           "ERROR: Files not ordered by lumi"

                if fileLumi == currentLumi:
                    assert fileEvent >= currentEvent, \
                           "ERROR: Files not ordered by first event"

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

            jobLFNs.sort()
            if jobLFNs == goldenFilesA:
                goldenFilesA = []
            else:
                self.assertEqual(jobLFNs, goldenFilesB,
                                 "Error: LFNs do not match.")
                goldenFilesB = []

        return

    def testMaxMergeSizeNoRunMerge(self):
        """
        _testMaxMergeSizeNoRunMerge_

        Set the maximum merge size to be 100000 bytes.  Verify that two merge
        jobs are created, one for the one large file and another for the rest of
        the files.  Verify that each merge job contains the expected files and
        that we don't merge across run boundaries.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=100000,
                            max_merge_events=200000,
                            merge_across_runs=False)
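        # Run 1 files merge into a single job.  Within run 2, fileIV (102400
        # bytes) exceeds max_merge_size and is split off on its own, giving
        # three jobs in total.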

        assert len(result) == 1, \
               "ERROR: More than one JobGroup returned: %s" % result

        assert len(result[0].jobs) == 3, \
               "ERROR: Three jobs should have been returned."

        goldenFilesA = [
            "file1", "file2", "file3", "file4", "fileA", "fileB", "fileC"
        ]
        goldenFilesB = ["fileI", "fileII", "fileIII"]
        goldenFilesC = ["fileIV"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()

            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB
            else:
                goldenFiles = goldenFilesC

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for file in jobFiles:
                self.assertTrue(file["lfn"] in goldenFiles,
                                "Error: Unknown file in merge jobs.")
                self.assertTrue(file["locations"] == set(["somese.cern.ch"]),
                                "Error: File is missing a location.")

                goldenFiles.remove(file["lfn"])

                fileRun = list(file["runs"])[0].run
                fileLumi = min(list(file["runs"])[0])
                fileEvent = file["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                self.assertTrue(fileRun >= currentRun,
                                "ERROR: Files not sorted by run.")
                if fileRun == currentRun:
                    self.assertTrue(fileLumi >= currentLumi,
                                    "ERROR: Files not ordered by lumi")
                    if fileLumi == currentLumi:
                        self.assertTrue(fileEvent >= currentEvent,
                                        "ERROR: Files not ordered by first event")

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        self.assertTrue(
            len(goldenFilesA) == 0 and len(goldenFilesB) == 0
            and len(goldenFilesC) == 0,
            "ERROR: Files missing from merge jobs.")

        return

    def testMaxEventsNoRunMerge(self):
        """
        _testMaxEventsNoRunMerge_

        Verify that the max events merge parameter works correctly and that we
        don't merge across run boundaries.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=1,
                            max_merge_size=20000000,
                            max_merge_events=100000,
                            merge_across_runs=False)
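        # Run 1 files merge into a single job.  Within run 2, fileIII (102400
        # events) exceeds max_merge_events and is split off on its own, giving
        # three jobs in total.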

        self.assertTrue(
            len(result) == 1,
            "ERROR: More than one JobGroup returned: %s" % result)

        self.assertTrue(
            len(result[0].jobs) == 3,
            "ERROR: Three jobs should have been returned: %s" %
            len(result[0].jobs))

        goldenFilesA = [
            "file1",
            "file2",
            "file3",
            "file4",
            "fileA",
            "fileB",
            "fileC",
        ]
        goldenFilesB = ["fileI", "fileII", "fileIV"]
        goldenFilesC = ["fileIII"]

        for job in result[0].jobs:
            jobFiles = job.getFiles()

            if jobFiles[0]["lfn"] in goldenFilesA:
                goldenFiles = goldenFilesA
            elif jobFiles[0]["lfn"] in goldenFilesB:
                goldenFiles = goldenFilesB
            else:
                goldenFiles = goldenFilesC

            currentRun = 0
            currentLumi = 0
            currentEvent = 0
            for file in jobFiles:
                self.assertTrue(file["lfn"] in goldenFiles,
                                "Error: Unknown file in merge jobs.")
                self.assertTrue(
                    file["locations"] == set(["somese.cern.ch"]),
                    "Error: File is missing a location: %s" %
                    file["locations"])

                goldenFiles.remove(file["lfn"])

                fileRun = list(file["runs"])[0].run
                fileLumi = min(list(file["runs"])[0])
                fileEvent = file["first_event"]

                if currentRun == 0:
                    currentRun = fileRun
                    currentLumi = fileLumi
                    currentEvent = fileEvent
                    continue

                self.assertTrue(
                    fileRun >= currentRun,
                    "ERROR: Files not sorted by run: %s, %s" %
                    (fileRun, currentRun))
                if fileRun == currentRun:
                    self.assertTrue(fileLumi >= currentLumi,
                                    "ERROR: Files not ordered by lumi")
                    if fileLumi == currentLumi:
                        self.assertTrue(
                            fileEvent >= currentEvent,
                            "ERROR: Files not ordered by first event")

                currentRun = fileRun
                currentLumi = fileLumi
                currentEvent = fileEvent

        self.assertTrue(
            len(goldenFilesA) == 0 and len(goldenFilesB) == 0
            and len(goldenFilesC) == 0,
            "ERROR: Files missing from merge jobs.")

        return

    def testLocationMerging(self):
        """
        _testLocationMerging_

        Verify that files residing on different SEs are not merged together in
        the same job.
        """
        self.stuffWMBS()

        locationAction = self.daoFactory(classname="Locations.New")
        locationAction.execute(siteName="s2", seName="somese2.cern.ch")

        fileSite2 = File(lfn="fileSite2",
                         size=4098,
                         events=1024,
                         first_event=0,
                         locations=set(["somese2.cern.ch"]))
        fileSite2.addRun(Run(1, *[46]))
        fileSite2.create()

        self.mergeFileset.addFile(fileSite2)
        self.mergeFileset.commit()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=4097,
                            max_merge_size=99999999,
                            max_merge_events=999999999,
                            merge_across_runs=False)
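        # fileSite2 is the only file at somese2.cern.ch and, at 4098 bytes, it
        # already satisfies min_merge_size, so it merges on its own; files at
        # different locations are never combined into the same job.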

        assert len(result) == 1, \
               "ERROR: More than one JobGroup returned."

        assert len(result[0].jobs) == 3, \
               "ERROR: Three jobs should have been returned."

        for job in result[0].jobs:
            firstInputFile = job.getFiles()[0]
            baseLocation = list(firstInputFile["locations"])[0]

            for inputFile in job.getFiles():
                assert len(inputFile["locations"]) == 1, \
                       "Error: Wrong number of locations"

                assert list(inputFile["locations"])[0] == baseLocation, \
                       "Error: Wrong location."

        return

    def testMaxWaitTime(self):
        """
        _testMaxWaitTime_

        Set the max wait time to a negative value; this should force all files
        to merge out immediately.

        Uses the same parameters as the first merge test, which would normally
        produce no jobGroups.
        """
        self.stuffWMBS()

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)

        result = jobFactory(min_merge_size=200000,
                            max_merge_size=2000000000,
                            max_merge_events=200000000,
                            max_wait_time=-10)
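        # A negative max_wait_time makes every file look overdue, so the whole
        # set is flushed as a single job even though it is below min_merge_size.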

        # Everything should be in one, small jobGroup
        self.assertEqual(len(result), 1)
        self.assertEqual(len(result[0].jobs), 1)
        job = result[0].jobs[0]
        # All files should be in one job
        self.assertEqual(len(job.getFiles()), 11)

        return

    def testDifferentSubscriptionIDs(self):
        """
        _testDifferentSubscriptionIDs_

        Make sure that the merge splitting still runs if the subscription ID
        is not equal to the workflow ID.
        """
        myThread = threading.currentThread()
        myThread.transaction.begin()
        dummyWorkflow = Workflow(name="dummyWorkflow",
                                 spec="bunk49",
                                 owner="Steve",
                                 task="Test2")
        dummyWorkflow.create()
        dummyFileset = Fileset(name="dummyFileset")
        dummyFileset.create()
        dummySubscription1 = Subscription(fileset=dummyFileset,
                                          workflow=dummyWorkflow,
                                          split_algo="ParentlessMergeBySize")
        dummySubscription2 = Subscription(fileset=dummyFileset,
                                          workflow=dummyWorkflow,
                                          split_algo="ParentlessMergeBySize")
        dummySubscription1.create()
        dummySubscription2.create()
        myThread.transaction.commit()
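        # The two dummy subscriptions above shift the subscription IDs so that
        # they no longer line up with the workflow IDs before stuffWMBS() runs.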

        self.stuffWMBS()
        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.mergeSubscription)
        result = jobFactory(min_merge_size=4097,
                            max_merge_size=99999999,
                            max_merge_events=999999999,
                            merge_across_runs=False)
        self.assertEqual(len(result), 1)
        jobGroup = result[0]
        self.assertEqual(len(jobGroup.jobs), 2)
        return
Example #55
0
class CursorLeakTest(unittest.TestCase):

    def setUp(self):
        """
        _setUp_

        Set up the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """


        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationAction = daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")        
        
        return
          
    def tearDown(self):        
        """
        _tearDown_
        
        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()
            
    def testCursor(self):
        """
        _testCursor_
        
        Test that database cursors are really being closed.

        Create 100 files with 5 parents each and loop over them 100 times.
        If cursors are being exhausted, this will crash.

        TODO: improve for more effective testing.

        """
        
        raise nose.SkipTest
        fileList = []
        parentFile = None
        for i in range(100):
            testFile = File(lfn = "/this/is/a/lfn%s" % i, size = 1024, events = 10,
                            checksums = {"cksum": "1"})
            testFile.addRun(Run(1, *[i]))
            testFile.create()
            
            for j in range(5):
                parentFile = File(lfn = "/this/is/a/lfnP%s" % j, size = 1024,
                                  events = 10, checksums = {"cksum": "1"})
                parentFile.addRun(Run(1, *[j]))
                parentFile.create()
                testFile.addParent(parentFile['lfn'])
    
            fileList.append(testFile)
            
        for i in range(100):
            for file in fileList:
                file.loadData()
                file.getAncestors(level = 2)
                file.getAncestors(level = 2, type = "lfn")
        
        return
    
    def testLotsOfAncestors(self):
        """
        _testLotsOfAncestors_

        Create a file with 15 parents with each parent having 100 parents to
        verify that the query to return grandparents works correctly.
        """
        raise nose.SkipTest
        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10,
                        checksums = {"cksum": "1"}, locations = "se1.fnal.gov")
        testFileA.create()

        for i in xrange(15):
            testParent = File(lfn = makeUUID(), size = 1024, events = 10,
                              checksums = {"cksum": "1"}, locations = "se1.fnal.gov")
            testParent.create()
            testFileA.addParent(testParent["lfn"])

            for j in xrange(100):
                testGParent = File(lfn = makeUUID(), size = 1024, events = 10,
                                   checksums = {"cksum": "1"}, locations = "se1.fnal.gov")
                testGParent.create()
                testParent.addParent(testGParent["lfn"])                
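        # 15 parents x 100 grandparents each = 1500 ancestors expected at level 2.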

        assert len(testFileA.getAncestors(level = 2, type = "lfn")) == 1500, \
               "ERROR: Incorrect grand parents returned"
        
        return
Example #56
0
File: DBCore_t.py Project: vytjan/WMCore
class DBCoreTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMQuality.TestDB"],
                                useDefault=False)

        return

    def tearDown(self):
        """
        Delete the databases
        """
        self.testInit.clearDatabase()
        return

    def testBuildBinds(self):
        """
        Test class for DBCore.buildbinds()

        """

        # This method may become obsolete soon; there is a TODO attached to
        # DBCore.buildbinds().  This just checks that buildbinds() packages
        # each value from the files list under the seqname key.  It also
        # checks that the other bind keys are set correctly in each dict,
        # although that seems redundant.
        # -mnorman

        seqname = 'file'
        dictbinds = {'lumi': 123, 'test': 100}
        files = ['testA', 'testB', 'testC']

        myThread = threading.currentThread()

        testInterface = myThread.dbi

        binds = testInterface.buildbinds(files, seqname, dictbinds)

        # buildbinds() should return one dict per entry in files, each carrying
        # the additional elements from dictbinds.  We then loop over every dict
        # (there should be as many dicts as entries in files) and check that
        # the filename matches and that at least one of the dictbinds keys maps
        # to its proper value.
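        # With the inputs above, the first entry is expected to look like
        # {'file': 'testA', 'lumi': 123, 'test': 100}.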

        for i in range(len(files)):
            self.assertEqual(binds[i][seqname], files[i])
            self.assertEqual(binds[i][dictbinds.keys()[0]],
                             dictbinds[dictbinds.keys()[0]])

        return

    def testProcessDataNoBinds(self):
        """
        _testProcessDataNoBinds_

        Verify that insert and select queries work when no binds are used.
        """
        insertSQL = "INSERT INTO test_tablea VALUES (1, 2, 'three')"
        selectSQL = "SELECT column1, column2, column3 from test_tablea"

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL)
        resultSets = myThread.dbi.processData(selectSQL)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 1, \
               "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
               "Error: Wrong number of columns returned."
        assert results[0][0] == 1, \
               "Error: Column one is wrong."
        assert results[0][1] == 2, \
               "Error: Column two is wrong."
        assert results[0][2] == "three", \
               "Error: Column three is wrong."

        return

    def testProcessDataOneBind(self):
        """
        _testProcessDataOneBind_

        Verify that insert and select queries work with one set of bind variables.
        """
        bindsA = {"one": 1, "two": 2, "three": "three"}
        bindsB = {"one": 3, "two": 2, "three": "one"}
        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
          """SELECT column1, column2, column3 FROM test_tablea
             WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL, bindsA)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 1, \
               "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
               "Error: Wrong number of columns returned."
        assert results[0][0] == 1, \
               "Error: Column one is wrong."
        assert results[0][1] == 2, \
               "Error: Column two is wrong."
        assert results[0][2] == "three", \
               "Error: Column three is wrong."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 1, \
               "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
               "Error: Wrong number of columns returned."
        assert results[0][0] == 3, \
               "Error: Column one is wrong."
        assert results[0][1] == 2, \
               "Error: Column two is wrong."
        assert results[0][2] == "one", \
               "Error: Column three is wrong."

        return

    def testProcessDataSeveralBinds(self):
        """
        _testProcessDataSeveralBinds_

        Verify that insert and select queries work with several binds.
        """
        bindsA = [{
            "one": 1,
            "two": 2,
            "three": "three"
        }, {
            "one": 3,
            "two": 2,
            "three": "one"
        }, {
            "one": 4,
            "two": 5,
            "three": "six"
        }, {
            "one": 6,
            "two": 5,
            "three": "four"
        }]
        bindsB = [{
            "one": 10,
            "two": 11,
            "three": "twelve"
        }, {
            "one": 12,
            "two": 11,
            "three": "ten"
        }]

        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
          """SELECT column1, column2, column3 FROM test_tablea
             WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL, bindsA)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 4, \
               "Error: Wrong number of rows returned."

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsA:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsA.remove(bind)
                    break

        assert len(bindsA) == 0, \
               "Error: Missing rows from select."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 2, \
               "Error: Wrong number of rows returned."

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsB:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsB.remove(bind)
                    break

        assert len(bindsB) == 0, \
               "Error: Missing rows from select."

        return

    def testProcessDataHugeBinds(self):
        """
        _testProcessDataHugeBinds_

        Verify that select and insert queries work with several thousand binds.
        """
        bindsA = []
        bindsB = []
        for i in range(3001):
            bindsA.append({"one": i, "two": i * 2, "three": str(i * 3)})

        for i in range(1501):
            bindsB.append({"one": (i + 1) * 2, "two": i, "three": str(i * 5)})

        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
          """SELECT column1, column2, column3 FROM test_tablea
             WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)
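        # With several thousand binds the DBI may split the work into chunks,
        # so the select can come back as more than one ResultSet; collect the
        # rows from all of them before checking the counts.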

        resultSets = myThread.dbi.processData(selectSQL, bindsA)
        results = []
        for resultSet in resultSets:
            results.extend(resultSet.fetchall())

        assert len(results) == 3001, \
               "Error: Wrong number of rows returned: %d" % len(results)

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsA:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsA.remove(bind)
                    break

        assert len(bindsA) == 0, \
               "Error: Missing rows from select."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)
        results = []
        for resultSet in resultSets:
            results.extend(resultSet.fetchall())

        assert len(results) == 1501, \
               "Error: Wrong number of rows returned."

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsB:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsB.remove(bind)
                    break

        assert len(bindsB) == 0, \
               "Error: Missing rows from select."

        return

    def testInsertHugeNumber(self):
        """
        _testInsertHugeNumber_

        Verify that we can insert and select huge numbers.
        """
        insertSQL = "INSERT INTO test_bigcol VALUES(:val1)"
        selectSQL = "SELECT * FROM test_bigcol"

        bindsA = {"val1": 2012211901}
        bindsB = {"val1": 20122119010}

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL)
        results = []
        for resultSet in resultSets:
            for row in resultSet.fetchall():
                results.append(row[0])

        assert len(results) == 2, \
               "Error: Wrong number of results."
        assert bindsA["val1"] in results, \
               "Error: Value one is missing."
        assert bindsB["val1"] in results, \
               "Error: Value one is missing."

        return
Example #57
0
class PhEDExInjectorSubscriberTest(unittest.TestCase):
    """
    _PhEDExInjectorSubscriberTest_

    Unit tests for the PhEDExInjectorSubscriber.
    Populate the DBSBuffer database, run the subscriber algorithm
    using a PhEDEx emulator, and verify that it works in both unsafe and safe mode.
    For unsafe mode a WMBS database is also created.
    """

    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """

        self.phedexURL = "https://bogus.cern.ch/bogus"
        self.dbsURL = "https://bogus.cern.ch/bogus"
        EmulatorHelper.setEmulators(phedex = True, dbs = True, siteDB = True)

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer",
                                                 "WMCore.WMBS"],
                                useDefault = False)

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/BogusPrimary/Run2012Z-PromptReco-v1/RECO"
        self.testDatasetB = "/BogusPrimary/CRUZET11-v1/RAW"

        return

    def tearDown(self):
        """
        _tearDown_

        Delete the database.
        """
        self.testInit.clearDatabase()
        EmulatorHelper.resetEmulators()

    def createConfig(self, tier0Mode = False):
        """
        _createConfig_

        Create a config for the PhEDExInjector.  The paths to DBS and PhEDEx
        are dummies because we are using emulators.
        """
        config = self.testInit.getConfiguration()
        config.component_("DBSInterface")
        config.DBSInterface.globalDBSUrl = self.dbsURL

        config.component_("PhEDExInjector")
        config.PhEDExInjector.phedexurl = self.phedexURL
        config.PhEDExInjector.subscribeDatasets = True
        config.PhEDExInjector.group = "Saturn"
        config.PhEDExInjector.pollInterval = 30
        config.PhEDExInjector.subscribeInterval = 60
        config.PhEDExInjector.tier0Mode = tier0Mode

        return config

    def stuffDatabase(self, tier0Mode = False):
        """
        _stuffDatabase_

        Fill the dbsbuffer with some files and blocks.  We'll insert a total
        of 5 files spanning two blocks.  There will be a total of two datasets
        inserted into the database.

        All files will already be in GLOBAL and in_phedex
        """

        myThread = threading.currentThread()
        buffer3Factory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)
        insertWorkflow = buffer3Factory(classname = "InsertWorkflow")
        insertWorkflow.execute("BogusRequestA", "BogusTask",
                               0, 0, 0, 0)
        insertWorkflow.execute("BogusRequestB", "BogusTask",
                               0, 0, 0, 0)

        checksums = {"adler32": "1234", "cksum": "5678"}
        testFileA = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileA.setDatasetPath(self.testDatasetA)
        testFileA.addRun(Run(2, *[45]))
        testFileA.create()

        testFileB = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileB.setDatasetPath(self.testDatasetA)
        testFileB.addRun(Run(2, *[45]))
        testFileB.create()

        testFileC = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileC.setDatasetPath(self.testDatasetA)
        testFileC.addRun(Run(2, *[45]))
        testFileC.create()

        self.testFilesA.append(testFileA)
        self.testFilesA.append(testFileB)
        self.testFilesA.append(testFileC)

        testFileD = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileD.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileD.setDatasetPath(self.testDatasetB)
        testFileD.addRun(Run(2, *[45]))
        testFileD.create()

        testFileE = DBSBufferFile(lfn = makeUUID(), size = 1024, events = 10,
                                  checksums = checksums,
                                  locations = set(["srm-cms.cern.ch"]))
        testFileE.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                               appFam = "RECO", psetHash = "GIBBERISH",
                               configContent = "MOREGIBBERISH")
        testFileE.setDatasetPath(self.testDatasetB)
        testFileE.addRun(Run(2, *[45]))
        testFileE.create()

        self.testFilesB.append(testFileD)
        self.testFilesB.append(testFileE)

        uploadFactory = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)
        createBlock = uploadFactory(classname = "SetBlockStatus")

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()
        createBlock.execute(block = self.blockAName, locations = ["srm-cms.cern.ch"], open_status = 'Closed')
        createBlock.execute(block = self.blockBName, locations = ["srm-cms.cern.ch"], open_status = 'Closed')

        bufferFactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                   logger = myThread.logger,
                                   dbinterface = myThread.dbi)

        setBlock = bufferFactory(classname = "DBSBufferFiles.SetBlock")
        setBlock.execute(testFileA["lfn"], self.blockAName)
        setBlock.execute(testFileB["lfn"], self.blockAName)
        setBlock.execute(testFileC["lfn"], self.blockAName)
        setBlock.execute(testFileD["lfn"], self.blockBName)
        setBlock.execute(testFileE["lfn"], self.blockBName)

        fileStatus = bufferFactory(classname = "DBSBufferFiles.SetStatus")
        fileStatus.execute(testFileA["lfn"], "GLOBAL")
        fileStatus.execute(testFileB["lfn"], "GLOBAL")
        fileStatus.execute(testFileC["lfn"], "GLOBAL")
        fileStatus.execute(testFileD["lfn"], "GLOBAL")
        fileStatus.execute(testFileE["lfn"], "GLOBAL")

        phedexStatus = bufferFactory(classname = "DBSBufferFiles.SetPhEDExStatus")
        phedexStatus.execute(testFileA["lfn"], 1)
        phedexStatus.execute(testFileB["lfn"], 1)
        phedexStatus.execute(testFileC["lfn"], 1)
        phedexStatus.execute(testFileD["lfn"], 1)
        phedexStatus.execute(testFileE["lfn"], 1)

        associateWorkflow = buffer3Factory(classname = "DBSBufferFiles.AssociateWorkflowToFile")
        associateWorkflow.execute(testFileA["lfn"], "BogusRequestA", "BogusTask")
        associateWorkflow.execute(testFileB["lfn"], "BogusRequestA", "BogusTask")
        associateWorkflow.execute(testFileC["lfn"], "BogusRequestA", "BogusTask")
        associateWorkflow.execute(testFileD["lfn"], "BogusRequestB", "BogusTask")
        associateWorkflow.execute(testFileE["lfn"], "BogusRequestB", "BogusTask")

        # Make the desired subscriptions
        insertSubAction = buffer3Factory(classname = "NewSubscription")
        datasetA = DBSBufferDataset(path = self.testDatasetA)
        datasetB = DBSBufferDataset(path = self.testDatasetB)
        workload = WMWorkloadHelper()
        workload.load(os.path.join(getTestBase(), 'WMComponent_t/PhEDExInjector_t/specs/TestWorkload.pkl'))
        if tier0Mode:
            # Override the settings
            workload.setSubscriptionInformation(custodialSites = ["T0_CH_CERN", "T1_US_FNAL"],
                                                nonCustodialSites = ["T3_CO_Uniandes"],
                                                priority = "Normal", custodialSubType = "Replica",
                                                autoApproveSites = ["T0_CH_CERN"],
                                                dataTier = "RECO")
            workload.setSubscriptionInformation(custodialSites = ["T0_CH_CERN", "T1_UK_RAL"],
                                                nonCustodialSites = [],
                                                autoApproveSites = [],
                                                priority = "High", custodialSubType = "Replica",
                                                dataTier = "RAW")
        insertSubAction.execute(datasetA.exists(), workload.getSubscriptionInformation()[self.testDatasetA])
        insertSubAction.execute(datasetB.exists(), workload.getSubscriptionInformation()[self.testDatasetB])

        return

    def stuffWMBS(self):
        """
        _stuffWMBS_

        Inject a workflow in WMBS and add the subscriptions
        """

        testWorkflow = Workflow(spec = "bogus.xml",
                                owner = "/CN=OU/DN=SomeoneWithPermissions",
                                name = "BogusRequestB", task = "BogusTask", owner_vogroup = "", owner_vorole = "")
        testWorkflow.create()

        testMergeWorkflow = Workflow(spec = "bogus.xml",
                                     owner = "/CN=OU/DN=SomeoneWithPermissions",
                                     name = "BogusRequestB", task = "BogusTask/Merge", owner_vogroup = "", owner_vorole = "")
        testMergeWorkflow.create()

        testWMBSFileset = Fileset(name = "TopFileset")
        testWMBSFileset.create()
        testWMBSFilesetUnmerged = Fileset(name = "UnmergedFileset")
        testWMBSFilesetUnmerged.create()

        testFileA = File(lfn = "/this/is/a/lfnA" , size = 1024, events = 10)
        testFileA.addRun(Run(10, *[12312]))
        testFileA.setLocation('malpaquet')

        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
        testFileB.addRun(Run(10, *[12314]))
        testFileB.setLocation('malpaquet')

        testFileA.create()
        testFileB.create()

        testWMBSFileset.addFile(testFileA)
        testWMBSFilesetUnmerged.addFile(testFileB)
        testWMBSFileset.commit()
        testWMBSFilesetUnmerged.commit()

        testSubscription = Subscription(fileset = testWMBSFileset,
                                        workflow = testWorkflow)
        testSubscription.create()

        testSubscriptionMerge = Subscription(fileset = testWMBSFilesetUnmerged,
                                             workflow = testMergeWorkflow,
                                             type = "Merge")
        testSubscriptionMerge.create()

        return (testSubscription, testSubscriptionMerge)

    def testNormalModeSubscriptions(self):
        """
        _testNormalModeSubscriptions_

        Tests that we can make custodial/non-custodial subscriptions in
        normal operation mode; this time we don't need WMBS for anything.
        Everything is subscribed in one go.

        Check that the requests are correct.
        """

        self.stuffDatabase()
        config = self.createConfig()
        subscriber = PhEDExInjectorSubscriber(config)
        subscriber.setup({})
        subscriber.algorithm({})

        phedexInstance = subscriber.phedex
        subscriptions = phedexInstance.subRequests

        # Let's check /BogusPrimary/Run2012Z-PromptReco-v1/RECO
        # According to the spec, this should be custodial at T1_US_FNAL
        # Non-custodial at T1_UK_RAL and T3_CO_Uniandes
        # Autoapproved in all sites
        # Priority is normal
        self.assertTrue(self.testDatasetA in subscriptions, "Dataset A was not subscribed")
        subInfoA = subscriptions[self.testDatasetA]
        self.assertEqual(len(subInfoA), 3, "Dataset A was not subscribed to all sites")
        for subInfo in subInfoA:
            site = subInfo["node"]
            self.assertEqual(subInfo["priority"], "normal", "Wrong priority for subscription")
            if site == "T1_UK_RAL_MSS" or site == "T3_CO_Uniandes":
                self.assertEqual(subInfo["custodial"], "n", "Wrong custodiality for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "n", "Wrong requestOnly for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "n", "Wrong subscription type for dataset A at %s" % subInfo["node"])
            elif site == "T1_US_FNAL_MSS":
                self.assertEqual(subInfo["custodial"], "y", "Wrong custodiality for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "n", "Wrong requestOnly for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "y", "Wrong subscription type for dataset A at %s" % subInfo["node"])
            else:
                self.fail("Dataset A was subscribed  to a wrong site %s" % site)

        # Now check /BogusPrimary/CRUZET11-v1/RAW
        # According to the spec, this is not custodial anywhere
        # Non-custodial at T1_UK_RAL and T2_CH_CERN
        # Request only at both sites and with high priority
        self.assertTrue(self.testDatasetB in subscriptions, "Dataset B was not subscribed")
        subInfoB = subscriptions[self.testDatasetB]
        self.assertEqual(len(subInfoB), 2, "Dataset B was not subscribed to all sites")
        for subInfo in subInfoB:
            site = subInfo["node"]
            self.assertEqual(subInfo["priority"], "high", "Wrong priority for subscription")
            if site == "T1_UK_RAL_MSS" or site == "T2_CH_CERN":
                self.assertEqual(subInfo["custodial"], "n", "Wrong custodiality for dataset B at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "y", "Wrong requestOnly for dataset B at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "n", "Wrong subscription type for dataset B at %s" % subInfo["node"])
            else:
                self.fail("Dataset B was subscribed to a wrong site %s" % site)

        myThread = threading.currentThread()
        result = myThread.dbi.processData("SELECT COUNT(*) FROM dbsbuffer_dataset_subscription where subscribed = 1")[0].fetchall()
        self.assertEqual(result[0][0], 5, "Not all datasets were marked as subscribed")
        result = myThread.dbi.processData("SELECT site FROM dbsbuffer_dataset_subscription where subscribed = 0")[0].fetchall()
        self.assertEqual(result[0][0], "T1_IT_CNAF", "A non-valid CMS site was subscribed")

        # Reset and run again and make sure that no duplicate subscriptions are created
        myThread.dbi.processData("UPDATE dbsbuffer_dataset_subscription SET subscribed = 0")
        subscriber.algorithm({})
        self.assertEqual(len(subscriptions[self.testDatasetA]), 3)
        self.assertEqual(len(subscriptions[self.testDatasetB]), 2)

        return

    def testTier0ModeSubscriptions(self):
        """
        _testTier0ModeSubscriptions_

        Tests that we can make custodial/non-custodial subscriptions in
        tier0 operation mode; custodial moves are made at block level.
        """

        self.stuffDatabase(tier0Mode = True)
        self.stuffWMBS()
        config = self.createConfig(tier0Mode = True)
        subscriber = PhEDExInjectorSubscriber(config)
        subscriber.setup({})
        subscriber.algorithm({})

        phedexInstance = subscriber.phedex
        subscriptions = phedexInstance.subRequests

        # Let's check /BogusPrimary/Run2012Z-PromptReco-v1/RECO
        # According to the spec, this should be custodial at T0_CH_CERN and T1_US_FNAL
        # Non-custodial at T2_US_Vanderbilt
        # Autoapproved at CERN
        # Priority is normal
        self.assertTrue(self.testDatasetA in subscriptions, "Dataset A was not subscribed")
        subInfoA = subscriptions[self.testDatasetA]
        self.assertEqual(len(subInfoA), 4, "Dataset A was not subscribed to all sites")
        for subInfo in subInfoA:
            site = subInfo["node"]
            if subInfo['level'] == 'block':
                self.assertEqual(subInfo["custodial"], "y", "Wrong custodiality for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "n", "Wrong requestOnly for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "y", "Wrong subscription type for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["node"], "T0_CH_CERN_MSS", "Wrong node for dataset A, block subscription.")
                self.assertEqual(subInfo["priority"], "high", "Wrong priority for subscription")
            elif site == "T0_CH_CERN_MSS":
                self.assertEqual(subInfo["custodial"], "y", "Wrong custodiality for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "n", "Wrong requestOnly for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "n", "Wrong subscription type for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["level"], "dataset", "Wrong level for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["priority"], "normal", "Wrong priority for subscription")
            elif site == "T1_US_FNAL_MSS":
                self.assertEqual(subInfo["custodial"], "y", "Wrong custodiality for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "y", "Wrong requestOnly for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "n", "Wrong subscription type for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["level"], "dataset", "Wrong level for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["priority"], "normal", "Wrong priority for subscription")
            elif site == "T3_CO_Uniandes":
                self.assertEqual(subInfo["custodial"], "n", "Wrong custodiality for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "y", "Wrong requestOnly for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "n", "Wrong subscription type for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["level"], "dataset", "Wrong level for dataset A at %s" % subInfo["node"])
                self.assertEqual(subInfo["priority"], "normal", "Wrong priority for subscription")
            else:
                self.fail("Dataset A was subscribed  to a wrong site %s" % site)
        self.assertEqual(len([x for x in subInfoA if x['level'] == 'block']), 1)

        # Now check /BogusPrimary/CRUZET11-v1/RAW
        # According to the spec, custodial to T0_CH_CERN_MSS and T1_UK_RAL_MSS
        # Request only at both sites and with high priority
        self.assertTrue(self.testDatasetB in subscriptions)
        subInfoB = subscriptions[self.testDatasetB]
        self.assertEqual(len(subInfoB), 2, "Dataset B was not subscribed to all sites")
        for subInfo in subInfoB:
            site = subInfo["node"]
            if site == "T0_CH_CERN_MSS" or site == "T1_UK_RAL_MSS":
                self.assertEqual(subInfo["custodial"], "y", "Wrong custodiality for dataset B at %s" % subInfo["node"])
                self.assertEqual(subInfo["request_only"], "y", "Wrong requestOnly for dataset B at %s" % subInfo["node"])
                self.assertEqual(subInfo["move"], "n", "Wrong subscription type for dataset B at %s" % subInfo["node"])
                self.assertEqual(subInfo["level"], "dataset", "Wrong level for dataset B at %s" % subInfo["node"])
                self.assertEqual(subInfo["priority"], "high", "Wrong priority for subscription")
            else:
                self.fail("Dataset B was subscribed  to a wrong site %s" % site)

        myThread = threading.currentThread()
        result = myThread.dbi.processData("SELECT COUNT(*) FROM dbsbuffer_dataset_subscription where subscribed = 1")[0].fetchall()
        self.assertEqual(result[0][0], 5, "Custodial move datasets were marked as subscribed")

        # Delete the wmbs entries
        myThread.dbi.processData("DELETE FROM wmbs_workflow")
        subscriber.algorithm({})
        self.assertEqual(len(subscriptions[self.testDatasetA]), 5)
        self.assertTrue(self.testDatasetB in subscriptions)
        self.assertEqual(len(subscriptions[self.testDatasetB]), 3)
        self.assertEqual(len([x for x in subscriptions[self.testDatasetB] if x['level'] == 'block']), 1)

        return
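The per-site branches in the two subscription tests above repeat the same five field checks. A small hypothetical helper, sketched here under an invented name (assertSubscription is not part of the original tests), would let each expectation be written as keyword arguments:

def assertSubscription(testcase, subInfo, **expected):
    # Compare each expected PhEDEx subscription field against the mock record.
    for field, value in expected.items():
        testcase.assertEqual(subInfo[field], value,
                             "Wrong %s at %s" % (field, subInfo["node"]))

# e.g. for the dataset-level CERN entry of dataset A:
#   assertSubscription(self, subInfo, custodial="y", request_only="n",
#                      move="n", level="dataset", priority="normal")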
Example #58
class RunJobTest(unittest.TestCase):
    """
    _RunJobTest_


    Test the RunJob object and accessors
    """
    def setUp(self):

        myThread = threading.currentThread()

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.tearDown()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl",
            "WMCore.Agent.Database"
        ],
                                useDefault=False)

        self.daoFactory = DAOFactory(package="WMCore.BossAir",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        resourceControl = ResourceControl()
        resourceControl.insertSite(siteName='Xanadu',
                                   pnn='se.Xanadu',
                                   ceName='Xanadu',
                                   plugin="TestPlugin")
        resourceControl.insertThreshold(siteName = 'Xanadu', taskType = 'Processing', \
                                        maxSlots = 10000, pendingSlots = 10000)

        # Create user
        wmbsFactory = DAOFactory(package="WMCore.WMBS",
                                 logger=myThread.logger,
                                 dbinterface=myThread.dbi)
        newuser = wmbsFactory(classname="Users.New")
        newuser.execute(dn="mnorman",
                        group_name="phgroup",
                        role_name="cmsrole")

        if PY3:
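            # unittest renamed assertItemsEqual to assertCountEqual in Python 3;
            # alias it so the same assertions run under both interpreters.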
            self.assertItemsEqual = self.assertCountEqual

    def tearDown(self):
        """
        Database deletion
        """
        self.testInit.clearDatabase(modules=[
            "WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl",
            "WMCore.Agent.Database"
        ])

        self.testInit.delWorkDir()

        return

    def createJobs(self, nJobs):
        """
        Creates a series of jobGroups for submissions

        """

        testWorkflow = Workflow(spec="dummy",
                                owner="mnorman",
                                name="dummy",
                                task="basicWorkload/Production")
        testWorkflow.create()

        # Create Fileset, Subscription, jobGroup
        testFileset = Fileset(name="dummy")
        testFileset.create()
        testSubscription = Subscription(fileset=testFileset,
                                        workflow=testWorkflow,
                                        type="Processing",
                                        split_algo="FileBased")
        testSubscription.create()

        testJobGroup = JobGroup(subscription=testSubscription)
        testJobGroup.create()

        # Create jobs
        for id in range(nJobs):
            testJob = Job(name='Job_%i' % (id))
            testJob['owner'] = "mnorman"
            testJob['location'] = 'Xanadu'
            testJob.create(testJobGroup)
            testJobGroup.add(testJob)

        testFileset.commit()
        testJobGroup.commit()

        return testJobGroup

    def testA_BulkDAOs(self):
        """
        _BulkDAOs_

        Test the bulk DAOs, which are the only ones we should be using.
        """

        myThread = threading.currentThread()

        jobGroup = self.createJobs(nJobs=10)

        runJobs = []
        for job in jobGroup.jobs:
            runJob = RunJob(jobid=job.exists())
            runJob['status'] = 'New'
            runJob['userdn'] = job['owner']
            runJob['usergroup'] = 'phgroup'
            runJob['userrole'] = 'cmsrole'
            runJobs.append(runJob)

        statusDAO = self.daoFactory(classname="NewState")
        statusDAO.execute(states=['New', 'Gone', 'Dead'])

        result = myThread.dbi.processData(
            "SELECT name FROM bl_status")[0].fetchall()
        self.assertItemsEqual(result, [('Dead', ), ('Gone', ), ('New', )])

        newJobDAO = self.daoFactory(classname="NewJobs")
        newJobDAO.execute(jobs=runJobs)

        result = myThread.dbi.processData(
            "SELECT wmbs_id FROM bl_runjob")[0].fetchall()
        self.assertEqual(result, [(1, ), (2, ), (3, ), (4, ), (5, ), (6, ),
                                  (7, ), (8, ), (9, ), (10, )])

        loadJobsDAO = self.daoFactory(classname="LoadByStatus")
        loadJobs = loadJobsDAO.execute(status="New")
        self.assertEqual(len(loadJobs), 10)

        idList = [x['id'] for x in loadJobs]

        for job in loadJobs:
            job['bulkid'] = 1001

        updateDAO = self.daoFactory(classname="UpdateJobs")
        updateDAO.execute(jobs=loadJobs)

        loadJobs = loadJobsDAO.execute(status='New')
        self.assertEqual(len(loadJobs), 10)
        for job in loadJobs:
            self.assertEqual(job['bulkid'], '1001')

        loadWMBSDAO = self.daoFactory(classname="LoadByWMBSID")
        for job in jobGroup.jobs:
            jDict = loadWMBSDAO.execute(jobs=[job])
            self.assertEqual(job['id'], jDict[0]['jobid'])

        setStatusDAO = self.daoFactory(classname="SetStatus")
        setStatusDAO.execute(jobs=idList, status='Dead')

        result = loadJobsDAO.execute(status='Dead')
        self.assertEqual(len(result), 10)
        result = loadJobsDAO.execute(status='New')
        self.assertEqual(len(result), 0)

        runningJobDAO = self.daoFactory(classname="LoadRunning")
        runningJobs = runningJobDAO.execute()

        self.assertEqual(len(runningJobs), 10)

        completeDAO = self.daoFactory(classname="CompleteJob")
        completeDAO.execute(jobs=idList)

        result = loadJobsDAO.execute(status='Dead', complete='0')
        self.assertEqual(len(result), 10)

        deleteDAO = self.daoFactory(classname="DeleteJobs")
        deleteDAO.execute(jobs=idList)

        result = loadJobsDAO.execute(status='Dead')
        self.assertEqual(len(result), 0)

        return

    def testB_CheckWMBSBuild(self):
        """
        _CheckWMBSBuild_

        Trivial test that checks whether we can build
        runJobs from WMBS jobs
        """

        jobGroup = self.createJobs(nJobs=10)

        for job in jobGroup.jobs:
            rj = RunJob()
            rj.buildFromJob(job=job)
            self.assertEqual(job['id'], rj['jobid'])
            self.assertEqual(job['retry_count'], rj['retry_count'])
            job2 = rj.buildWMBSJob()
            self.assertEqual(job['id'], job2['id'])
            self.assertEqual(job['retry_count'], job2['retry_count'])

        return

    def testC_CheckWMBSBuildRoleAndGroup(self):
        """
        _CheckWMBSBuildRoleAndGroup_

        Trivial test that checks whether we can build runJobs from WMBS jobs
        while preserving the user role and group
        """
        jobGroup = []

        # Create jobs
        for id in range(10):
            testJob = Job(name='Job_%i' % (id))
            testJob['owner'] = "mnorman"
            testJob['usergroup'] = "mygroup_%i" % id
            testJob['userrole'] = "myrole_%i" % id
            testJob['location'] = 'Xanadu'
            jobGroup.append(testJob)

        for job in jobGroup:
            rj = RunJob()
            rj.buildFromJob(job=job)
            self.assertEqual(job['usergroup'], rj['usergroup'])
            self.assertEqual(job['userrole'], rj['userrole'])
            job2 = rj.buildWMBSJob()
            self.assertEqual(job['usergroup'], job2['usergroup'])
            self.assertEqual(job['userrole'], job2['userrole'])
        return
Example #59
class SizeBasedTest(unittest.TestCase):
    """
    _SizeBasedTest_

    Test size based job splitting.
    """
    
    def setUp(self):
        """
        _setUp_

        Create three subscriptions: one that contains a single file, one that
        contains ten files at a single site, and one that contains ten files
        where half of them are also available at a second site.  Every file
        is 1000 bytes and holds 100 events.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)
        
        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)
        
        locationAction = daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "site1", seName = "somese.cern.ch")
        locationAction.execute(siteName = "site2", seName = "otherse.cern.ch")
        
        self.multipleFileFileset = Fileset(name = "TestFileset1")
        self.multipleFileFileset.create()
        for i in range(10):
            newFile = File(makeUUID(), size = 1000, events = 100,
                           locations = set(["somese.cern.ch"]))
            newFile.create()
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        self.singleFileFileset = Fileset(name = "TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name", size = 1000, events = 100,
                       locations = set(["somese.cern.ch"]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()


        self.multipleSiteFileset = Fileset(name = "TestFileset3")
        self.multipleSiteFileset.create()
        for i in range(5):
            newFile = File(makeUUID(), size = 1000, events = 100)
            newFile.setLocation("somese.cern.ch")
            newFile.create()
            self.multipleSiteFileset.addFile(newFile)
        for i in range(5):
            newFile = File(makeUUID(), size = 1000, events = 100)
            newFile.setLocation(["somese.cern.ch","otherse.cern.ch"])
            newFile.create()
            self.multipleSiteFileset.addFile(newFile)
        self.multipleSiteFileset.commit()

        testWorkflow = Workflow(spec = "spec.xml", owner = "Steve",
                                name = "wf001", task="Test")
        testWorkflow.create()
        self.multipleFileSubscription = Subscription(fileset = self.multipleFileFileset,
                                                     workflow = testWorkflow,
                                                     split_algo = "SizeBased",
                                                     type = "Processing")
        self.multipleFileSubscription.create()
        self.singleFileSubscription = Subscription(fileset = self.singleFileFileset,
                                                   workflow = testWorkflow,
                                                   split_algo = "SizeBased",
                                                   type = "Processing")
        self.singleFileSubscription.create()
        self.multipleSiteSubscription = Subscription(fileset = self.multipleSiteFileset,
                                                     workflow = testWorkflow,
                                                     split_algo = "SizeBased",
                                                     type = "Processing")
        self.multipleSiteSubscription.create()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear out WMBS.
        """
        myThread = threading.currentThread()

        if myThread.transaction is None:
            myThread.transaction = Transaction(self.dbi)
            
        myThread.transaction.begin()
            
        factory = WMFactory("WMBS", "WMCore.WMBS")
        destroy = factory.loadObject(myThread.dialect + ".Destroy")
        destroyworked = destroy.execute(conn = myThread.transaction.conn)
        
        if not destroyworked:
            raise Exception("Could not complete WMBS tear down.")
            
        myThread.transaction.commit()
        return    

    def testExactEvents(self):
        """
        _testExactEvents_

        Test event based job splitting when the number of events per job is
        exactly the same as the number of events in the input file.
        """

        splitter = SplitterFactory()
        jobFactory = splitter(self.singleFileSubscription)

        jobGroups = jobFactory(size_per_job = 1000)

        assert len(jobGroups) == 1, \
               "ERROR: JobFactory didn't return one JobGroup."

        assert len(jobGroups[0].jobs) == 1, \
               "ERROR: JobFactory didn't create a single job."

        job = jobGroups[0].jobs.pop()

        assert job.getFiles(type = "lfn") == ["/some/file/name"], \
               "ERROR: Job contains unknown files."
        

        return


    def testMultipleFiles(self):
        """
        _testMultipleFiles_
        
        Tests splitting ten 1000 byte files with size_per_job = 1000, which
        should yield ten single-file jobs.
        """

        splitter   = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)

        jobGroups  = jobFactory(size_per_job = 1000)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 10)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 1)

        return


    def testMultipleFiles2000(self):
        """
        _testMultipleFiles2000_
        
        Tests splitting the same ten files with size_per_job = 2000, which
        should yield five jobs with two files each.
        """

        splitter   = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)
        #Test it with two files per job
        jobGroups  = jobFactory(size_per_job = 2000)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 5)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 2)

        return


    def testMultipleFiles2500(self):
        """
        _testMultipleFiles2500_
        
        Tests splitting with size_per_job = 2500, a size the files cannot fill
        evenly; the result should still be five two-file jobs.
        """

        splitter   = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)


        #Now test it with a size that can't be broken up evenly
        jobGroups  = jobFactory(size_per_job = 2500)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 5)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 2)

        return


    def testMultipleFiles500(self):
        """
        _testMultipleFiles500_
        
        Tests splitting with size_per_job = 500, smaller than any single file;
        the splitter should fall back to one job per file.
        """

        splitter   = SplitterFactory()
        jobFactory = splitter(self.multipleFileSubscription)


        #Test it with a size smaller than any single file; should return one
        #job per file
        jobGroups  = jobFactory(size_per_job = 500)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 10)

        return


    def testMultipleSites(self):
        """
        _testMultipleSites_

        Tests how to break up files at different locations
        """

        splitter   = SplitterFactory()
        jobFactory = splitter(self.multipleSiteSubscription)

        jobGroups  = jobFactory(size_per_job = 1000)

        self.assertEqual(len(jobGroups), 2)
        self.assertEqual(len(jobGroups[0].jobs), 5)
        for job in jobGroups[0].jobs:
            self.assertEqual(len(job.getFiles()), 1)
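Taken together, the size_per_job cases above pin down the packing arithmetic: ten 1000 byte files yield 10, 5, 5 and 10 jobs for sizes of 1000, 2000, 2500 and 500. A minimal standalone sketch of that greedy packing follows (pack_by_size is an invented name; this is not the actual SizeBased implementation, only the arithmetic the assertions assume):

def pack_by_size(file_sizes, size_per_job):
    """Greedy, order-preserving packing: start a new job when adding the
    next file would exceed size_per_job (every job holds at least one file)."""
    jobs, current, current_size = [], [], 0
    for size in file_sizes:
        if current and current_size + size > size_per_job:
            jobs.append(current)
            current, current_size = [], 0
        current.append(size)
        current_size += size
    if current:
        jobs.append(current)
    return jobs

# Ten 1000-byte files, mirroring the fixtures above:
sizes = [1000] * 10
assert len(pack_by_size(sizes, 1000)) == 10   # one file per job
assert len(pack_by_size(sizes, 2000)) == 5    # two files per job
assert len(pack_by_size(sizes, 2500)) == 5    # 2500 cannot be filled evenly, still two per job
assert len(pack_by_size(sizes, 500)) == 10    # smaller than a file: one file per job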
Example #60
class PhEDExInjectorPollerTest(unittest.TestCase):
    """
    _PhEDExInjectorPollerTest_

    Unit tests for the PhEDExInjector.  Create some data inside DBSBuffer
    and then have the PhEDExInjector upload the data to PhEDEx.  Pull the data
    back down and verify that everything is complete.
    """
    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """
        self.phedexURL = "https://cmsweb.cern.ch/phedex/datasvc/json/test"
        self.dbsURL = "http://vocms09.cern.ch:8880/cms_dbs_int_local_yy_writer/servlet/DBSServlet"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)

        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.DBSBuffer.Database",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="DBSBufferFiles.AddLocation")
        locationAction.execute(siteName="srm-cms.cern.ch")
        locationAction.execute(siteName="se.fnal.gov")

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/%s/PromptReco-v1/RECO" % makeUUID()
        self.testDatasetB = "/%s/CRUZET11-v1/RAW" % makeUUID()
        self.phedex = PhEDEx({"endpoint": self.phedexURL}, "json")

        return

    def tearDown(self):
        """
        _tearDown_

        Delete the database.
        """
        self.testInit.clearDatabase()

    def stuffDatabase(self, spec="TestWorkload.pkl"):
        """
        _stuffDatabase_

        Fill the dbsbuffer with some files and blocks.  We'll insert a total
        of 5 files spanning two blocks.  There will be a total of two datasets
        inserted into the database.

        We'll inject files with the location set as an SE name as well as a
        PhEDEx node name.
        """
        myThread = threading.currentThread()

        buffer3Factory = DAOFactory(package="WMComponent.DBS3Buffer",
                                    logger=myThread.logger,
                                    dbinterface=myThread.dbi)
        insertWorkflow = buffer3Factory(classname="InsertWorkflow")
        insertWorkflow.execute("BogusRequest", "BogusTask", 0, 0, 0, 0)

        checksums = {"adler32": "1234", "cksum": "5678"}
        testFileA = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileA.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileA.setDatasetPath(self.testDatasetA)
        testFileA.addRun(Run(2, *[45]))
        testFileA.create()

        testFileB = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileB.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileB.setDatasetPath(self.testDatasetA)
        testFileB.addRun(Run(2, *[45]))
        testFileB.create()

        testFileC = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileC.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileC.setDatasetPath(self.testDatasetA)
        testFileC.addRun(Run(2, *[45]))
        testFileC.create()

        self.testFilesA.append(testFileA)
        self.testFilesA.append(testFileB)
        self.testFilesA.append(testFileC)

        testFileD = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileD.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileD.setDatasetPath(self.testDatasetB)
        testFileD.addRun(Run(2, *[45]))
        testFileD.create()

        testFileE = DBSBufferFile(lfn=makeUUID(),
                                  size=1024,
                                  events=10,
                                  checksums=checksums,
                                  locations=set(["srm-cms.cern.ch"]))
        testFileE.setAlgorithm(appName="cmsRun",
                               appVer="CMSSW_2_1_8",
                               appFam="RECO",
                               psetHash="GIBBERISH",
                               configContent="MOREGIBBERISH")
        testFileE.setDatasetPath(self.testDatasetB)
        testFileE.addRun(Run(2, *[45]))
        testFileE.create()

        self.testFilesB.append(testFileD)
        self.testFilesB.append(testFileE)

        uploadFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                   logger=myThread.logger,
                                   dbinterface=myThread.dbi)
        datasetAction = uploadFactory(classname="NewDataset")
        createAction = uploadFactory(classname="CreateBlocks")

        datasetAction.execute(datasetPath=self.testDatasetA)
        datasetAction.execute(datasetPath=self.testDatasetB)

        self.blockAName = self.testDatasetA + "#" + makeUUID()
        self.blockBName = self.testDatasetB + "#" + makeUUID()

        newBlockA = DBSBlock(name=self.blockAName,
                             location="srm-cms.cern.ch",
                             das=None,
                             workflow=None)
        newBlockA.setDataset(self.testDatasetA, 'data', 'VALID')
        newBlockA.status = 'Closed'

        newBlockB = DBSBlock(name=self.blockBName,
                             location="srm-cms.cern.ch",
                             das=None,
                             workflow=None)
        newBlockB.setDataset(self.testDatasetB, 'data', 'VALID')
        newBlockB.status = 'Closed'

        createAction.execute(blocks=[newBlockA, newBlockB])

        bufferFactory = DAOFactory(package="WMComponent.DBSBuffer.Database",
                                   logger=myThread.logger,
                                   dbinterface=myThread.dbi)

        setBlock = bufferFactory(classname="DBSBufferFiles.SetBlock")
        setBlock.execute(testFileA["lfn"], self.blockAName)
        setBlock.execute(testFileB["lfn"], self.blockAName)
        setBlock.execute(testFileC["lfn"], self.blockAName)
        setBlock.execute(testFileD["lfn"], self.blockBName)
        setBlock.execute(testFileE["lfn"], self.blockBName)

        fileStatus = bufferFactory(classname="DBSBufferFiles.SetStatus")
        fileStatus.execute(testFileA["lfn"], "LOCAL")
        fileStatus.execute(testFileB["lfn"], "LOCAL")
        fileStatus.execute(testFileC["lfn"], "LOCAL")
        fileStatus.execute(testFileD["lfn"], "LOCAL")
        fileStatus.execute(testFileE["lfn"], "LOCAL")

        associateWorkflow = buffer3Factory(
            classname="DBSBufferFiles.AssociateWorkflowToFile")
        associateWorkflow.execute(testFileA["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileB["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileC["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileD["lfn"], "BogusRequest",
                                  "BogusTask")
        associateWorkflow.execute(testFileE["lfn"], "BogusRequest",
                                  "BogusTask")

        return

    def createConfig(self):
        """
        _createConfig_

        Create a config for the PhEDExInjector with paths to the test DBS and
        PhEDEx instances.
        """
        config = self.testInit.getConfiguration()
        config.component_("DBSInterface")
        config.DBSInterface.globalDBSUrl = self.dbsURL

        config.component_("PhEDExInjector")
        config.PhEDExInjector.phedexurl = self.phedexURL
        config.PhEDExInjector.subscribeMSS = True
        config.PhEDExInjector.group = "Saturn"
        config.PhEDExInjector.pollInterval = 30
        config.PhEDExInjector.subscribeInterval = 60

        return config

    def retrieveReplicaInfoForBlock(self, blockName):
        """
        _retrieveReplicaInfoForBlock_

        Retrieve the replica information for a block.  It takes several minutes
        after a block is injected for the statistics to be calculated, so this
        will block until that information is available.
        """
        attempts = 0

        while attempts < 15:
            result = self.phedex.getReplicaInfoForFiles(block=blockName)

            if "phedex" in result:
                if "block" in result["phedex"]:
                    if len(result["phedex"]["block"]) != 0:
                        return result["phedex"]["block"][0]

            attempts += 1
            time.sleep(20)

        logging.info("Could not retrieve replica info for block: %s" %
                     blockName)
        return None

    @attr("integration")
    def testPoller(self):
        """
        _testPoller_

        Stuff the database and have the poller upload files to PhEDEx.  Retrieve
        replica information for the uploaded blocks and verify that all files
        have been injected.
        """
        return
        self.stuffDatabase()

        poller = PhEDExInjectorPoller(self.createConfig())
        poller.setup(parameters=None)
        poller.algorithm(parameters=None)

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        goldenLFNs = []
        for file in self.testFilesA:
            goldenLFNs.append(file["lfn"])

        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])

        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        goldenLFNs = []
        for file in self.testFilesB:
            goldenLFNs.append(file["lfn"])

        for replicaFile in replicaInfo["file"]:
            assert replicaFile["name"] in goldenLFNs, \
                   "Error: Extra file in replica block: %s" % replicaFile["name"]
            goldenLFNs.remove(replicaFile["name"])

        assert len(goldenLFNs) == 0, \
               "Error: Files missing from PhEDEx replica: %s" % goldenLFNs

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.DBSUpload.Database",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)
        setBlock = daofactory(classname="SetBlockStatus")
        setBlock.execute(self.blockAName,
                         locations=None,
                         open_status="InGlobalDBS")

        poller.algorithm(parameters=None)
        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockAName)
        assert replicaInfo["is_open"] == "n", \
               "Error: block should be closed."

        replicaInfo = self.retrieveReplicaInfoForBlock(self.blockBName)
        assert replicaInfo["is_open"] == "y", \
               "Error: block should be open."
        return

    def test_CustodialSiteA(self):
        """
        _CustodialSiteA_

        Check the custodialSite handling through the DAO, since no certificate
        is available for a live PhEDEx call.
        First make sure we properly handle having no custodialSite.
        """

        self.stuffDatabase()

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMComponent.PhEDExInjector.Database",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)
        getUninjected = daofactory(classname="GetUninjectedFiles")

        uninjectedFiles = getUninjected.execute()
        self.assertEqual(list(uninjectedFiles.keys()), ['srm-cms.cern.ch'])

        return