Example #1
class CouchTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        # mock generator instance to communicate some configuration values
        self.generator = utils.AlertGeneratorMock(self.config)        
        self.testProcesses = []
        self.testName = self.id().split('.')[-1]
         
        
    def tearDown(self):       
        self.testInit.delWorkDir()
        self.generator = None
        utils.terminateProcesses(self.testProcesses)


    def testAlertGeneratorCouchDbSizePollerBasic(self):
        config = getConfig("/tmp")
        try:
            poller = CouchDbSizePoller(config.AlertGenerator.couchDbSizePoller, self.generator)
        except Exception as ex:
            self.fail("%s: exception: %s" % (self.testName, ex))
        poller.check() # -> on real system dir may result in permission denied
        poller._dbDirectory = "/dev"
        poller.check() # -> OK
        
        # test failing during set up
        poller = CouchDbSizePoller(config.AlertGenerator.couchDbSizePoller, self.generator)
        poller._query = "nonsense query"
        poller._dbDirectory = poller._getDbDir()
        poller.check()
        self.assertEquals(poller._dbDirectory, None)        
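
The empty testProcesses list pairs with the utils.terminateProcesses call in tearDown: any helper process a test spawns should be appended to it so it gets reaped even when the test fails. A minimal sketch of that pattern (the worker function is illustrative, not part of the original):

    # inside a test method of CouchTest; someWorker stands in for a real helper
    import multiprocessing
    import time

    def someWorker():
        time.sleep(60)

    proc = multiprocessing.Process(target=someWorker)
    proc.start()
    self.testProcesses.append(proc)  # utils.terminateProcesses reaps it in tearDown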
Example #2
 def __init__(self, testClassName, dropExistingDb=True):
     TestInit.__init__(self, testClassName)
     self.databases = []
     self.couch = None
     # For experimenting it's useful to have CouchDB still populated with
     # the testing data after a run - with tearDownCouch commented out,
     # this flag prevents the database from being re-initialized
     self.dropExistingDb = dropExistingDb
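
This constructor matches the CouchDB-aware TestInit variant (TestInitCouchApp in WMQuality); a sketch of how the dropExistingDb flag is meant to be used, with the test class and database name illustrative rather than taken from the original:

    class CouchAppTest(unittest.TestCase):
        def setUp(self):
            # keep data from previous runs around while experimenting
            self.testInit = TestInitCouchApp(__file__, dropExistingDb=False)
            self.testInit.setupCouch("couchapp_t")  # illustrative DB name

        def tearDown(self):
            # commented out on purpose: leaves the test data in CouchDB
            # self.testInit.tearDownCouch()
            pass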
Example #3
class MockPluginTest(unittest.TestCase):
    def setUp(self):
        self.testinit = TestInit(__file__)
        self.workdir = self.testinit.generateWorkDir()
        jobList[0]['cache_dir'] = self.workdir

    def tearDown(self):
        self.testinit.delWorkDir()

    def testInit(self):
        wrongconfig = Configuration()
        wrongconfig.section_('BossAir')
        self.assertRaises( BossAirPluginException, MockPlugin, wrongconfig )

        wrongconfig.BossAir.section_('MockPlugin')
        #The config does not contain the fakeReport parameter
        self.assertRaises( BossAirPluginException, MockPlugin, wrongconfig )

        #The fakeReport does not exist
        wrongconfig.BossAir.MockPlugin.fakeReport = 'asdf'
        self.assertRaises( BossAirPluginException, MockPlugin, wrongconfig )

    def testTrack(self):
        mp = MockPlugin(config)

        #Check that the job has been scheduled
        self.assertEquals({}, mp.jobsScheduledEnd)

        # Don't be racy
        currentTime = datetime.now()
        #id is the only required parameter in the job dictionary
        res = mp.track( jobList, currentTime )
        self.assertTrue( 1L in mp.jobsScheduledEnd )
        #check scheduled end (N.B. this includes 20% of random time)
        scheduledEnd = mp.jobsScheduledEnd[1L]
        timeTillJob = scheduledEnd - currentTime
        self.assertTrue( timeTillJob >= timedelta(minutes = TEST_JOB_LEN - 1), \
                         "Time till Job %s !>= Delta %s" % (timeTillJob, \
                         timedelta(minutes = TEST_JOB_LEN - 1)))
        self.assertTrue( timeTillJob <= timedelta(minutes = TEST_JOB_LEN*120/100 + 1), \
                         "Time till Job %s !<= Delta %s" % (timeTillJob, \
                         timedelta(minutes = TEST_JOB_LEN * 120/100 + 1)) )
        #the job is running
        self.assertEquals( 'Running', res[0][0]['status'])
        self.assertEquals( 'Running', res[1][0]['status'])
        self.assertEquals( [], res[2])

        #the job is not running anymore
        mp.jobsScheduledEnd[1L] = datetime(1900,1,1)
        res = mp.track( jobList )
        self.assertEquals( [], res[0])
        self.assertEquals( 'Done', res[1][0]['status'])
        self.assertEquals( 'Done', res[2][0]['status'])

        del mp
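
The failure cases in testInit above imply what a valid MockPlugin configuration needs: a BossAir section, a MockPlugin subsection, and a fakeReport attribute pointing at a file that exists. A sketch of such a config (the report path is illustrative):

    goodconfig = Configuration()
    goodconfig.section_('BossAir')
    goodconfig.BossAir.section_('MockPlugin')
    # unlike 'asdf' above, this must reference an existing file
    goodconfig.BossAir.MockPlugin.fakeReport = os.path.join(
        os.path.dirname(__file__), 'FakeReport.pkl')
    mp = MockPlugin(goodconfig)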
Example #4
class SetupCMSSWPsetTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testDir = self.testInit.generateWorkDir()
        sys.path.insert(0, os.path.join(WMCore.WMBase.getTestBase(),
                                        "WMCore_t/WMRuntime_t/Scripts_t"))

    def tearDown(self):
        sys.path.remove(os.path.join(WMCore.WMBase.getTestBase(),
                                     "WMCore_t/WMRuntime_t/Scripts_t"))
        del sys.modules["WMTaskSpace"]
        self.testInit.delWorkDir()

    def createTestStep(self):
        """
        _createTestStep_

        Create a test step that can be passed to the setup script.

        """
        newStep = WMStep("cmsRun1")
        newStepHelper = CMSSWStepHelper(newStep)
        newStepHelper.setStepType("CMSSW")
        newStepHelper.setGlobalTag("SomeGlobalTag")
        stepTemplate = StepFactory.getStepTemplate("CMSSW")
        stepTemplate(newStep)
        newStep.application.command.configuration = "PSet.py"
        newStep.application.multicore.numberOfCores = "auto"
        return newStepHelper

    def loadProcessFromPSet(self):
        """
        _loadProcessFromPSet_

        This requires changing the working directory,
        do so in a safe manner to encapsulate the change to this method only
        """

        currentPath = os.getcwd()
        loadedProcess = None
        try:
            if not os.path.isdir(self.testDir):
                raise RuntimeError("work dir %s does not exist" % self.testDir)
            os.chdir(self.testDir)
            testFile = "PSet.py"
            pset = imp.load_source('process', testFile)
            loadedProcess = pset.process
        except Exception as ex:
            self.fail("An exception was caught while trying to load the PSet, %s" % str(ex))
        finally:
            # always restore the original working directory
            os.chdir(currentPath)

        return loadedProcess
Example #5
 def setUp(self):
     """set up"""
     self.testInit = TestInit(__file__)
     self.testDir  = self.testInit.generateWorkDir()
     self.normalSave = "%s/WMCore_Agent_Configuration_t_normal.py" % self.testDir
     self.docSave = "%s/WMCore_Agent_Configuration_t_documented.py" % self.testDir
     self.commentSave = "%s/WMCore_Agent_Configuration_t_commented.py" % self.testDir
Example #6
    def setUp(self):
        # stolen from CMSSWExecutor_t. thanks, dave
        self.testInit = TestInit(__file__)
        self.testDir = self.testInit.generateWorkDir()
        shutil.copyfile('/etc/hosts', os.path.join(self.testDir, 'testfile'))

        self.workload = newWorkload("UnitTests")
        self.task = self.workload.newTask("DeleterTask")
        stepHelper = step = self.task.makeStep("DeleteTest")
        self.step = stepHelper.data
        self.actualStep = stepHelper
        template = DeleteTemplate()
        template(self.step)
        self.helper = template.helper(self.step)
        self.executor = StepFactory.getStepExecutor(self.actualStep.stepType())

        taskMaker = TaskMaker(self.workload, self.testDir)
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        self.sandboxDir = "%s/UnitTests" % self.testDir

        self.task.build(self.testDir)
        sys.path.insert(0, self.testDir)
        sys.path.insert(0, self.sandboxDir)


        self.job = Job(name = "/UnitTest/DeleterTask/DeleteTest-test-job")

        binDir = inspect.getsourcefile(ModuleLocator)
        binDir = binDir.replace("__init__.py", "bin")

        if not binDir in os.environ['PATH']:
            os.environ['PATH'] = "%s:%s" % (os.environ['PATH'], binDir)
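
This setUp pushes two entries onto sys.path and extends PATH, but the example stops before tearDown; a plausible counterpart, mirroring the cleanup in Example #4, would be:

    def tearDown(self):
        # undo the sys.path changes from setUp and remove the work dir
        sys.path.remove(self.testDir)
        sys.path.remove(self.sandboxDir)
        self.testInit.delWorkDir()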
Example #7
    def setUp(self):

        myThread = threading.currentThread()

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.tearDown()
        self.testInit.setSchema(customModules = ["WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl", "WMCore.Agent.Database"],
                                useDefault = False)

        self.daoFactory = DAOFactory(package = "WMCore.BossAir",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        resourceControl = ResourceControl()
        resourceControl.insertSite(siteName = 'Xanadu', seName = 'se.Xanadu',
                                   ceName = 'Xanadu', plugin = "TestPlugin")
        resourceControl.insertThreshold(siteName = 'Xanadu', taskType = 'Processing', \
                                        maxSlots = 10000, pendingSlots = 10000)

        # Create user
        wmbsFactory = DAOFactory(package = "WMCore.WMBS",
                                 logger = myThread.logger,
                                 dbinterface = myThread.dbi)
        newuser = wmbsFactory(classname = "Users.New")
        newuser.execute(dn = "mnorman", group_name = "phgroup", role_name = "cmsrole")
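
setUps that call setSchema are normally paired with a tearDown that wipes the schema again; a sketch following the pattern used in Example #40:

    def tearDown(self):
        # drop everything setSchema created so the next test starts clean
        self.testInit.clearDatabase()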
Example #8
    def setUp(self):
        "make a logger instance and create tables"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()

        myThread = threading.currentThread()
        if myThread.dialect == "MySQL":
            myThread.create = """
create table test (bind1 varchar(20), bind2 varchar(20)) ENGINE=InnoDB """
        if myThread.dialect == "SQLite":
            myThread.create = """
                create table test (bind1 varchar(20), bind2 varchar(20))"""

        myThread.insert = """
insert into test (bind1, bind2) values (:bind1, :bind2) """
        myThread.insert_binds = [
            {"bind1": "value1a", "bind2": "value2a"},
            {"bind1": "value1b", "bind2": "value2b"},
            {"bind1": "value1c", "bind2": "value2d"},
        ]
        myThread.select = "select * from test"

        myThread = threading.currentThread()
        myThread.transaction = Transaction(myThread.dbi)
        myThread.transaction.processData(myThread.create)
        myThread.transaction.processData(myThread.insert, myThread.insert_binds)
        myThread.transaction.commit()

        return
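
A companion test method for this setUp only needs to run the prepared select and check that all three bind rows made it into the table; a minimal sketch:

    def testSelect(self):
        myThread = threading.currentThread()
        resultSets = myThread.dbi.processData(myThread.select)
        rows = resultSets[0].fetchall()
        self.assertEqual(len(rows), 3)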
Example #9
 def setUp(self):
     self.testInit = TestInit(__file__)
     self.testInit.setLogging(logLevel = logging.DEBUG)
     self.testDir = self.testInit.generateWorkDir()
     self.config = getConfig(self.testDir)
     # mock generator instance to communicate some configuration values
     self.generator = utils.AlertGeneratorMock(self.config)
Example #10
    def setUp(self):
        """
        _setUp_

        Create two subscriptions: One that contains a single file and one that
        contains multiple files.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        self.nSites = 2
        locationAction = daofactory(classname = "Locations.New")
        for site in range(self.nSites):
            locationAction.execute(siteName = "site%i" % site,
                                   pnn = "T2_CH_CERN_%i" % site)

        return
Example #11
    def setUp(self):
        """
        _setUp_

        Create two subscriptions: One that contains a single file and one that
        contains multiple files.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase=True)
        self.testInit.setSchema(customModules=["WMCore.WMBS"])

        self.splitterFactory = SplitterFactory(package="WMCore.JobSplitting")

        self.myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=logging,
                                     dbinterface=self.myThread.dbi)

        myResourceControl = ResourceControl()
        myResourceControl.insertSite("T1_US_FNAL", 1000, 2000, "T1_US_FNAL_Disk", "T1_US_FNAL")
        myResourceControl.insertSite("T2_CH_CERN", 1000, 2000, "T2_CH_CERN", "T2_CH_CERN")

        self.performanceParams = {'timePerEvent': 12,
                                  'memoryRequirement': 2300,
                                  'sizePerEvent': 400}
        # dummy workflow
        self.testWorkflow = Workflow(spec="spec.xml", owner="dmwm", name="testWorkflow", task="Test")
        self.testWorkflow.create()

        return
Example #12
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testDir = self.testInit.generateWorkDir()

        # shut up SiteLocalConfig
        os.environ['CMS_PATH'] = os.getcwd()
        workload = copy.deepcopy(testWorkloads.workload)
        task = workload.getTask("Production")
        step = task.getStep("stageOut1")
        # want to get the cmsstep so I can make the Report
        cmsstep = task.getStep('cmsRun1')
        self.cmsstepdir = os.path.join( self.testDir, 'cmsRun1')
        os.mkdir( self.cmsstepdir )
        open( os.path.join( self.cmsstepdir, '__init__.py'),'w').close()
        open( os.path.join( self.cmsstepdir, 'Report.pkl'),'w').close()

        cmsbuilder = CMSSWBuilder.CMSSW()
        cmsbuilder( cmsstep.data, 'Production', self.cmsstepdir )
        realstep = StageOutTemplate.StageOutStepHelper(step.data)
        realstep.disableRetries()
        self.realstep = realstep
        self.stepDir = os.path.join( self.testDir, 'stepdir')
        os.mkdir( self.stepDir )
        builder = StageOutBuilder.StageOut()
        builder( step.data, 'Production', self.stepDir)
Example #13
    def setUp(self):
        """
        Set up for initializing the ResultSet test class

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.testInit.setSchema(customModules = ["WMCore.WMBS"],
        #                           useDefault = False)

        self.mydialect = self.testInit.getConfiguration().CoreDatabase.dialect
        
        self.myThread = threading.currentThread()
        
        if self.mydialect.lower() == 'mysql':
            create_sql = "create table test (bind1 varchar(20), bind2 varchar(20)) ENGINE=InnoDB "
        elif self.mydialect.lower() == 'sqlite':
            create_sql = "create table test (bind1 varchar(20), bind2 varchar(20))"
        else:
            create_sql = "create table test (bind1 varchar(20), bind2 varchar(20))"
        
        #Create a table and insert several pieces
        self.myThread.dbi.processData(create_sql)
        return
Example #14
 def setUp(self):
     self.testInit = TestInit(__file__)
     self.testInit.setLogging(logLevel = logging.DEBUG)
     self.addr = "tcp://127.0.0.1:5557"
     self.ctrl = "tcp://127.0.0.1:5559"
     # simple printer Alert messages handler
     self.printer = lambda x : sys.stdout.write("printer handler: '%s'\n" % str(x))
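
The address/control endpoints and the printer handler suggest this test wires up an Alerts ZMQ receiver; a hedged sketch of that wiring, assuming the Receiver class from WMCore.Alerts.ZMQ with a (target, handler, control) signature:

    receiver = Receiver(self.addr, self.printer, self.ctrl)
    receiver.startReceiver()  # Alerts sent to self.addr now reach the printer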
Example #15
    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """
        self.phedexURL = "https://cmsweb.cern.ch/phedex/datasvc/json/test"
        self.dbsURL = "http://vocms09.cern.ch:8880/cms_dbs_int_local_yy_writer/servlet/DBSServlet"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase = True)

        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationAction = daofactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "srm-cms.cern.ch")
        locationAction.execute(siteName = "se.fnal.gov")

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/%s/PromptReco-v1/RECO" % makeUUID()
        self.testDatasetB = "/%s/CRUZET11-v1/RAW" % makeUUID()
        self.phedex = PhEDEx({"endpoint": self.phedexURL}, "json")

        return
Example #16
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS",'WMCore.Agent.Database',
                                                 "WMCore.ResourceControl"],
                                 useDefault = False)
        self.testDir = self.testInit.generateWorkDir()

        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")

        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")

        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")

        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3

        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")
Example #17
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """
        self.queueDB = 'workqueue_t'
        self.queueInboxDB = 'workqueue_t_inbox'
        self.globalQDB = 'workqueue_t_global'
        self.globalQInboxDB = 'workqueue_t_global_inbox'
        self.localQDB = 'workqueue_t_local'
        self.localQInboxDB = 'workqueue_t_local_inbox'
        self.localQDB2 = 'workqueue_t_local2'
        self.localQInboxDB2 = 'workqueue_t_local2_inbox'

        self.setSchema()
        self.testInit = TestInit('WorkQueueTest')
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = self.schema,
                                useDefault = False)
        self.testInit.setupCouch(self.queueDB, *self.couchApps)
        self.testInit.setupCouch(self.queueInboxDB, *self.couchApps)
        self.testInit.setupCouch(self.globalQDB, *self.couchApps)
        self.testInit.setupCouch(self.globalQInboxDB , *self.couchApps)
        self.testInit.setupCouch(self.localQDB, *self.couchApps)
        self.testInit.setupCouch(self.localQInboxDB, *self.couchApps)
        self.testInit.setupCouch(self.localQDB2, *self.couchApps)
        self.testInit.setupCouch(self.localQInboxDB2, *self.couchApps)
        
        self.workDir = self.testInit.generateWorkDir()
        return
Example #18
    def setUp(self):
        """
        _setUp_

        Create two subscriptions: One that contains a single file and one that
        contains multiple files.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"], useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger, dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="s1", seName="somese.cern.ch")
        locationAction.execute(siteName="s2", seName="otherse.cern.ch")

        self.testWorkflow = Workflow(spec="spec.xml", owner="Steve", name="wf001", task="Test")
        self.testWorkflow.create()

        self.performanceParams = {"timePerEvent": 12, "memoryRequirement": 2300, "sizePerEvent": 400}

        return
Example #19
    def setUp(self):
        """
        _setUp_

        Build a testing environment similar to a WN
        """
        self.testInit = TestInit(__file__)
        self.testDir = self.testInit.generateWorkDir()

        # Build a workload/task/step with the basic required information
        self.workload = newWorkload("UnitTests")
        self.task = self.workload.newTask("CMSSWExecutor")
        stepHelper = self.task.makeStep("ExecutorTest")
        self.step = stepHelper.data
        template = CMSSWTemplate()
        template(self.step)
        self.helper = template.helper(self.step)
        self.step.application.setup.scramCommand = "scramulator.py"
        self.step.application.command.executable = "cmsRun.py"
        self.step.application.setup.scramProject = "CMSSW"
        self.step.application.setup.scramArch = "slc5_ia32_gcc434"
        self.step.application.setup.cmsswVersion = "CMSSW_X_Y_Z"
        self.step.application.setup.softwareEnvironment = "echo \"Software Setup...\";"
        self.step.output.jobReport = "FrameworkJobReport.xml"
        self.helper.addOutputModule("outputRECORECO", primaryDataset="Bogus",
                                    processedDataset="Test-Era-v1",
                                    dataTier="DATA")
        self.helper.addOutputModule("outputALCARECORECO", primaryDataset="Bogus",
                                    processedDataset="Test-Era-v1",
                                    dataTier="DATA")
        self.helper.setGlobalTag("Bogus")
        taskMaker = TaskMaker(self.workload, self.testDir)
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        # Build the TaskSpace/StepSpace
        self.sandboxDir = os.path.join(self.testDir, "UnitTests")
        self.task.build(self.testDir)
        sys.path.append(self.testDir)
        sys.path.append(self.sandboxDir)

        # Copy the files that cmsRun would have generated in the step space
        open(os.path.join(self.step.builder.workingDir, "outputRECORECO.root"), "w").close()
        open(os.path.join(self.step.builder.workingDir, "outputALCARECORECO.root"), "w").close()
        shutil.copy(os.path.join(getTestBase(),
                                 "WMCore_t/FwkJobReport_t/CMSSWProcessingReport.xml"),
                    os.path.join(self.step.builder.workingDir, "FrameworkJobReport.xml"))

        # Create a job
        self.job = Job(name="/UnitTest/CMSSWExecutor/ExecutorTest-test-job")
        self.job["id"] = 1

        # Set the PATH
        binDir = inspect.getsourcefile(ModuleLocator)
        binDir = binDir.replace("__init__.py", "bin")

        if not binDir in os.environ['PATH']:
            os.environ['PATH'] = "%s:%s" % (os.environ['PATH'], binDir)

        self.oldCwd = os.getcwd()
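
setUp saves the starting directory in self.oldCwd, which only pays off if tearDown restores it; a sketch of that counterpart (assumed, not part of the scrape):

    def tearDown(self):
        # return to where the test started and undo the path changes
        os.chdir(self.oldCwd)
        sys.path.remove(self.testDir)
        sys.path.remove(self.sandboxDir)
        self.testInit.delWorkDir()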
Example #20
    def setUp(self):
        """
        _setUp_

        Install schema and create a DAO factory for WMBS.
        """
        super(ResourceControlTest, self).setUp()
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS",
                                               "WMCore.ResourceControl",
                                               "WMCore.BossAir"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        self.baDaoFactory = DAOFactory(package="WMCore.BossAir",
                                       logger=myThread.logger,
                                       dbinterface=myThread.dbi)

        self.insertRunJob = self.baDaoFactory(classname="NewJobs")
        self.insertState = self.baDaoFactory(classname="NewState")
        states = ['PEND', 'RUN', 'Idle', 'Running']
        self.insertState.execute(states)

        self.tempDir = self.testInit.generateWorkDir()
        return
Example #21
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        DBSBuffer tables.  Also add some dummy locations.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.testInit.clearDatabase(modules = ["WMComponent.DBSBuffer.Database"])
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        self.daoFactory2 = DAOFactory(package = "WMComponent.DBSUpload.Database",
                                      logger = myThread.logger,
                                      dbinterface = myThread.dbi)

        locationAction = self.daoFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")        
Example #22
    def setUp(self):
        """
        _setUp_

        setUp function for unittest

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)
        self.testDir = self.testInit.generateWorkDir(deleteOnDestruction = False)
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                        logger = myThread.logger,
                                        dbinterface = myThread.dbi)

        self.buffer3Factory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                         logger = myThread.logger,
                                         dbinterface = myThread.dbi)

        locationAction = self.bufferFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")
        locationAction.execute(siteName = "malpaquet")
        self.dbsUrl = "https://localhost:1443/dbs/dev/global/DBSWriter"
        self.dbsApi = None
        return
Example #23
    def setUp(self):
        """
        _setUp_

        Install the DBSBuffer schema into the database and connect to PhEDEx.
        """

        self.phedexURL = "https://bogus.cern.ch/bogus"
        self.dbsURL = "https://bogus.cern.ch/bogus"
        EmulatorHelper.setEmulators(phedex = True, dbs = True, siteDB = True)

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer",
                                                 "WMCore.WMBS"],
                                useDefault = False)

        self.testFilesA = []
        self.testFilesB = []
        self.testDatasetA = "/BogusPrimary/Run2012Z-PromptReco-v1/RECO"
        self.testDatasetB = "/BogusPrimary/CRUZET11-v1/RAW"

        return
Example #24
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """


        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS"],
                                useDefault = False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package = "WMCore.WMBS",
                                logger = myThread.logger,
                                dbinterface = myThread.dbi)

        locationAction = daofactory(classname = "Locations.New")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")        
        
        return
Example #25
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also, create some dummy locations.

        This doesn't start the server automatically; you need to start it
        yourself beforehand. Make sure to change self.server_url below if
        your server is not running at the default address.

        WMCORE/src/python/WMCore/WebTools/Root.py --ini=WMCORE/src/python/WMCore/HTTPFrontEnd/WMBSDefaultConfig.py
        """
        self.server_url = "http://localhost:8081"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"], useDefault=False)

        myThread = threading.currentThread()
        self.daofactory = DAOFactory(package="WMCore.WMBS", logger=myThread.logger, dbinterface=myThread.dbi)

        locationAction = self.daofactory(classname="Locations.New")
        locationAction.execute(siteName="test.site.ch")
        locationAction.execute(siteName="base.site.ch")
        testSubscription, testFileA, testFileB, testFileC = self.createSubscriptionWithFileABC()
        self.createTestJob(testSubscription, "TestJob1", testFileA)
        self.createTestJob(testSubscription, "TestJob2", testFileB)
        self.createTestJob(testSubscription, "TestJob3", testFileC)

        return
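
Since the frontend has to be started by hand, a guard at the end of setUp can skip the run cleanly when nothing is listening; a sketch (urllib2 because the surrounding code is Python 2, timeout value illustrative):

    import urllib2
    try:
        urllib2.urlopen(self.server_url, timeout=5)
    except Exception:
        raise unittest.SkipTest("no server at %s" % self.server_url)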
Example #26
    def __init__(self):
        """
        __init__

        Init the DB
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase = True)
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                        logger = myThread.logger,
                                        dbinterface = myThread.dbi)

        locationAction = self.bufferFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")
        locationAction.execute(siteName = "malpaquet")

        config = self.getConfig()
        self.dbsUploader = DBSUploadPoller(config = config)

        return
Example #27
    def setUp(self):
        "make a logger instance and create tables"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()
Example #28
    def setUp(self):
        "make a logger instance and create tables"


        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()

        myThread = threading.currentThread()
        if myThread.dialect == 'MySQL':
            myThread.create = """
create table test (bind1 varchar(20), bind2 varchar(20)) ENGINE=InnoDB """
        if myThread.dialect == 'SQLite':
            myThread.create = """
                create table test (bind1 varchar(20), bind2 varchar(20))"""

        myThread.insert = """
insert into test (bind1, bind2) values (:bind1, :bind2) """
        myThread.insert_binds = \
          [ {'bind1':'value1a', 'bind2': 'value2a'},\
            {'bind1':'value1b', 'bind2': 'value2b'},\
            {'bind1':'value1c', 'bind2': 'value2d'} ]
        myThread.select = "select * from test"

        myThread = threading.currentThread()
        myThread.transaction = Transaction(myThread.dbi)
        myThread.transaction.processData(myThread.create)
        myThread.transaction.processData(myThread.insert, myThread.insert_binds)
        myThread.transaction.commit()

        return
Example #29
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.
        """

        myThread = threading.currentThread()

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        # We need to set sites in the locations table
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T2_CH_CERN")
        locationAction.execute(siteName="site2", pnn="malpaquet")
        locationAction.execute(siteName="site3", pnn="badse.cern.ch")
Example #30
    def setUp(self):
        """
        _setUp_

        setUp function for unittest

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase = True)
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(package = "WMComponent.DBSBuffer.Database",
                                        logger = myThread.logger,
                                        dbinterface = myThread.dbi)

        locationAction = self.bufferFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")
        locationAction.execute(siteName = "malpaquet")



        return
Example #31
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        locationNew = self.daoFactory(classname="Locations.New")
        locationNew.execute(siteName="test.site.ch", pnn="T2_CH_CERN")
        locationNew.execute(siteName="test2.site.ch", pnn="T2_CH_CERN")

        return
Example #32
class ConfigurationExTest(unittest.TestCase):
    """
    test case for Configuration object

    """
    def setUp(self):
        """set up"""
        self.testInit = TestInit(__file__)
        self.testDir = self.testInit.generateWorkDir()
        self.functionSave = "%s/WMCore_Agent_Configuration_t_function.py" % self.testDir

    def tearDown(self):
        """clean up"""
        self.testInit.delWorkDir()

    def testCallableConfigParams(self):
        """ctor"""
        def f():
            return True

        config = Configuration()
        config.section_("SectionF")
        #creating field for the following test
        config.SectionF.aFunction = ''
        #Cannot set a function for plain Configuration objects
        #config.SectionF.__setattr__('aFunction', f)
        self.assertRaises(RuntimeError, config.SectionF.__setattr__,
                          'aFunction', f)

        config = ConfigurationEx()
        config.section_("SectionF")
        #No failures with configurationEx
        config.SectionF.aFunction = f

        #However ConfigurationEx instances cannot be saved
        self.assertRaises(RuntimeError, saveConfigurationFile, config,
                          self.functionSave)
Example #33
    def setUp(self):

        myThread = threading.currentThread()

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        #self.tearDown()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", "WMCore.BossAir", "WMCore.ResourceControl",
            "WMCore.Agent.Database"
        ],
                                useDefault=False)

        self.daoFactory = DAOFactory(package="WMCore.BossAir",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        resourceControl = ResourceControl()
        resourceControl.insertSite(siteName='Xanadu',
                                   pnn='se.Xanadu',
                                   ceName='Xanadu',
                                   plugin="TestPlugin")
        resourceControl.insertThreshold(siteName = 'Xanadu', taskType = 'Processing', \
                                        maxSlots = 10000, pendingSlots = 10000)

        # Create user
        wmbsFactory = DAOFactory(package="WMCore.WMBS",
                                 logger=myThread.logger,
                                 dbinterface=myThread.dbi)
        newuser = wmbsFactory(classname="Users.New")
        newuser.execute(dn="mnorman",
                        group_name="phgroup",
                        role_name="cmsrole")

        if PY3:
            self.assertItemsEqual = self.assertCountEqual
Example #34
    def setUp(self):
        """
        _setUp_

        Create two subscriptions: One that contains a single file and one that
        contains multiple files.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="T1_US_FNAL", pnn="T1_US_FNAL_Disk")
        locationAction.execute(siteName="T2_CH_CERN", pnn="T2_CH_CERN")

        self.testWorkflow = Workflow(spec="spec.xml",
                                     owner="Steve",
                                     name="wf001",
                                     task="Test")
        self.testWorkflow.create()

        self.performanceParams = {
            'timePerEvent': 12,
            'memoryRequirement': 2300,
            'sizePerEvent': 400
        }

        return
Example #35
    def setUp(self):
        """
        Basic setUp

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()


        self.testDir = self.testInit.generateWorkDir()

        # Random variables
        self.workloadDir = None
        self.unpackDir   = None
        self.initialDir  = os.getcwd()
        self.origPath    = sys.path


        # Create some dirs
        os.makedirs(os.path.join(self.testDir, 'packages'))

        return
Example #36
    def setUp(self):
        """
        _setUp_

        Initialize the database.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.listTasksByWorkflow = self.daoFactory(
            classname="Workflow.LoadFromName")
        self.listFilesets = self.daoFactory(classname="Fileset.List")
        self.listSubsMapping = self.daoFactory(
            classname="Subscriptions.ListSubsAndFilesetsFromWorkflow")

        return
Example #37
    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="se1.cern.ch")
        locationAction.execute(siteName="se1.fnal.gov")

        return
Example #38
class DaemonTest(unittest.TestCase):
    """
    _Daemon_t_

    Unit tests for message services: subscription, priority subscription, buffers,
    etc.

    """

    # minimum number of messages that need to be in queue
    _minMsg = 20
    # number of publish and gets from queue
    _publishAndGet = 10

    def setUp(self):
        "make a logger instance and create tables"
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.tempDir = tempfile.mkdtemp()

    def tearDown(self):
        """
        Deletion of the databases
        """
        self.testInit.clearDatabase()
        shutil.rmtree(self.tempDir, True)

    def testA(self):
        """
        __testSubscribe__

        Test daemon creation
        """
        # keep the parent alive
        self.pid = createDaemon(self.tempDir, True)
        try:
            try:
                if self.pid != 0:
                    time.sleep(2)
                    details = Details(os.path.join(self.tempDir, "Daemon.xml"))
                    time.sleep(10)
                    details.killWithPrejudice()
                else:
                    while True:
                        time.sleep(1)
            except:
                pass
        finally:
            if self.pid == 0:
                os._exit(-1)
            else:
                os.system('kill -9 %s' % self.pid)
Example #39
class MySQLTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel = logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testDir = self.testInit.generateWorkDir()
        self.config = getConfig(self.testDir)
        # mock generator instance to communicate some configuration values
        self.generator = utils.AlertGeneratorMock(self.config)
        self.testName = self.id().split('.')[-1]


    def tearDown(self):
        self.testInit.delWorkDir()
        self.generator = None


    def testMySQLPollerBasic(self):
        config = getConfig("/tmp")
        generator = utils.AlertGeneratorMock(config)
        # use the mysqlCPUPoller configuration here; any poller section with
        # the appropriate attributes set would do
        try:
            poller = MySQLPoller(config.AlertGenerator.mysqlCPUPoller, generator)
        except Exception as ex:
            self.fail("%s: exception: %s" % (self.testName, ex))
        # this class does not define a polling sample function, so give it one
        poller.sample = lambda proc: float(12)
        self.assertEqual(len(poller._measurements), 0)
        poller.check()
        self.assertEqual(len(poller._measurements), 1)
        self.assertEqual(poller._measurements[0], 12)

        # test handling of a non-existing process
        MySQLPoller._getProcessPID = lambda inst: 1212121212
        self.assertRaises(Exception, MySQLPoller,
                          config.AlertGenerator.mysqlCPUPoller, generator)
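
Patching MySQLPoller._getProcessPID at class level leaks into any later test that touches the class; a safer variant of the last check saves and restores the original (sketch):

    origGetPID = MySQLPoller._getProcessPID
    try:
        MySQLPoller._getProcessPID = lambda inst: 1212121212
        self.assertRaises(Exception, MySQLPoller,
                          config.AlertGenerator.mysqlCPUPoller, generator)
    finally:
        MySQLPoller._getProcessPID = origGetPID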
Example #40
class DBCoreTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMQuality.TestDB"],
                                useDefault=False)

        return

    def tearDown(self):
        """
        Delete the databases
        """
        self.testInit.clearDatabase()
        return

    def testBuildBinds(self):
        """
        Test class for DBCore.buildbinds()

        """

        #This test may become obsolete soon.  There is a TODO stuck onto DBCore.buildbinds()
        #This just checks that buildbinds() packages every value from the files
        #list under the seqname key, and that the extra keys from dictbinds are
        #carried into each resulting dict, though that second check seems redundant.
        # -mnorman

        seqname = 'file'
        dictbinds = {'lumi': 123, 'test': 100}
        files = ['testA', 'testB', 'testC']

        myThread = threading.currentThread()

        testInterface = myThread.dbi

        binds = testInterface.buildbinds(files, seqname, dictbinds)

        #This should return a dict for every value in files with additional elements
        #from dictbinds.  We then loop over every dict (which should be equal to the
        #number of elements in files), and look to see that the filename matches
        #and that at least one of the dictbinds keys maps to its proper element

        for i in range(len(files)):
            self.assertEqual(binds[i][seqname], files[i])
            self.assertEqual(binds[i][list(dictbinds)[0]],
                             dictbinds[list(dictbinds)[0]])

        return

    def testProcessDataNoBinds(self):
        """
        _testProcessDataNoBinds_

        Verify that insert and select queries work when no binds are used.
        """
        insertSQL = "INSERT INTO test_tablea VALUES (1, 2, 'three')"
        selectSQL = "SELECT column1, column2, column3 from test_tablea"

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL)
        resultSets = myThread.dbi.processData(selectSQL)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 1, \
               "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
               "Error: Wrong number of columns returned."
        assert results[0][0] == 1, \
               "Error: Column one is wrong."
        assert results[0][1] == 2, \
               "Error: Column two is wrong."
        assert results[0][2] == "three", \
               "Error: Column three is wrong."

        return

    def testProcessDataOneBind(self):
        """
        _testProcessDataOneBind_

        Verify that insert and select queries work with one set of bind variables.
        """
        bindsA = {"one": 1, "two": 2, "three": "three"}
        bindsB = {"one": 3, "two": 2, "three": "one"}
        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
          """SELECT column1, column2, column3 FROM test_tablea
             WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL, bindsA)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 1, \
               "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
               "Error: Wrong number of columns returned."
        assert results[0][0] == 1, \
               "Error: Column one is wrong."
        assert results[0][1] == 2, \
               "Error: Column two is wrong."
        assert results[0][2] == "three", \
               "Error: Column three is wrong."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 1, \
               "Error: Wrong number of rows returned."
        assert len(results[0]) == 3, \
               "Error: Wrong number of columns returned."
        assert results[0][0] == 3, \
               "Error: Column one is wrong."
        assert results[0][1] == 2, \
               "Error: Column two is wrong."
        assert results[0][2] == "one", \
               "Error: Column three is wrong."

        return

    def testProcessDataSeveralBinds(self):
        """
        _testProcessDataSeveralBinds_

        Verify that insert and select queries work with several binds.
        """
        bindsA = [{
            "one": 1,
            "two": 2,
            "three": "three"
        }, {
            "one": 3,
            "two": 2,
            "three": "one"
        }, {
            "one": 4,
            "two": 5,
            "three": "six"
        }, {
            "one": 6,
            "two": 5,
            "three": "four"
        }]
        bindsB = [{
            "one": 10,
            "two": 11,
            "three": "twelve"
        }, {
            "one": 12,
            "two": 11,
            "three": "ten"
        }]

        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
          """SELECT column1, column2, column3 FROM test_tablea
             WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL, bindsA)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 4, \
               "Error: Wrong number of rows returned."

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsA:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsA.remove(bind)
                    break

        assert len(bindsA) == 0, \
               "Error: Missing rows from select."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)

        assert len(resultSets) == 1, \
               "Error: Wrong number of ResultSets returned."

        results = resultSets[0].fetchall()

        assert len(results) == 2, \
               "Error: Wrong number of rows returned."

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsB:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsB.remove(bind)
                    break

        assert len(bindsB) == 0, \
               "Error: Missing rows from select."

        return

    def testProcessDataHugeBinds(self):
        """
        _testProcessDataHugeBinds_

        Verify that select and insert queries work with several thousand binds.
        """
        bindsA = []
        bindsB = []
        for i in range(3001):
            bindsA.append({"one": i, "two": i * 2, "three": str(i * 3)})

        for i in range(1501):
            bindsB.append({"one": (i + 1) * 2, "two": i, "three": str(i * 5)})

        insertSQL = "INSERT INTO test_tablea VALUES (:one, :two, :three)"
        selectSQL = \
          """SELECT column1, column2, column3 FROM test_tablea
             WHERE column1 = :one AND column2 = :two AND column3 = :three"""

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL, bindsA)
        results = []
        for resultSet in resultSets:
            results.extend(resultSet.fetchall())

        assert len(results) == 3001, \
               "Error: Wrong number of rows returned: %d" % len(results)

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsA:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsA.remove(bind)
                    break

        assert len(bindsA) == 0, \
               "Error: Missing rows from select."

        resultSets = myThread.dbi.processData(selectSQL, bindsB)
        results = []
        for resultSet in resultSets:
            results.extend(resultSet.fetchall())

        assert len(results) == 1501, \
               "Error: Wrong number of rows returned."

        for result in results:
            assert len(result) == 3, \
                   "Error: Wrong number of columns returned."
            for bind in bindsB:
                if bind["one"] == result[0] and bind["two"] == result[1] and \
                   bind["three"] == result[2]:
                    bindsB.remove(bind)
                    break

        assert len(bindsB) == 0, \
               "Error: Missing rows from select."

        return

    def testInsertHugeNumber(self):
        """
        _testInsertHugeNumber_

        Verify that we can insert and select huge numbers.
        """
        insertSQL = "INSERT INTO test_bigcol VALUES(:val1)"
        selectSQL = "SELECT * FROM test_bigcol"

        bindsA = {"val1": 2012211901}
        bindsB = {"val1": 20122119010}

        myThread = threading.currentThread()
        myThread.dbi.processData(insertSQL, binds=bindsA)
        myThread.dbi.processData(insertSQL, binds=bindsB)

        resultSets = myThread.dbi.processData(selectSQL)
        results = []
        for resultSet in resultSets:
            for row in resultSet.fetchall():
                results.append(row[0])

        assert len(results) == 2, \
               "Error: Wrong number of results."
        assert bindsA["val1"] in results, \
               "Error: Value one is missing."
        assert bindsB["val1"] in results, \
               "Error: Value one is missing."

        return
Example #41
class AlertProcessorTest(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging(logLevel=logging.DEBUG)
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=[
            "WMCore.WMBS", 'WMCore.Agent.Database', "WMCore.ResourceControl"
        ],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        self.config = Configuration()
        self.config.section_("Agent")
        self.config.Agent.useMsgService = False
        self.config.Agent.useTrigger = False
        self.config.component_("AlertProcessor")
        self.config.AlertProcessor.componentDir = self.testDir
        self.config.AlertProcessor.address = "tcp://127.0.0.1:5557"
        self.config.AlertProcessor.controlAddr = "tcp://127.0.0.1:5559"
        self.config.section_("CoreDatabase")

        self.config.CoreDatabase.socket = os.environ.get("DBSOCK")
        self.config.CoreDatabase.connectUrl = os.environ.get("DATABASE")

        self.config.AlertProcessor.section_("critical")
        self.config.AlertProcessor.section_("soft")

        self.config.AlertProcessor.critical.level = 5
        self.config.AlertProcessor.soft.level = 0
        self.config.AlertProcessor.soft.bufferSize = 3

        self.config.AlertProcessor.critical.section_("sinks")
        self.config.AlertProcessor.soft.section_("sinks")

    def tearDown(self):
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()

    def testAlertProcessorBasic(self):
        alertProcessor = AlertProcessor(self.config)
        try:
            # alertProcessor.startComponent() would block: in Harness.py the
            # method just calls prepareToStart() and then waits forever.
            # alertProcessor.startDaemon() is no good for this either, since it
            # puts everything in the background.
            alertProcessor.prepareToStart()
        except Exception as ex:
            print(ex)
            self.fail(str(ex))

        logging.debug(
            "AlertProcessor and its sub-components should be running now ...")
        logging.debug("Going to stop the component ...")

        # stop via component method
        try:
            alertProcessor.stopAlertProcessor()
        except Exception as ex:
            print(ex)
            self.fail(str(ex))
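
For reference, the start/stop sequence above can be wrapped so the component is always stopped even if an assertion fails in between; a minimal sketch using only the methods this test exercises (the helper name is made up):

import contextlib

@contextlib.contextmanager
def runningAlertProcessor(config):
    # startComponent() would block forever (see the comment above), so start
    # via prepareToStart() and guarantee stopAlertProcessor() on the way out.
    processor = AlertProcessor(config)
    processor.prepareToStart()
    try:
        yield processor
    finally:
        processor.stopAlertProcessor()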
Example #42
0
class ExpressTest(unittest.TestCase):  # hypothetical name; the snippet omits the class definition
    def setUp(self):
        """
        _setUp_
        """
        import WMQuality.TestInit
        WMQuality.TestInit.deleteDatabaseAfterEveryTest("I'm Serious")

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(
            customModules=["WMComponent.DBS3Buffer", "T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state, state_time)
                                    VALUES (1, 'SomeSite', 1, 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_pnns
                                    (id, pnn)
                                    VALUES (2, 'SomePNN')
                                    """,
                                 transaction=False)

        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 2)
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in range(1, 5):
            insertLumiDAO.execute(binds={
                'RUN': 1,
                'LUMI': lumi
            },
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "Express"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        fileset1 = Fileset(name="TestFileset1")
        self.fileset2 = Fileset(name="TestFileset2")
        fileset1.load()
        self.fileset2.create()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow2 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow2",
                             task="Test")
        workflow1.create()
        workflow2.create()

        self.subscription1 = Subscription(fileset=fileset1,
                                          workflow=workflow1,
                                          split_algo="Express",
                                          type="Express")
        self.subscription2 = Subscription(fileset=self.fileset2,
                                          workflow=workflow2,
                                          split_algo="ExpressMerge",
                                          type="ExpressMerge")
        self.subscription1.create()
        self.subscription2.create()

        myThread.dbi.processData("""INSERT INTO wmbs_workflow_output
                                    (WORKFLOW_ID, OUTPUT_IDENTIFIER, OUTPUT_FILESET)
                                    VALUES (%d, 'SOMEOUTPUT', %d)
                                    """ % (workflow1.id, self.fileset2.id),
                                 transaction=False)

        # keep for later
        self.insertSplitLumisDAO = daoFactory(
            classname="JobSplitting.InsertSplitLumis")

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['maxInputSize'] = 2 * 1024 * 1024 * 1024
        self.splitArgs['maxInputFiles'] = 500
        self.splitArgs['maxLatency'] = 15 * 23

        return
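
The snippet ends with setUp; the matching tearDown is omitted. Mirroring Example #47, which uses this identical setUp, it would presumably be:

    def tearDown(self):
        """
        _tearDown_
        """
        self.testInit.clearDatabase()

        return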
Example #43
0
class ProcessPoolTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection(destroyAllDatabase = True)
        self.testInit.setSchema(customModules = ["WMCore.Agent.Database"],
                                useDefault = False)
        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()
        return

    def testA_ProcessPool(self):
        """
        _testProcessPool_

        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.Agent.useHeartbeat = False
        self.testInit.generateWorkDir(config)

        processPool = ProcessPool("ProcessPool_t.ProcessPoolTestWorker",
                                  totalSlaves = 1,
                                  componentDir = config.General.workDir,
                                  config = config,
                                  namespace = "WMCore_t")

        processPool.enqueue(["One", "Two", "Three"])
        result = processPool.dequeue(3)

        self.assertEqual(len(result), 3, "Error: Expected three items back.")
        self.assertTrue( "One" in result)
        self.assertTrue( "Two" in result)
        self.assertTrue( "Three" in result)

        return

    def testB_ProcessPoolStress(self):
        """
        _testProcessPoolStress_

        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.Agent.useHeartbeat = False
        self.testInit.generateWorkDir(config)

        processPool = ProcessPool("ProcessPool_t.ProcessPoolTestWorker",
                                  totalSlaves = 1,
                                  componentDir = config.General.workDir,
                                  namespace = "WMCore_t",
                                  config = config)

        result = None
        inputData = None
        for i in range(1000):
            inputData = []
            while i > 0:
                inputData.append("COMMAND%s" % i)
                i -= 1

            processPool.enqueue(inputData)
            result = processPool.dequeue(len(inputData))

            self.assertEqual(len(result), len(inputData),
                             "Error: Wrong number of results returned.")

        for k in result:
            self.assertTrue(k in inputData)

        return


    def testC_MultiPool(self):
        """
        _testMultiPool_

        Run a test with multiple workers
        """
        raise nose.SkipTest
        config = self.testInit.getConfiguration()
        config.Agent.useHeartbeat = False
        self.testInit.generateWorkDir(config)

        processPool = ProcessPool("ProcessPool_t.ProcessPoolTestWorker",
                                  totalSlaves = 3,
                                  componentDir = config.General.workDir,
                                  namespace = "WMCore_t",
                                  config = config)

        for i in range(100):
            inputData = []
            while i > 0:
                inputData.append("COMMAND%s" % i)
                i -= 1

            processPool.enqueue(inputData)
            result = processPool.dequeue(len(inputData))

            self.assertEqual(len(result), len(inputData),
                             "Error: Wrong number of results returned.")
Example #44
0
class DashboardInterfaceTest(unittest.TestCase):
    """
    Test for the dashboard interface and its monitoring interaction

    Well, once I've written them it will be
    """


    def setUp(self):
        """
        Set up logging and a working directory

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()


        self.testDir = self.testInit.generateWorkDir()

        return


    def tearDown(self):
        """
        Clean up the test directory

        """

        self.testInit.delWorkDir()

        return


    def createWorkload(self):
        """
        Create a workload in order to test things

        """
        generator = WMSpecGenerator()
        workload = generator.createReRecoSpec("Tier1ReReco")
        return workload

    def createTestJob(self):
        """
        Create a test job to pass to the DashboardInterface

        """

        job = Job(name = "ThisIsASillyName")

        testFileA = File(lfn = "/this/is/a/lfnA", size = 1024, events = 10)
        testFileA.addRun(Run(1, *[45]))
        testFileB = File(lfn = "/this/is/a/lfnB", size = 1024, events = 10)
        testFileB.addRun(Run(1, *[46]))

        job.addFile(testFileA)
        job.addFile(testFileB)

        job['id'] = 1

        return job


    def createReport(self, outcome = 0):
        """
        Create a test report

        """

        jobReport = Report()
        jobReport.addStep('cmsRun1')
        jobReport.setStepStartTime(stepName = 'cmsRun1')
        jobReport.setStepStopTime(stepName = 'cmsRun1')
        if outcome:
            jobReport.addError('cmsRun1', 200, 'FakeError', 'FakeError')

        return jobReport


    def setupJobEnvironment(self, name = 'test'):
        """
        _setupJobEnvironment_

        Make some sort of environment in which to run tests
        """

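        # Point the site-local config override at the bundled FNAL file so the
        # Dashboard resolves the site to T1_US_FNAL, matching the
        # StatusDestination assertions in the tests below.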
        os.environ['WMAGENT_SITE_CONFIG_OVERRIDE'] = os.path.join(getTestBase(),
                                            "WMCore_t/Storage_t",
                                            "T1_US_FNAL_SiteLocalConfig.xml")
        return

    def testASuccessfulJobMonitoring(self):
        """
        _testASuccessfulJobMonitoring_

        Check that the data packets make sense when a job completes successfully
        """

        # Get the necessary objects
        name     = 'testA'
        job      = self.createTestJob()
        workload = self.createWorkload()
        task     = workload.getTask(taskName = "DataProcessing")
        report   = self.createReport()

        # Fill the job environment
        self.setupJobEnvironment(name = name)

        # Instantiate DBInfo
        dbInfo   = DashboardInfo(job = job, task = task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName = "cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step = step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        #Do a second step
        step = task.getStep(stepName = "cmsRun1")

        #Do the step start (It's not the first step)
        data = dbInfo.stepStart(step = step.data)
        self.assertEqual(data['jobStart'], None)
        self.assertEqual(data['2_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['2_ExeEnd'], step.name())
        self.assertEqual(data['2_ExeExitCode'], 0)
        self.assertTrue(data['2_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 2)

        # End the job!
        data = dbInfo.jobEnd()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'], "")

        return


    def testAFailedJobMonitoring(self):
        """
        _TestAFailedJobMonitoring_

        Simulate a job that completes but fails, check that the data sent is
        correct
        """

        # Get the necessary objects
        name     = 'testB'
        job      = self.createTestJob()
        workload = self.createWorkload()
        task     = workload.getTask(taskName = "DataProcessing")
        report   = self.createReport(outcome = 1)

        # Fill the job environment
        self.setupJobEnvironment(name = name)

        # Instantiate DBInfo
        dbInfo   = DashboardInfo(job = job, task = task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName = "cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step = step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertNotEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)
        self.assertEqual(report.retrieveStep("cmsRun1").counter, 1)

        # End the job!
        data = dbInfo.jobEnd()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertNotEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'].find('cmsRun1'), -1)

        return

    def testAKilledJobMonitoring(self):
        """
        _TestAKilledJobMonitoring_

        Simulate a job that is killed check that the data sent is
        correct
        """

        # Get the necessary objects
        name     = 'testC'
        job      = self.createTestJob()
        workload = self.createWorkload()
        task     = workload.getTask(taskName = "DataProcessing")
        report   = self.createReport(outcome = 1)

        # Fill the job environment
        self.setupJobEnvironment(name = name)

        # Instantiate DBInfo
        dbInfo   = DashboardInfo(job = job, task = task)
        dbInfo.addDestination('127.0.0.1', 8884)

        # Check jobStart information
        data = dbInfo.jobStart()
        self.assertEqual(data['MessageType'], 'JobStatus')
        self.assertEqual(data['StatusValue'], 'running')
        self.assertEqual(data['StatusDestination'], "T1_US_FNAL")
        self.assertEqual(data['taskId'], 'wmagent_Tier1ReReco')

        # Do the first step
        step = task.getStep(stepName = "cmsRun1")

        # Do the step start
        data = dbInfo.stepStart(step = step.data)
        self.assertNotEqual(data['jobStart'], None)
        self.assertEqual(data['jobStart']['ExeStart'], step.name())
        self.assertEqual(data['jobStart']['WNHostName'], socket.gethostname())
        self.assertEqual(data['1_ExeStart'], step.name())

        #Do the step end
        data = dbInfo.stepEnd(step = step.data, stepReport = report)
        self.assertEqual(data['1_ExeEnd'], step.name())
        self.assertNotEqual(data['1_ExeExitCode'], 0)
        self.assertTrue(data['1_ExeWCTime'] >= 0)

        # Kill the job!
        data = dbInfo.jobKilled()
        self.assertEqual(data['ExeEnd'], "cmsRun1")
        self.assertNotEqual(data['JobExitCode'], 0)
        self.assertEqual(data['WrapperCPUTime'], 0)
        self.assertTrue(data['WrapperWCTime'] >= 0)
        self.assertNotEqual(data['JobExitReason'].find('killed'), -1)

        return

    @attr('integration')
    def testGetDN(self):
        """
        _testGetDN_

        Checks that we can get a DN
        """
        dn = getUserProxyDN()
        if 'X509_USER_PROXY' in os.environ:
            self.assertNotEqual(dn, None, 'Error: This should get a DN, if you have set one')
        else:
            self.assertEqual(dn, None, 'Error: There is no proxy in the environment, it should not get one')
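
The three monitoring tests above all drive the same DashboardInfo lifecycle; condensed, using only the calls they exercise:

dbInfo = DashboardInfo(job=job, task=task)
dbInfo.addDestination('127.0.0.1', 8884)
data = dbInfo.jobStart()                 # announce the running job
data = dbInfo.stepStart(step=step.data)  # before each step
data = dbInfo.stepEnd(step=step.data, stepReport=report)  # after each step
data = dbInfo.jobEnd()                   # or dbInfo.jobKilled() if aborted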
Example #45
0
class CursorLeakTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Set up the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="se1.cern.ch")
        locationAction.execute(siteName="se1.fnal.gov")

        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()

    def testCursor(self):
        """
        _testCursor_

        Test that cursor closing really works.

        Create 100 files with 5 parents each and loop over them 100 times.
        If the cursors are exhausted, this will crash.

        TODO: improve for more effective testing.

        """

        raise nose.SkipTest
        fileList = []
        parentFile = None
        for i in range(100):
            testFile = File(lfn="/this/is/a/lfn%s" % i,
                            size=1024,
                            events=10,
                            checksums={"cksum": "1"})
            testFile.addRun(Run(1, *[i]))
            testFile.create()

            for j in range(5):
                parentFile = File(lfn="/this/is/a/lfnP%s" % j,
                                  size=1024,
                                  events=10,
                                  checksums={"cksum": "1"})
                parentFile.addRun(Run(1, *[j]))
                parentFile.create()
                testFile.addParent(parentFile['lfn'])

            fileList.append(testFile)

        for i in range(100):
            for fileObj in fileList:
                fileObj.loadData()
                fileObj.getAncestors(level=2)
                fileObj.getAncestors(level=2, type="lfn")

        return

    def testLotsOfAncestors(self):
        """
        _testLotsOfAncestors_

        Create a file with 15 parents with each parent having 100 parents to
        verify that the query to return grandparents works correctly.
        """
        raise nose.SkipTest
        testFileA = File(lfn="/this/is/a/lfnA",
                         size=1024,
                         events=10,
                         checksums={"cksum": "1"},
                         locations="se1.fnal.gov")
        testFileA.create()

        for i in range(15):
            testParent = File(lfn=makeUUID(),
                              size=1024,
                              events=10,
                              checksums={"cksum": "1"},
                              locations="se1.fnal.gov")
            testParent.create()
            testFileA.addParent(testParent["lfn"])

            for j in range(100):
                testGParent = File(lfn=makeUUID(),
                                   size=1024,
                                   events=10,
                                   checksums={"cksum": "1"},
                                   locations="se1.fnal.gov")
                testGParent.create()
                testParent.addParent(testGParent["lfn"])

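        # level=2 walks two generations up: 15 parents with 100 grandparents
        # each gives the 1500 ancestors expected here.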
        assert len(testFileA.getAncestors(level=2, type="lfn")) == 1500, \
            "ERROR: Incorrect grand parents returned"

        return
Example #46
0
class FixedDelayTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Create four subscriptions: one with a single file, one with multiple
        files, one with multiple lumis and one with a single lumi.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T2_CH_CERN")

        self.multipleFileFileset = Fileset(name="TestFileset1")
        self.multipleFileFileset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations=set(["T2_CH_CERN"]))
            newFile.addRun(Run(i, *[45 + i]))
            newFile.create()
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        self.singleFileFileset = Fileset(name="TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name",
                       size=1000,
                       events=100,
                       locations=set(["T2_CH_CERN"]))
        newFile.addRun(Run(1, *[45]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()

        self.multipleFileLumiset = Fileset(name="TestFileset3")
        self.multipleFileLumiset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations=set(["T2_CH_CERN"]))
            newFile.addRun(Run(1, *[45 + i / 3]))
            newFile.create()
            self.multipleFileLumiset.addFile(newFile)
        self.multipleFileLumiset.commit()

        self.singleLumiFileset = Fileset(name="TestFileset4")
        self.singleLumiFileset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations=set(["T2_CH_CERN"]))
            newFile.addRun(Run(1, *[45]))
            newFile.create()
            self.singleLumiFileset.addFile(newFile)
        self.singleLumiFileset.commit()

        testWorkflow = Workflow(spec="spec.xml",
                                owner="mnorman",
                                name="wf001",
                                task="Test")
        testWorkflow.create()
        self.multipleFileSubscription = Subscription(
            fileset=self.multipleFileFileset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")
        self.singleFileSubscription = Subscription(
            fileset=self.singleFileFileset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")
        self.multipleLumiSubscription = Subscription(
            fileset=self.multipleFileLumiset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")
        self.singleLumiSubscription = Subscription(
            fileset=self.singleLumiFileset,
            workflow=testWorkflow,
            split_algo="FixedDelay",
            type="Processing")

        self.multipleFileSubscription.create()
        self.singleFileSubscription.create()
        self.multipleLumiSubscription.create()
        self.singleLumiSubscription.create()
        return

    def tearDown(self):
        """
        _tearDown_

        Clear the database.
        """
        self.testInit.clearDatabase()
        return

    def testNone(self):
        """
        _testNone_

        Since the subscriptions are open, we shouldn't get any jobs back
        """
        splitter = SplitterFactory()
        jobFactory = splitter(self.singleFileSubscription)
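        # trigger_time is set well in the future, so the FixedDelay splitter
        # should not release any jobs yet.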
        jobGroups = jobFactory(trigger_time=int(time.time()) * 2)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        jobFactory = splitter(self.multipleFileSubscription)
        jobGroups = jobFactory(trigger_time=int(time.time()) * 2)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        jobFactory = splitter(self.multipleLumiSubscription)
        jobGroups = jobFactory(trigger_time=int(time.time()) * 2)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        jobFactory = splitter(self.singleLumiSubscription)
        jobGroups = jobFactory(trigger_time=int(time.time()) * 2)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        return

    def testClosed(self):
        """
        _testClosed_

        Since the subscriptions are closed and none of the files have been
        acquired, all of the files should show up
        """
        splitter = SplitterFactory()
        self.singleFileSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.singleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        assert len(jobGroups) == 1, \
               "ERROR: JobFactory didn't return one JobGroup."

        assert len(jobGroups[0].jobs) == 1, \
               "ERROR: JobFactory didn't create a single job."

        job = jobGroups[0].jobs.pop()

        assert job.getFiles(type = "lfn") == ["/some/file/name"], \
               "ERROR: Job contains unknown files."

        self.multipleFileSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.multipleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)

        self.assertEquals(len(jobGroups), 1)
        self.assertEquals(len(jobGroups[0].jobs), 1)
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEquals(len(myfiles), 10)

        self.multipleLumiSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.multipleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(len(jobGroups), 1)
        self.assertEquals(len(jobGroups[0].jobs), 1)
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEquals(len(myfiles), 10)
        #self.assertEquals(jobGroups, [], "Should have returned a null set")

        self.singleLumiSubscription.getFileset().markOpen(False)
        jobFactory = splitter(self.singleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        assert len(jobGroups) == 1, \
               "ERROR: JobFactory didn't return one JobGroup."

        assert len(jobGroups[0].jobs) == 1, \
               "ERROR: JobFactory didn't create a single job."
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEquals(len(myfiles), 10)

    def testAllAcquired(self):
        """
        _testAllAcquired_
        should all return no job groups
        """
        splitter = SplitterFactory()
        self.singleFileSubscription.acquireFiles(
            self.singleFileSubscription.availableFiles())
        jobFactory = splitter(self.singleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        self.multipleFileSubscription.acquireFiles(
            self.multipleFileSubscription.availableFiles())
        jobFactory = splitter(self.multipleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        self.multipleLumiSubscription.acquireFiles(
            self.multipleLumiSubscription.availableFiles())
        jobFactory = splitter(self.multipleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        self.singleLumiSubscription.acquireFiles(
            self.singleLumiSubscription.availableFiles())
        jobFactory = splitter(self.singleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

    def testClosedSomeAcquired(self):
        """
        _testClosedSomeAcquired_
        Since the subscriptions are closed and some of the files have been
        acquired, only the remaining files should show up.
        """
        splitter = SplitterFactory()
        self.multipleFileSubscription.getFileset().markOpen(False)

        self.singleFileSubscription.acquireFiles(
            [self.singleFileSubscription.availableFiles().pop()])
        jobFactory = splitter(self.singleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(jobGroups, [], "Should have returned a null set")

        self.multipleFileSubscription.getFileset().markOpen(False)
        self.multipleFileSubscription.acquireFiles(
            [self.multipleFileSubscription.availableFiles().pop()])
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.multipleFileSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(len(jobGroups), 1, "Should have gotten one jobGroup")
        self.assertEquals(len(jobGroups[0].jobs), 1, \
               "JobFactory should have made one job")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEquals(len(myfiles), 9, \
                "JobFactory should have provides us with 9 files")

        self.multipleLumiSubscription.getFileset().markOpen(False)
        self.multipleLumiSubscription.acquireFiles(
            [self.multipleLumiSubscription.availableFiles().pop()])
        jobFactory = splitter(self.multipleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(len(jobGroups), 1, "Should have gotten one jobGroup")
        self.assertEquals(len(jobGroups[0].jobs), 1, \
               "JobFactory should have made one job")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEquals(len(myfiles), 9, \
                "JobFactory should have provides us with 9 files")

        self.singleLumiSubscription.getFileset().markOpen(False)
        self.singleLumiSubscription.acquireFiles(
            [self.singleLumiSubscription.availableFiles().pop()])
        jobFactory = splitter(self.singleLumiSubscription)
        jobGroups = jobFactory(trigger_time=1)
        self.assertEquals(len(jobGroups), 1, "Should have gotten one jobGroup")
        self.assertEquals(len(jobGroups[0].jobs), 1, \
               "JobFactory should have made one job")
        myfiles = jobGroups[0].jobs[0].getFiles()
        self.assertEquals(len(myfiles), 9, \
                "JobFactory should have provides us with 9 files")

Example #47
0
class ExpressMergeTest(unittest.TestCase):
    """
    _ExpressMergeTest_
    Test for ExpressMerge job splitter
    """
    def setUp(self):
        """
        _setUp_
        """
        import WMQuality.TestInit
        WMQuality.TestInit.deleteDatabaseAfterEveryTest("I'm Serious")

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(
            customModules=["WMComponent.DBS3Buffer", "T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state, state_time)
                                    VALUES (1, 'SomeSite', 1, 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_pnns
                                    (id, pnn)
                                    VALUES (2, 'SomePNN')
                                    """,
                                 transaction=False)

        myThread.dbi.processData("""INSERT INTO wmbs_location_pnns
                                    (location, pnn)
                                    VALUES (1, 2)
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in range(1, 5):
            insertLumiDAO.execute(binds={
                'RUN': 1,
                'LUMI': lumi
            },
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "Express"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        fileset1 = Fileset(name="TestFileset1")
        self.fileset2 = Fileset(name="TestFileset2")
        fileset1.load()
        self.fileset2.create()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow2 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow2",
                             task="Test")
        workflow1.create()
        workflow2.create()

        self.subscription1 = Subscription(fileset=fileset1,
                                          workflow=workflow1,
                                          split_algo="Express",
                                          type="Express")
        self.subscription2 = Subscription(fileset=self.fileset2,
                                          workflow=workflow2,
                                          split_algo="ExpressMerge",
                                          type="ExpressMerge")
        self.subscription1.create()
        self.subscription2.create()

        myThread.dbi.processData("""INSERT INTO wmbs_workflow_output
                                    (WORKFLOW_ID, OUTPUT_IDENTIFIER, OUTPUT_FILESET)
                                    VALUES (%d, 'SOMEOUTPUT', %d)
                                    """ % (workflow1.id, self.fileset2.id),
                                 transaction=False)

        # keep for later
        self.insertSplitLumisDAO = daoFactory(
            classname="JobSplitting.InsertSplitLumis")

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['maxInputSize'] = 2 * 1024 * 1024 * 1024
        self.splitArgs['maxInputFiles'] = 500
        self.splitArgs['maxLatency'] = 15 * 23

        return

    def tearDown(self):
        """
        _tearDown_
        """
        self.testInit.clearDatabase()

        return

    def deleteSplitLumis(self):
        """
        _deleteSplitLumis_
        """
        myThread = threading.currentThread()

        myThread.dbi.processData("""DELETE FROM lumi_section_split_active
                                    """,
                                 transaction=False)

        return

    def test00(self):
        """
        _test00_
        Test that the job name prefix feature works
        Test latency trigger (wait and 0)
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxLatency'] = 0
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("ExpressMerge-"),
                        "ERROR: Job has wrong name")

        return

    def test01(self):
        """
        _test01_
        Test size and event triggers for single lumis (they are ignored)
        Test latency trigger (timed out)
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputSize'] = 1
        mySplitArgs['maxInputFiles'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return

    def test02(self):
        """
        _test02_
        Test input files threshold on multi lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputFiles'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test03(self):
        """
        _test03_
        Test input size threshold on multi lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        mySplitArgs['maxInputSize'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test04(self):
        """
        _test04_
        Test multi lumis express merges
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return

    def test05(self):
        """
        _test05_
        Test multi lumis express merges with holes
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 4]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        time.sleep(1)

        mySplitArgs['maxLatency'] = 1
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        return

    def test06(self):
        """
        _test06_
        Test active split lumis
        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1]:
            for i in range(2):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomePNN", immediateSave=False)
                newFile.create()
                self.fileset2.addFile(newFile)
        self.fileset2.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription2)

        self.insertSplitLumisDAO.execute(binds={
            'SUB': self.subscription1['id'],
            'LUMI': 1,
            'NFILES': 5
        })

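        # An active split lumi recorded against the input subscription blocks
        # merging for that lumi until it is cleared below.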
        mySplitArgs['maxLatency'] = 0
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.deleteSplitLumis()

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        return
Example #48
0
class RunBasedTest(unittest.TestCase):  # hypothetical name; the snippet omits the class definition
    def setUp(self):
        """
        _setUp_

        Create five subscriptions: single-file, multiple-file, multiple-run,
        single-run and single-run/multiple-lumi filesets.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T2_CH_CERN")

        self.multipleFileFileset = Fileset(name="TestFileset1")
        self.multipleFileFileset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(i, *[45 + i]))
            newFile.create()
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        self.singleFileFileset = Fileset(name="TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name",
                       size=1000,
                       events=100,
                       locations="T2_CH_CERN")
        newFile.addRun(Run(1, *[45]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()

        self.multipleFileRunset = Fileset(name="TestFileset3")
        self.multipleFileRunset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(i / 3, *[45]))
            newFile.create()
            self.multipleFileRunset.addFile(newFile)
        self.multipleFileRunset.commit()

        self.singleRunFileset = Fileset(name="TestFileset4")
        self.singleRunFileset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(1, *[45]))
            newFile.create()
            self.singleRunFileset.addFile(newFile)
        self.singleRunFileset.commit()

        self.singleRunMultipleLumi = Fileset(name="TestFileset5")
        self.singleRunMultipleLumi.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(1, *[45 + i]))
            newFile.create()
            self.singleRunMultipleLumi.addFile(newFile)
        self.singleRunMultipleLumi.commit()

        testWorkflow = Workflow(spec="spec.xml",
                                owner="mnorman",
                                name="wf001",
                                task="Test")
        testWorkflow.create()
        self.multipleFileSubscription = Subscription(
            fileset=self.multipleFileFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleFileSubscription = Subscription(
            fileset=self.singleFileFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.multipleRunSubscription = Subscription(
            fileset=self.multipleFileRunset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleRunSubscription = Subscription(
            fileset=self.singleRunFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleRunMultipleLumiSubscription = Subscription(
            fileset=self.singleRunMultipleLumi,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")

        self.multipleFileSubscription.create()
        self.singleFileSubscription.create()
        self.multipleRunSubscription.create()
        self.singleRunSubscription.create()
        self.singleRunMultipleLumiSubscription.create()

        return
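
Again the snippet omits the matching tearDown; mirroring Example #49, which shares this setUp, it would presumably be:

    def tearDown(self):
        """
        _tearDown_

        Tear down WMBS architecture.
        """
        self.testInit.clearDatabase()
        return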
Example #49
0
class EventBasedTest(unittest.TestCase):
    """
    _EventBasedTest_

    Test run based job splitting.
    """
    def setUp(self):
        """
        _setUp_

        Create five subscriptions: single-file, multiple-file, multiple-run,
        single-run and single-run/multiple-lumi filesets.
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)

        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="site1", pnn="T2_CH_CERN")

        self.multipleFileFileset = Fileset(name="TestFileset1")
        self.multipleFileFileset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(i, *[45 + i]))
            newFile.create()
            self.multipleFileFileset.addFile(newFile)
        self.multipleFileFileset.commit()

        self.singleFileFileset = Fileset(name="TestFileset2")
        self.singleFileFileset.create()
        newFile = File("/some/file/name",
                       size=1000,
                       events=100,
                       locations="T2_CH_CERN")
        newFile.addRun(Run(1, *[45]))
        newFile.create()
        self.singleFileFileset.addFile(newFile)
        self.singleFileFileset.commit()

        self.multipleFileRunset = Fileset(name="TestFileset3")
        self.multipleFileRunset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(i / 3, *[45]))
            newFile.create()
            self.multipleFileRunset.addFile(newFile)
        self.multipleFileRunset.commit()

        self.singleRunFileset = Fileset(name="TestFileset4")
        self.singleRunFileset.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(1, *[45]))
            newFile.create()
            self.singleRunFileset.addFile(newFile)
        self.singleRunFileset.commit()

        self.singleRunMultipleLumi = Fileset(name="TestFileset5")
        self.singleRunMultipleLumi.create()
        for i in range(10):
            newFile = File(makeUUID(),
                           size=1000,
                           events=100,
                           locations="T2_CH_CERN")
            newFile.addRun(Run(1, *[45 + i]))
            newFile.create()
            self.singleRunMultipleLumi.addFile(newFile)
        self.singleRunMultipleLumi.commit()

        testWorkflow = Workflow(spec="spec.xml",
                                owner="mnorman",
                                name="wf001",
                                task="Test")
        testWorkflow.create()
        self.multipleFileSubscription = Subscription(
            fileset=self.multipleFileFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleFileSubscription = Subscription(
            fileset=self.singleFileFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.multipleRunSubscription = Subscription(
            fileset=self.multipleFileRunset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleRunSubscription = Subscription(
            fileset=self.singleRunFileset,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")
        self.singleRunMultipleLumiSubscription = Subscription(
            fileset=self.singleRunMultipleLumi,
            workflow=testWorkflow,
            split_algo="RunBased",
            type="Processing")

        self.multipleFileSubscription.create()
        self.singleFileSubscription.create()
        self.multipleRunSubscription.create()
        self.singleRunSubscription.create()
        self.singleRunMultipleLumiSubscription.create()

        return

    def tearDown(self):
        """
        _tearDown_

        Tear down WMBS architecture.
        """
        self.testInit.clearDatabase()
        return

    def testExactRuns(self):
        """
        _testExactRuns_

        Test run based job splitting when the number of files per job is
        exactly the same as the number of files in the input fileset.
        """

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleFileSubscription)

        jobGroups = jobFactory(files_per_job=1)

        assert len(jobGroups) == 1, \
               "ERROR: JobFactory didn't return one JobGroup."

        assert len(jobGroups[0].jobs) == 1, \
               "ERROR: JobFactory didn't create a single job."

        job = jobGroups[0].jobs.pop()

        assert job.getFiles(type = "lfn") == ["/some/file/name"], \
               "ERROR: Job contains unknown files."

        return

    def testMoreRuns(self):
        """
        _testMoreRuns_

        Test run based job splitting when the number of files per job is
        greater than the number of files in the input fileset.
        """

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleFileSubscription)

        jobGroups = jobFactory(files_per_job=2)

        assert len(jobGroups) == 1, \
               "ERROR: JobFactory didn't return one JobGroup."

        assert len(jobGroups[0].jobs) == 1, \
               "ERROR: JobFactory didn't create a single job."

        job = jobGroups[0].jobs.pop()

        assert job.getFiles(type = "lfn") == ["/some/file/name"], \
               "ERROR: Job contains unknown files."

        return

    def testMultipleRuns(self):
        """
        _testMultipleRuns_

        Test run based job splitting with multiple input files, each
        carrying a distinct run, using one file per job.

        """

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.multipleFileSubscription)

        jobGroups = jobFactory(files_per_job=1)

        assert len(jobGroups) == 10, \
               "ERROR: JobFactory didn't return one JobGroup per run."

        assert len(jobGroups[0].jobs) == 1, \
               "ERROR: JobFactory didn't create one job per run."

        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 1)

        return

    def testMultipleRunsCombine(self):
        """
        _testMultipleRunsCombine_

        Test run based job splitting when the number of jobs is
        less than the number of files, with multiple files

        """

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.multipleRunSubscription)

        jobGroups = jobFactory(files_per_job=2)

        assert len(jobGroups) == 4, \
               "ERROR: JobFactory didn't return one JobGroup per run."

        assert len(jobGroups[1].jobs) == 2, \
               "ERROR: JobFactory didn't create two jobs in this JobGroup."

        # Three files per run: the last job popped carries the odd file,
        # the one before it carries two.
        self.assertEqual(len(jobGroups[1].jobs.pop().getFiles(type="lfn")), 1)
        self.assertEqual(len(jobGroups[1].jobs.pop().getFiles(type="lfn")), 2)

        return

    def testSingleRunsCombineUneven(self):
        """
        _testSingleRunsCombineUneven_

        Test run based job splitting when the files per job setting is
        less than and does not evenly divide the number of files.

        """

        #This should return two jobs, one with 8 and one with 2 files

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleRunSubscription)

        jobGroups = jobFactory(files_per_job=8)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 8)

        return

    def testPersistSingleRunsCombineUneven(self):
        """
        _testPersistSingleRunsCombineUneven_

        Test run based job splitting when the files per job setting is
        less than and does not evenly divide the number of files.

        """

        #This should return two jobs, one with 8 and one with 2 files

        splitter = SplitterFactory()
        jobFactory = splitter(package="WMCore.WMBS",
                              subscription=self.singleRunSubscription)

        jobGroups = jobFactory(files_per_job=8)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 8)

        return

    def testSingleRunsMultipleLumiCombineUneven(self):
        """
        _testSingleRunsMultipleLumiCombineUneven_

        Test run based job splitting when the files per job setting is
        less than and does not evenly divide the number of files.

        """

        #This should return two jobs, one with 8 and one with 2 files

        splitter = SplitterFactory()
        jobFactory = splitter(
            package="WMCore.WMBS",
            subscription=self.singleRunMultipleLumiSubscription)

        jobGroups = jobFactory(files_per_job=8)

        self.assertEqual(len(jobGroups), 1)
        self.assertEqual(len(jobGroups[0].jobs), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 2)
        self.assertEqual(len(jobGroups[0].jobs.pop().getFiles(type="lfn")), 8)

        return
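Taken together, the assertions above pin down the RunBased contract: input files are first partitioned by run number (one JobGroup per run), and each partition is then chunked into jobs of at most files_per_job files. Below is a minimal standalone sketch of that grouping logic; all names are illustrative and it makes no claim to match the real splitter's internals.

from collections import defaultdict

def run_based_split(files, files_per_job):
    """Sketch: one group per run, jobs chunked by files_per_job."""
    by_run = defaultdict(list)
    for f in files:
        by_run[f["run"]].append(f)

    job_groups = []
    for run in sorted(by_run):
        run_files = by_run[run]
        jobs = [run_files[i:i + files_per_job]
                for i in range(0, len(run_files), files_per_job)]
        job_groups.append({"run": run, "jobs": jobs})
    return job_groups

# 10 files in a single run with files_per_job=8 yields jobs of 8 and 2 files,
# matching testSingleRunsCombineUneven above.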
Beispiel #50
0
class Scram_t(unittest.TestCase):
    def setUp(self):
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testDir = self.testInit.generateWorkDir()
        self.oldCwd = os.getcwd()
        if PY3:
            self.assertItemsEqual = self.assertCountEqual

    def tearDown(self):
        self.testInit.delWorkDir()

    def testA(self):
        """
        instantiate a Scram instance in test mode.
        """
        try:
            Scram(initialise="/bin/date",
                  architecture="slc5_amd64_gcc454",
                  version="CMSSW_X_Y_Z",
                  test=True)
        except Exception as ex:
            msg = "Failed to instantiate Scram in test mode:\n %s " % str(ex)
            self.fail(msg)

    def testB(self):
        """
        instantiate a Scram instance in non-test mode.
        Limited in what we can test here since we don't have scram etc. in the unittest env.
        """
        try:
            Scram(initialise="/bin/date",
                  architecture="slc5_amd64_gcc454",
                  version="CMSSW_X_Y_Z")
        except Exception as ex:
            msg = "Failed to instantiate Scram:\n %s " % str(ex)
            self.fail(msg)

    def testC(self):
        """
        test all method calls in test mode

        """
        s = Scram(initialise="/bin/date",
                  architecture="slc5_amd64_gcc454",
                  version="CMSSW_X_Y_Z",
                  directory=self.testDir,
                  test=True)

        try:
            status = s.project()
        except Exception as ex:
            msg = "Error running Scram.project:\n %s" % str(ex)
            self.fail(msg)

        self.assertEqual(status, 0)
        self.assertTrue(os.path.exists(s.projectArea))
        self.assertTrue("project" in s.lastExecuted)
        self.assertTrue("CMSSW_X_Y_Z" in s.lastExecuted)

        try:
            status = s.runtime()
        except Exception as ex:
            msg = "Error running Scram.runtime:\n %s" % str(ex)
            self.fail(msg)

        self.assertEqual(status, 0)
        self.assertTrue("ru -sh" in s.lastExecuted)
        self.assertTrue("TEST_MODE" in s.runtimeEnv)

        comm = "echo \"Hello World\""
        try:
            status = s(comm)
        except Exception as ex:
            msg = "Failed to call Scram object:\n %s" % str(ex)

        self.assertEqual(status, 0)
        self.assertEqual(s.lastExecuted, comm)

    def testArchMap(self):
        self.assertItemsEqual(OS_TO_ARCH['rhel6'], ['slc5', 'slc6'])
        self.assertItemsEqual(OS_TO_ARCH['rhel7'], ['slc7'])
        self.assertItemsEqual(ARCH_TO_OS['slc6'], ['rhel6'])
        self.assertItemsEqual(ARCH_TO_OS['slc7'], ['rhel7'])
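For reference, the assertions in testArchMap imply mapping tables of the following shape. This is a reconstruction showing only the entries the test pins down; the real WMCore module may define more.

# Illustrative reconstruction from the assertions in testArchMap above.
OS_TO_ARCH = {
    'rhel6': ['slc5', 'slc6'],
    'rhel7': ['slc7'],
}
ARCH_TO_OS = {
    'slc6': ['rhel6'],
    'slc7': ['rhel7'],
}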

    def testScramArchParsing(self):
        """
        Test the various modes of parsing for the scram arch
        """
        try:
            os.chdir(self.testDir)
            with tempfile.NamedTemporaryFile() as tf:
                tf.write(b'GLIDEIN_REQUIRED_OS = "rhel6" \n')
                tf.write(b'Memory = 2048\n')
                tf.flush()
                with tmpEnv(_CONDOR_MACHINE_AD=tf.name):
                    self.assertEqual(getSingleScramArch('slc6_blah_blah'),
                                     'slc6_blah_blah')
                    self.assertEqual(getSingleScramArch('slc5_blah_blah'),
                                     'slc5_blah_blah')
                    self.assertEqual(
                        getSingleScramArch(
                            ['slc6_blah_blah', 'slc7_blah_blah']),
                        'slc6_blah_blah')
                    self.assertEqual(
                        getSingleScramArch(
                            ['slc6_blah_blah', 'slc5_blah_blah']),
                        'slc6_blah_blah')
                    self.assertEqual(
                        getSingleScramArch(
                            ['slc7_blah_blah', 'slc8_blah_blah']), None)
            with tempfile.NamedTemporaryFile() as tf:
                tf.write(b'GLIDEIN_REQUIRED_OS = "rhel7" \n')
                tf.write(b'Memory = 2048\n')
                tf.flush()
                with tmpEnv(_CONDOR_MACHINE_AD=tf.name):
                    self.assertEqual(getSingleScramArch('slc6_blah_blah'),
                                     'slc6_blah_blah')
                    self.assertEqual(getSingleScramArch('slc7_blah_blah'),
                                     'slc7_blah_blah')
                    self.assertEqual(
                        getSingleScramArch(
                            ['slc6_blah_blah', 'slc7_blah_blah']),
                        'slc7_blah_blah')
                    self.assertEqual(
                        getSingleScramArch(
                            ['slc6_blah_blah', 'slc5_blah_blah']), None)
                    self.assertEqual(
                        getSingleScramArch(
                            ['slc7_blah_blah', 'slc8_blah_blah']),
                        'slc7_blah_blah')
        finally:
            os.chdir(self.oldCwd)
        return

    def testCMSSWSupported(self):
        """
        Test the functionality of isCMSSWSupported function
        """
        self.assertFalse(isCMSSWSupported('CMSSW_1_2_3', ''))
        self.assertFalse(isCMSSWSupported(None, 'a'))
        self.assertFalse(isCMSSWSupported('CMSSW_1_2_3', 'CMSSW_2_2_3'))
        self.assertFalse(isCMSSWSupported('CMSSW_1_2_3', 'CMSSW_1_3_3'))
        self.assertFalse(isCMSSWSupported('CMSSW_1_2_3', 'CMSSW_1_2_4'))
        self.assertFalse(isCMSSWSupported('CMSSW_1_2_3_pre1', 'CMSSW_1_2_3'))
        self.assertFalse(isCMSSWSupported('CMSSW_1_2_3', 'CMSSW_1_2_3_pre1'))
        self.assertFalse(
            isCMSSWSupported('CMSSW_1_2_3_pre1', 'CMSSW_1_2_3_pre2'))
        self.assertFalse(
            isCMSSWSupported('CMSSW_1_2_3_pre2', 'CMSSW_1_2_3_pre1'))
        self.assertFalse(isCMSSWSupported('CMSSW_7_1_25_patch2',
                                          'CMSSW_7_6_0'))
        self.assertFalse(isCMSSWSupported('CMSSW_7_3_2', 'CMSSW_10_4_0'))

        self.assertTrue(
            isCMSSWSupported('CMSSW_1_2_3_pre1', 'CMSSW_1_2_3_pre1'))
        self.assertTrue(isCMSSWSupported('CMSSW_1_2_3', 'CMSSW_1_2_3'))
        self.assertTrue(isCMSSWSupported('CMSSW_2_2_3', 'CMSSW_1_2_3'))
        self.assertTrue(isCMSSWSupported('CMSSW_1_3_3', 'CMSSW_1_2_3'))
        self.assertTrue(isCMSSWSupported('CMSSW_1_2_4', 'CMSSW_1_2_3'))

    def testisEnforceGUIDInFileNameSupported(self):
        """
        Test functionality of the `isEnforceGUIDInFileNameSupported` function
        """
        ### invalid input
        self.assertFalse(isEnforceGUIDInFileNameSupported(None))
        self.assertFalse(isEnforceGUIDInFileNameSupported(''))

        ### forever supported
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_11_0_0'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_11_0_2'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_11_1_0_pre1'))
        self.assertTrue(
            isEnforceGUIDInFileNameSupported('CMSSW_11_1_0_patch1'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_11_1_1'))

        ### specific releases
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_10_2_20_UL'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_9_4_16_UL'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_8_0_34_UL'))
        self.assertTrue(
            isEnforceGUIDInFileNameSupported('CMSSW_7_1_45_patch3'))

        ### minor supported releases
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_10_6_8'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_10_6_9'))
        self.assertTrue(
            isEnforceGUIDInFileNameSupported('CMSSW_10_6_8_patch1'))
        self.assertTrue(
            isEnforceGUIDInFileNameSupported('CMSSW_10_6_9_patch1'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_10_2_20'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_9_4_16'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_9_3_17'))
        self.assertTrue(isEnforceGUIDInFileNameSupported('CMSSW_8_0_34'))

        ### releases not supported
        self.assertFalse(isEnforceGUIDInFileNameSupported('CMSSW_10_6_7'))
        self.assertFalse(isEnforceGUIDInFileNameSupported('CMSSW_10_7_0'))
        self.assertFalse(isEnforceGUIDInFileNameSupported('CMSSW_10_2_19'))
        self.assertFalse(isEnforceGUIDInFileNameSupported('CMSSW_10_3_10'))
        self.assertFalse(isEnforceGUIDInFileNameSupported('CMSSW_5_3_10'))
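The GUID-enforcement assertions follow a "baseline plus per-cycle minimum patch release" pattern. The sketch below reconstructs only what the test pins down; the lookup table is illustrative, not exhaustive, and guid_supported is a hypothetical stand-in for isEnforceGUIDInFileNameSupported.

import re

# minimum patch level per (major, minor) cycle, as implied by the assertions
_GUID_MIN_PATCH = {(10, 6): 8, (10, 2): 20, (9, 4): 16,
                   (9, 3): 17, (8, 0): 34, (7, 1): 45}

def guid_supported(release):
    """Sketch: supported from CMSSW_11_0_0 onwards, or when the release
    meets the per-cycle minimum patch level above."""
    match = re.match(r"CMSSW_(\d+)_(\d+)_(\d+)", release or "")
    if not match:
        return False
    major, minor, patch = (int(g) for g in match.groups())
    if (major, minor, patch) >= (11, 0, 0):
        return True
    return patch >= _GUID_MIN_PATCH.get((major, minor), float("inf"))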
Beispiel #51
0
class DBSUploadTest(unittest.TestCase):
    """
    TestCase for DBSUpload module

    """
    def setUp(self):
        """
        _setUp_

        setUp function for unittest

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMComponent.DBS3Buffer"],
                                useDefault = False)
        self.testDir = self.testInit.generateWorkDir(deleteOnDestruction = False)
        self.configFile = EmulatorSetup.setupWMAgentConfig()

        myThread = threading.currentThread()
        self.bufferFactory = DAOFactory(package = "WMComponent.DBS3Buffer",
                                         logger = myThread.logger,
                                         dbinterface = myThread.dbi)

        locationAction = self.bufferFactory(classname = "DBSBufferFiles.AddLocation")
        locationAction.execute(siteName = "se1.cern.ch")
        locationAction.execute(siteName = "se1.fnal.gov")
        locationAction.execute(siteName = "malpaquet")
        self.dbsUrl = "https://*****:*****@attr("integration")
    def testBasicUpload(self):
        """
        _testBasicUpload_

        Verify that we can successfully upload to DBS3.  Also verify that the
        uploader correctly handles files parentage when uploading.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config = config)

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)

        # The algorithm needs to be run twice.  On the first iteration it will
        # create all the blocks and upload one.  On the second iteration it will
        # timeout and upload the second block.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        # Verify the files made it into DBS3.
        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Inject some more parent files and some child files into DBSBuffer.
        # Run the uploader twice, only the parent files should be added to DBS3.
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"],
                        parentFiles + moreParentFiles)

        # Run the uploader another two times to upload the child files.  Verify
        # that the child files were uploaded.
        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    @attr("integration")
    def testDualUpload(self):
        """
        _testDualUpload_

        Verify that the dual upload mode works correctly.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig()
        dbsUploader = DBSUploadPoller(config = config)
        dbsUtil = DBSBufferUtil()

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)

        allFiles = parentFiles + moreParentFiles
        allBlocks = []
        for i in range(4):
            DBSBufferDataset(parentFiles[0]["datasetPath"]).create()
            blockName = parentFiles[0]["datasetPath"] + "#" + makeUUID()
            dbsBlock = DBSBufferBlock(blockName,
                                      location = "malpaquet",
                                      datasetpath =  None)
            dbsBlock.status = "Open"
            dbsBlock.setDataset(parentFiles[0]["datasetPath"], 'data', 'VALID')
            dbsUtil.createBlocks([dbsBlock])
            for file in allFiles[i * 5 : (i * 5) + 5]:
                dbsBlock.addFile(file, 'data', 'VALID')
                dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})
                if i < 2:
                    dbsBlock.status = "InDBS"
                dbsUtil.updateBlocks([dbsBlock])
            dbsUtil.updateFileStatus([dbsBlock], "InDBS")
            allBlocks.append(dbsBlock)

        DBSBufferDataset(childFiles[0]["datasetPath"]).create()
        blockName = childFiles[0]["datasetPath"] + "#" + makeUUID()
        dbsBlock = DBSBufferBlock(blockName,
                                  location = "malpaquet",
                                  datasetpath =  None)
        dbsBlock.status = "InDBS"
        dbsBlock.setDataset(childFiles[0]["datasetPath"], 'data', 'VALID')
        dbsUtil.createBlocks([dbsBlock])
        for file in childFiles:
            dbsBlock.addFile(file, 'data', 'VALID')
            dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})

        dbsUtil.updateFileStatus([dbsBlock], "InDBS")

        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Change the status of the rest of the parent blocks so we can upload
        # them and the children.
        for dbsBlock in allBlocks:
            dbsBlock.status = "InDBS"
            dbsUtil.updateBlocks([dbsBlock])

        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles + moreParentFiles)

        # Run the uploader one more time to upload the children.
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return

    def testCloseSettingsPerWorkflow(self):
        """
        _testCloseSettingsPerWorkflow_

        Test the block closing mechanics in the DBS3 uploader,
        this uses a fake dbs api to avoid reliance on external services.
        """
        # Signal trapExit that we are a friend
        os.environ["DONT_TRAP_EXIT"] = "True"
        try:
            # Monkey patch the imports of DbsApi
            from WMComponent.DBS3Buffer import DBSUploadPoller as MockDBSUploadPoller
            MockDBSUploadPoller.DbsApi = MockDbsApi

            # Set the poller and the dbsUtil for verification
            myThread = threading.currentThread()
            (_, dbsFilePath) = mkstemp(dir = self.testDir)
            self.dbsUrl = dbsFilePath
            config = self.getConfig()
            dbsUploader = MockDBSUploadPoller.DBSUploadPoller(config = config)
            dbsUtil = DBSBufferUtil()

            # First test is event based limits and timeout with no new files.
            # Set the files and workflow
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 100,
                                MaxEvents = 150)
            self.createParentFiles(acqEra, nFiles = 20,
                                   workflowName = workflowName,
                                   taskPath = taskPath)

            # The algorithm needs to be run twice.  On the first iteration it will
            # create all the blocks and upload one with less than 150 events.
            # On the second iteration the second block is uploaded.
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            globalFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'InDBS'")[0].fetchall()
            notUploadedFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'NOTUPLOADED'")[0].fetchall()
            self.assertEqual(len(globalFiles), 14)
            self.assertEqual(len(notUploadedFiles), 6)
            # Check the fake DBS for data
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 2)
            for block in fakeDBSInfo:
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['file_count'], 7)
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
            time.sleep(3)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 3)
            for block in fakeDBSInfo:
                if block['block']['file_count'] != 6:
                    self.assertEqual(block['block']['file_count'], 7)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Now check the limit by size and timeout with new files
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 5,
                                MaxEvents = 200000000)
            self.createParentFiles(acqEra, nFiles = 16,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 6)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertTrue('close_settings' not in block)
                self.assertEqual(block['block']['open_for_writing'], 0)

            # Add more files; they go into the same block, which is then
            # closed after the timeout
            time.sleep(3)
            self.createParentFiles(acqEra, nFiles = 3,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 7)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] < 5:
                        self.assertEqual(block['block']['file_count'], 4)
                    else:
                        self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Finally test size limits
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 1, MaxFiles = 500,
                                MaxEvents = 200000000, MaxSize = 2048)
            self.createParentFiles(acqEra, nFiles = 7,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            time.sleep(2)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()

            openBlocks = dbsUtil.findOpenBlocks()  # refresh after the final upload cycle
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 11)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    if block['block']['file_count'] != 1:
                        self.assertEqual(block['block']['block_size'], 2048)
                        self.assertEqual(block['block']['file_count'], 2)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
        except:
            self.fail("We failed at some point in the test")
        finally:
            # We don't trust anyone else with _exit
            del os.environ["DONT_TRAP_EXIT"]
        return
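testCloseSettingsPerWorkflow swaps the real DbsApi for a mock by reassigning the attribute on the imported module before constructing the poller. The same effect can be achieved more hygienically with unittest.mock.patch, which restores the original on exit. A sketch of the pattern under the same assumptions as the test (patch the name as looked up inside DBSUploadPoller, with MockDbsApi and config taken from the example above):

from unittest import mock

# patch target is the name *as looked up inside DBSUploadPoller*, not the
# module where DbsApi is originally defined
with mock.patch("WMComponent.DBS3Buffer.DBSUploadPoller.DbsApi", MockDbsApi):
    poller = DBSUploadPoller(config=config)
    poller.algorithm()  # all DBS calls now hit MockDbsApi
# the real DbsApi is restored here, even if the block raises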
Beispiel #52
0
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)

        wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS",
                                    logger = logging,
                                    dbinterface = myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE')
                                    """, transaction = False)

        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'TIME' : int(time.time()),
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)

        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 1 },
                              transaction = False)

        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
                                transaction = False)

        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
        insertStreamerDAO.execute(binds = { 'RUN' : 1,
                                            'LUMI' : 1,
                                            'STREAM' : "Express",
                                            'TIME' : int(time.time()),
                                            'LFN' : "/streamer",
                                            'FILESIZE' : 0,
                                            'EVENTS' : 0 },
                                  transaction = False)

        insertPromptCalibrationDAO = daoFactory(classname = "RunConfig.InsertPromptCalibration")
        insertPromptCalibrationDAO.execute( { 'RUN' : 1,
                                              'STREAM' : "Express" },
                                            transaction = False)

        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset1.create()

        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow1.create()

        self.subscription1  = Subscription(fileset = self.fileset1,
                                           workflow = workflow1,
                                           split_algo = "Condition",
                                           type = "Condition")
        self.subscription1.create()

        # set parentage chain and sqlite fileset
        alcaRecoFile = File("/alcareco", size = 0, events = 0)
        alcaRecoFile.addRun(Run(1, *[1]))
        alcaRecoFile.setLocation("SomeSE", immediateSave = False)
        alcaRecoFile.create()
        alcaPromptFile = File("/alcaprompt", size = 0, events = 0)
        alcaPromptFile.addRun(Run(1, *[1]))
        alcaPromptFile.setLocation("SomeSE", immediateSave = False)
        alcaPromptFile.create()
        sqliteFile = File("/sqlite", size = 0, events = 0)
        sqliteFile.create()
        self.fileset1.addFile(sqliteFile)
        self.fileset1.commit()

        results = myThread.dbi.processData("""SELECT lfn FROM wmbs_file_details
                                              """,
                                           transaction = False)[0].fetchall()

        setParentageDAO = wmbsDaoFactory(classname = "Files.SetParentage")
        setParentageDAO.execute(binds = [ { 'parent' : "/streamer",
                                            'child' : "/alcareco" },
                                          { 'parent' : "/alcareco",
                                            'child' : "/alcaprompt" },
                                          { 'parent' : "/alcaprompt",
                                            'child' : "/sqlite" } ],
                                transaction = False)

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['runNumber'] = 1
        self.splitArgs['streamName'] = "Express"

        return
Beispiel #53
0
class StoreResultsTest(unittest.TestCase):
    def setUp(self):
        """
        _setUp_

        Initialize the database.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)
        self.testDir = self.testInit.generateWorkDir()

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMCore.WMBS",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)
        self.listTasksByWorkflow = self.daoFactory(
            classname="Workflow.LoadFromName")
        self.listFilesets = self.daoFactory(classname="Fileset.List")
        self.listSubsMapping = self.daoFactory(
            classname="Subscriptions.ListSubsAndFilesetsFromWorkflow")

        return

    def tearDown(self):
        """
        _tearDown_

        Clear out the database.
        """
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
        return

    def testStoreResults(self):
        """
        _testStoreResults_

        Create a StoreResults workflow and verify it installs into WMBS
        correctly.
        """
        arguments = StoreResultsWorkloadFactory.getTestArguments()

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", arguments)

        testWMBSHelper = WMBSHelper(testWorkload,
                                    "StoreResults",
                                    "SomeBlock",
                                    cachepath=self.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        testWorkflow = Workflow(name="TestWorkload",
                                task="/TestWorkload/StoreResults")
        testWorkflow.load()

        self.assertEqual(len(testWorkflow.outputMap.keys()), 2,
                         "Error: Wrong number of WF outputs.")
        goldenOutputMods = {"Merged": "USER"}
        for goldenOutputMod, tier in goldenOutputMods.items():
            fset = goldenOutputMod + tier
            mergedOutput = testWorkflow.outputMap[fset][0][
                "merged_output_fileset"]
            unmergedOutput = testWorkflow.outputMap[fset][0]["output_fileset"]

            mergedOutput.loadData()
            unmergedOutput.loadData()

            self.assertEqual(
                mergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % fset,
                "Error: Merged output fileset is wrong: %s" %
                mergedOutput.name)
            self.assertEqual(
                unmergedOutput.name,
                "/TestWorkload/StoreResults/merged-%s" % fset,
                "Error: Unmerged output fileset is wrong: %s." %
                unmergedOutput.name)

        logArchOutput = testWorkflow.outputMap["logArchive"][0][
            "merged_output_fileset"]
        unmergedLogArchOutput = testWorkflow.outputMap["logArchive"][0][
            "output_fileset"]
        logArchOutput.loadData()
        unmergedLogArchOutput.loadData()

        self.assertEqual(logArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")
        self.assertEqual(unmergedLogArchOutput.name,
                         "/TestWorkload/StoreResults/merged-logArchive",
                         "Error: LogArchive output fileset is wrong.")

        topLevelFileset = Fileset(name="TestWorkload-StoreResults-SomeBlock")
        topLevelFileset.loadData()

        procSubscription = Subscription(fileset=topLevelFileset,
                                        workflow=testWorkflow)
        procSubscription.loadData()

        self.assertEqual(procSubscription["type"], "Merge",
                         "Error: Wrong subscription type.")
        self.assertEqual(procSubscription["split_algo"],
                         "ParentlessMergeBySize", "Error: Wrong split algo.")

        return

    def testFilesets(self):
        """
        Test workflow tasks, filesets and subscriptions creation
        """
        # expected tasks, filesets, subscriptions, etc
        expOutTasks = ['/TestWorkload/StoreResults']
        expWfTasks = [
            '/TestWorkload/StoreResults',
            '/TestWorkload/StoreResults/StoreResultsLogCollect'
        ]
        expFsets = [
            'TestWorkload-StoreResults-/MinimumBias/ComissioningHI-v1/RAW',
            '/TestWorkload/StoreResults/merged-MergedUSER',
            '/TestWorkload/StoreResults/merged-logArchive'
        ]
        subMaps = [
            (2, '/TestWorkload/StoreResults/merged-logArchive',
             '/TestWorkload/StoreResults/StoreResultsLogCollect',
             'MinFileBased', 'LogCollect'),
            (1, 'TestWorkload-StoreResults-/MinimumBias/ComissioningHI-v1/RAW',
             '/TestWorkload/StoreResults', 'ParentlessMergeBySize', 'Merge')
        ]

        testArguments = StoreResultsWorkloadFactory.getTestArguments()

        factory = StoreResultsWorkloadFactory()
        testWorkload = factory.factoryWorkloadConstruction(
            "TestWorkload", testArguments)

        testWMBSHelper = WMBSHelper(testWorkload,
                                    "StoreResults",
                                    blockName=testArguments['InputDataset'],
                                    cachepath=self.testInit.testDir)
        testWMBSHelper.createTopLevelFileset()
        testWMBSHelper._createSubscriptionsInWMBS(
            testWMBSHelper.topLevelTask, testWMBSHelper.topLevelFileset)

        self.assertItemsEqual(testWorkload.listOutputProducingTasks(),
                              expOutTasks)

        workflows = self.listTasksByWorkflow.execute(workflow="TestWorkload")
        self.assertItemsEqual([item['task'] for item in workflows], expWfTasks)

        # returns a tuple of id, name, open and last_update
        filesets = self.listFilesets.execute()
        self.assertItemsEqual([item[1] for item in filesets], expFsets)

        subscriptions = self.listSubsMapping.execute(workflow="TestWorkload",
                                                     returnTuple=True)
        self.assertItemsEqual(subscriptions, subMaps)
Beispiel #54
0
class ThreadPoolTest(unittest.TestCase):
    """
    _ThreadPool_t_

    Unit tests for threadpool

    """
    _nrOfThreads = 10
    _nrOfPools = 5

    def setUp(self):
        "make a logger instance and create tables"

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema()

    def tearDown(self):
        """
        Deletion of database
        """
        # FIXME: this might not work if you're not using socket.

        self.testInit.clearDatabase()

    def testA(self):
        """
        __testSubscribe__

        Test subscription of a component.
        """
        raise nose.SkipTest

        myThread = threading.currentThread()
        # create a 'fake' component that contains an arg dictionary.
        component = Dummy()
        # load default parameters.
        config = self.testInit.getConfiguration()
        # normally assigned by the harness of the test component.
        config.Agent.componentName = "TestComponent"

        component.config = config

        threadPools = []
        for i in xrange(0, ThreadPoolTest._nrOfPools):
            threadPool = ThreadPool("WMCore.ThreadPool.ThreadSlave", \
                component, 'MyPool_'+str(i), ThreadPoolTest._nrOfThreads)
            threadPools.append(threadPool)

        # this is how you would use the threadpool. The threadpool retrieves
        # events/payloads from the message service. If a thread is available
        # it is dispatched, otherwise it is stored in the threadpool.
        # make the number of tasks bigger than the number of threads to test
        # the persistent queue.
        for i in xrange(0, ThreadPoolTest._nrOfThreads * 10):
            event = 'eventNr_' + str(i)
            payload = 'payloadNr_' + str(i)
            # normally you would have different events per threadpool and
            # even different objects per pool. the payload part will be
            # pickled into the database enabling flexibility in passing
            # information.
            for j in xrange(0, ThreadPoolTest._nrOfPools):
                threadPools[j].enqueue(event, \
                    {'event' : event, 'payload' : payload})

        # this commit you want to be in the agent harness, so the message is
        # actually removed from the msgService. we can do this as the threadpool
        # acts as a dispatcher and is a short-lived action: dispatch to thread
        # or queue and tell the agent harness it is finished.
        finished = False
        timeout = 60  # secs
        currenttime = 0
        while not finished:
            print('waiting for threads to finish. Work left:')
            for j in xrange(0, ThreadPoolTest._nrOfPools):
                print('pool_' + str(j) + ':' + str(threadPools[j].callQueue))
            time.sleep(1)
            finished = True
            currenttime += 1
            if (timeout == currenttime):
                raise RuntimeError
            for j in xrange(0, ThreadPoolTest._nrOfPools):
                if (len(threadPools[j].resultsQueue) <
                        ThreadPoolTest._nrOfThreads * 10):
                    finished = False
                    break
        # check if the tables are really empty and all messages
        # have been processed.
        for j in xrange(0, ThreadPoolTest._nrOfPools):
            assert len(threadPools[j].resultsQueue) == \
                ThreadPoolTest._nrOfThreads*10
        myThread.transaction.begin()
        for j in xrange(0, ThreadPoolTest._nrOfPools):
            self.assertEqual(threadPools[j].countMessages(), 0)
        myThread.transaction.commit()
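The wait loop in testA (poll once per second, give up after 60 seconds) is a generic pattern worth factoring out. A standalone sketch with illustrative names, not part of the ThreadPool API:

import time

def wait_until(predicate, timeout=60, interval=1):
    """Poll predicate() until it returns True, or raise after timeout seconds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(interval)
    raise RuntimeError("condition not met within %d seconds" % timeout)

# usage sketch, mirroring the loop in testA:
# wait_until(lambda: all(len(p.resultsQueue) >= nTasks for p in threadPools))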
Beispiel #55
0
class RepackTest(unittest.TestCase):
    """
    _RepackTest_

    Test for Repack job splitter
    """
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules=["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE')
                                    """,
                                 transaction=False)

        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE2')
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'TIME': int(time.time()),
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in [1, 2, 3, 4]:
            insertLumiDAO.execute(binds={
                'RUN': 1,
                'LUMI': lumi
            },
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "A"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "A", "TestFileset1")

        self.fileset1 = Fileset(name="TestFileset1")
        self.fileset1.load()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset=self.fileset1,
                                          workflow=workflow1,
                                          split_algo="Repack",
                                          type="Repack")
        self.subscription1.create()

        # keep for later
        self.insertClosedLumiDAO = daoFactory(
            classname="RunLumiCloseout.InsertClosedLumi")
        self.currentTime = int(time.time())

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['maxSizeSingleLumi'] = 20 * 1024 * 1024 * 1024
        self.splitArgs['maxSizeMultiLumi'] = 10 * 1024 * 1024 * 1024
        self.splitArgs['maxInputEvents'] = 500000
        self.splitArgs['maxInputFiles'] = 1000

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def getNumActiveSplitLumis(self):
        """
        _getNumActiveSplitLumis_

        helper function that counts the number of active split lumis
        """
        myThread = threading.currentThread()

        results = myThread.dbi.processData("""SELECT COUNT(*)
                                              FROM lumi_section_split_active
                                              """,
                                           transaction=False)[0].fetchall()

        return results[0][0]

    def test00(self):
        """
        _test00_

        Test that the job name prefix feature works
        Test multi lumi size threshold
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)

        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        mySplitArgs['maxSizeMultiLumi'] = self.splitArgs['maxSizeMultiLumi']
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxSizeMultiLumi'] = 5000
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertTrue(job['name'].startswith("Repack-"),
                        "ERROR: Job has wrong name")

        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test01(self):
        """
        _test01_

        Test multi lumi event threshold
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputEvents'] = 500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test02(self):
        """
        _test02_

        Test single lumi size threshold
        Single lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxSizeSingleLumi'] = 6500
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")

        return

    def test03(self):
        """
        _test03_

        Test single lumi event threshold
        Single lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1]:
            filecount = 8
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputEvents'] = 650
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 2,
                         "ERROR: JobFactory didn't create two jobs")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 6,
                         "ERROR: Job does not process 6 files")

        job = jobGroups[0].jobs[1]
        self.assertEqual(len(job.getFiles()), 2,
                         "ERROR: Job does not process 2 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 1,
                         "ERROR: Split lumis were not created")

        return

    def test04(self):
        """
        _test04_

        Test streamer count threshold (only multi lumi)
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

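        # closing the fileset flushes the remaining lumis (3 and 4) into a
        # final 4 file job even though they stay below the thresholds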
        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create a single job")

        job = jobGroups[0].jobs[0]
        self.assertEqual(len(job.getFiles()), 4,
                         "ERROR: Job does not process 4 files")

        self.assertEqual(self.getNumActiveSplitLumis(), 0,
                         "ERROR: Split lumis were created")

        return

    def test05(self):
        """
        _test05_

        Test repacking of multiple lumis with holes in the lumi sequence
        Multi lumi input

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 4]:
            filecount = 2
            for i in range(filecount):
                newFile = File(makeUUID(), size=1000, events=100)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

        mySplitArgs['maxInputFiles'] = 5
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

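        # lumi 3 is not accounted for yet, so the splitter cannot know
        # whether crossing the gap is safe and creates nothing; closing
        # lumi 3 with FILECOUNT 0 marks it as legitimately empty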
        self.insertClosedLumiDAO.execute(binds={
            'RUN': 1,
            'LUMI': 3,
            'STREAM': "A",
            'FILECOUNT': 0,
            'INSERT_TIME': self.currentTime,
            'CLOSE_TIME': self.currentTime
        },
                                         transaction=False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 1,
                         "ERROR: JobFactory didn't create one job")

        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")

        return

    def test06(self):
        """
        _test06_

        Test repacking of 3 lumis
        2 small lumis (single job), followed by a big one (multiple jobs)

        files for lumi 1 and 2 are below multi-lumi thresholds
        files for lumi 3 are above single-lumi threshold

        """
        mySplitArgs = self.splitArgs.copy()

        insertClosedLumiBinds = []
        for lumi in [1, 2, 3]:
            filecount = 2
            for i in range(filecount):
                if lumi == 3:
                    nevents = 500
                else:
                    nevents = 100
                newFile = File(makeUUID(), size=1000, events=nevents)
                newFile.addRun(Run(1, *[lumi]))
                newFile.setLocation("SomeSE", immediateSave=False)
                newFile.create()
                self.fileset1.addFile(newFile)
                insertClosedLumiBinds.append({
                    'RUN': 1,
                    'LUMI': lumi,
                    'STREAM': "A",
                    'FILECOUNT': filecount,
                    'INSERT_TIME': self.currentTime,
                    'CLOSE_TIME': self.currentTime
                })
        self.fileset1.commit()

        jobFactory = self.splitterFactory(package="WMCore.WMBS",
                                          subscription=self.subscription1)

        self.insertClosedLumiDAO.execute(binds=insertClosedLumiBinds,
                                         transaction=False)

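        # lumis 1 and 2 hold 4 files x 100 events = 400 events and fit in
        # one multi-lumi job; lumi 3 holds 2 files x 500 events = 1000
        # events, above the 900 event threshold, and is split into two
        # single-file jobs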
        mySplitArgs['maxInputEvents'] = 900
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 1,
                         "ERROR: JobFactory didn't return one JobGroup")

        self.assertEqual(len(jobGroups[0].jobs), 3,
                         "ERROR: JobFactory didn't create three jobs")

        self.assertEqual(len(jobGroups[0].jobs[0].getFiles()), 4,
                         "ERROR: first job does not process 4 files")

        self.assertEqual(len(jobGroups[0].jobs[1].getFiles()), 1,
                         "ERROR: second job does not process 1 file")

        self.assertEqual(len(jobGroups[0].jobs[2].getFiles()), 1,
                         "ERROR: third job does not process 1 file")

        return
Beispiel #56
0
class WorkQueueTestCase(unittest.TestCase):

    def setSchema(self):
        "this can be override if the schema setting is different"
        self.schema = ["WMCore.WMBS","WMComponent.DBS3Buffer","WMCore.BossAir"]
        self.couchApps = ["WorkQueue"]

    def setUp(self):
        """
        _setUp_

        Setup the database and logging connection.  Try to create all of the
        WMBS tables.  Also add some dummy locations.
        """
        self.queueDB = 'workqueue_t'
        self.queueInboxDB = 'workqueue_t_inbox'
        self.globalQDB = 'workqueue_t_global'
        self.globalQInboxDB = 'workqueue_t_global_inbox'
        self.localQDB = 'workqueue_t_local'
        self.localQInboxDB = 'workqueue_t_local_inbox'
        self.localQDB2 = 'workqueue_t_local2'
        self.localQInboxDB2 = 'workqueue_t_local2_inbox'
        self.configCacheDB = 'workqueue_t_config_cache'

        self.setSchema()
        self.testInit = TestInit('WorkQueueTest')
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = self.schema,
                                useDefault = False)
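        # each queue flavour (standalone, global and the two locals) gets
        # its own CouchDB database plus a matching inbox database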
        self.testInit.setupCouch(self.queueDB, *self.couchApps)
        self.testInit.setupCouch(self.queueInboxDB, *self.couchApps)
        self.testInit.setupCouch(self.globalQDB, *self.couchApps)
        self.testInit.setupCouch(self.globalQInboxDB , *self.couchApps)
        self.testInit.setupCouch(self.localQDB, *self.couchApps)
        self.testInit.setupCouch(self.localQInboxDB, *self.couchApps)
        self.testInit.setupCouch(self.localQDB2, *self.couchApps)
        self.testInit.setupCouch(self.localQInboxDB2, *self.couchApps)
        self.testInit.setupCouch(self.configCacheDB, 'ConfigCache')

        couchServer = CouchServer(os.environ.get("COUCHURL"))
        self.configCacheDBInstance = couchServer.connectDatabase(self.configCacheDB)

        self.workDir = self.testInit.generateWorkDir()
        return

    def tearDown(self):
        """
        _tearDown_

        Drop all the WMBS tables.
        """
        #self.testInit.tearDownCouch()
        self.testInit.clearDatabase()
        self.testInit.delWorkDir()
Beispiel #57
0
class WMAgentTest(unittest.TestCase):
    """
    _WMAgentTest_

    Global unittest for all WMAgent components
    """

    # This is an integration test
    __integration__ = "Any old bollocks"

    sites = ['T2_US_Florida', 'T2_US_UCSD', 'T2_TW_Taiwan', 'T1_CH_CERN']
    components = ['JobCreator', 'JobSubmitter', 'JobTracker',
                  'JobAccountant', 'JobArchiver', 'TaskArchiver',
                  'RetryManager', 'ErrorHandler']

    def setUp(self):
        """
        _setUp_

        Set up vital components
        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules = ["WMCore.WMBS",'WMCore.MsgService',
                                                 'WMCore.ResourceControl', 'WMCore.ThreadPool',
                                                 'WMCore.Agent.Database'],
                                useDefault = False)

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package = "WMCore.WMBS",
                                     logger = myThread.logger,
                                     dbinterface = myThread.dbi)

        locationAction = self.daoFactory(classname = "Locations.New")
        pendingSlots  = self.daoFactory(classname = "Locations.SetPendingSlots")


        for site in self.sites:
            locationAction.execute(siteName = site, seName = 'se.%s' % (site), ceName = site)
            pendingSlots.execute(siteName = site, pendingSlots = 1000)


        #Create sites in resourceControl
        resourceControl = ResourceControl()
        for site in self.sites:
            resourceControl.insertSite(siteName = site, seName = 'se.%s' % (site), ceName = site)
            resourceControl.insertThreshold(siteName = site, taskType = 'Processing', \
                                            maxSlots = 10000, pendingSlots = 10000)


        self.testDir = self.testInit.generateWorkDir()


        # Set heartbeat
        for component in self.components:
            heartbeatAPI = HeartbeatAPI(component)
            heartbeatAPI.registerComponent()

        return


    def tearDown(self):
        """
        _tearDown_

        Tear down everything and go home.
        """

        self.testInit.clearDatabase()

        self.testInit.delWorkDir()

        return

    def createTestWorkload(self, workloadName = 'Test', emulator = True):
        """
        _createTestWorkload_

        Creates a test workload for us to run on, hold the basic necessities.
        """


        workload = testWorkload("TestWorkload")
        rereco = workload.getTask("ReReco")


        taskMaker = TaskMaker(workload, os.path.join(self.testDir, 'workloadTest'))
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        workload.save(workloadName)

        return workload

    def getConfig(self):
        """
        _getConfig_

        This is the global test configuration object
        """



        config = Configuration()

        config.component_("Agent")
        config.Agent.WMSpecDirectory = self.testDir
        config.Agent.agentName       = 'testAgent'
        config.Agent.componentName   = 'test'


        # First the general stuff
        config.section_("General")
        config.General.workDir = os.getenv("TESTDIR", self.testDir)

        # Now the CoreDatabase information
        # This should be the dialect, dburl, etc

        config.section_("CoreDatabase")
        config.CoreDatabase.connectUrl = os.getenv("DATABASE")
        config.CoreDatabase.socket     = os.getenv("DBSOCK")

        # JobCreator
        config.component_("JobCreator")
        config.JobCreator.namespace = 'WMComponent.JobCreator.JobCreator'
        config.JobCreator.logLevel  = 'DEBUG'
        config.JobCreator.maxThreads                = 1
        config.JobCreator.UpdateFromResourceControl = True
        config.JobCreator.pollInterval              = 10
        config.JobCreator.jobCacheDir               = self.testDir
        config.JobCreator.defaultJobType            = 'processing' #Type of jobs that we run, used for resource control
        config.JobCreator.workerThreads             = 2
        config.JobCreator.componentDir              = os.path.join(os.getcwd(), 'Components')

        # JobSubmitter
        config.component_("JobSubmitter")
        config.JobSubmitter.namespace     = 'WMComponent.JobSubmitter.JobSubmitter'
        config.JobSubmitter.logLevel      = 'INFO'
        config.JobSubmitter.maxThreads    = 1
        config.JobSubmitter.pollInterval  = 10
        config.JobSubmitter.pluginName    = 'CondorGlobusPlugin'
        config.JobSubmitter.pluginDir     = 'JobSubmitter.Plugins'
        config.JobSubmitter.submitDir     = os.path.join(self.testDir, 'submit')
        config.JobSubmitter.submitNode    = os.getenv("HOSTNAME", 'badtest.fnal.gov')
        config.JobSubmitter.submitScript  = os.path.join(getWMBASE(),
                                                         'test/python/WMComponent_t/JobSubmitter_t',
                                                         'submit.sh')
        config.JobSubmitter.componentDir  = os.path.join(os.getcwd(), 'Components')
        config.JobSubmitter.workerThreads = 2
        config.JobSubmitter.jobsPerWorker = 200

        # JobTracker
        config.component_("JobTracker")
        config.JobTracker.logLevel      = 'DEBUG'
        config.JobTracker.pollInterval  = 10
        config.JobTracker.trackerName   = 'CondorTracker'
        config.JobTracker.pluginDir     = 'WMComponent.JobTracker.Plugins'
        config.JobTracker.componentDir  = os.path.join(os.getcwd(), 'Components')
        config.JobTracker.runTimeLimit  = 7776000 #Jobs expire after 90 days
        config.JobTracker.idleTimeLimit = 7776000
        config.JobTracker.heldTimeLimit = 7776000
        config.JobTracker.unknTimeLimit = 7776000

        # JobAccountant
        config.component_("JobAccountant")
        config.JobAccountant.pollInterval = 60
        config.JobAccountant.componentDir = os.path.join(os.getcwd(), 'Components')
        config.JobAccountant.logLevel     = 'INFO'

        # JobArchiver
        config.component_("JobArchiver")
        config.JobArchiver.pollInterval          = 60
        config.JobArchiver.logLevel              = 'INFO'
        config.JobArchiver.logDir                = os.path.join(self.testDir, 'logs')
        config.JobArchiver.componentDir          = os.path.join(os.getcwd(), 'Components')
        config.JobArchiver.numberOfJobsToCluster = 1000

        # Task Archiver
        config.component_("TaskArchiver")
        config.TaskArchiver.componentDir    = self.testInit.generateWorkDir()
        config.TaskArchiver.WorkQueueParams = {}
        config.TaskArchiver.pollInterval    = 60
        config.TaskArchiver.logLevel        = 'INFO'
        config.TaskArchiver.timeOut         = 0

        # JobStateMachine
        config.component_('JobStateMachine')
        config.JobStateMachine.couchurl        = os.getenv('COUCHURL',
                                                           'mnorman:[email protected]:5984')
        config.JobStateMachine.couchDBName     = "mnorman_test"


        # Needed, because this is a test
        os.makedirs(config.JobSubmitter.submitDir)


        return config

    def createFileCollection(self, name, nSubs, nFiles, workflowURL = 'test', site = None):
        """
        _createFileCollection_

        Create a collection of files for splitting into jobs
        """

        myThread = threading.currentThread()

        testWorkflow = Workflow(spec = workflowURL, owner = "mnorman",
                                name = name, task="/TestWorkload/ReReco")
        testWorkflow.create()

        for sub in range(nSubs):

            nameStr = '%s-%i' % (name, sub)

            testFileset = Fileset(name = nameStr)
            testFileset.create()

            for f in range(nFiles):
                # pick a random site
                if not site:
                    tmpSite = 'se.%s' % (random.choice(self.sites))
                else:
                    tmpSite = 'se.%s' % (site)
                testFile = File(lfn = "/lfn/%s/%i" % (nameStr, f), size = 1024, events = 10)
                testFile.setLocation(tmpSite)
                testFile.create()
                testFileset.addFile(testFile)

            testFileset.commit()
            testFileset.markOpen(isOpen = 0)
            testSubscription = Subscription(fileset = testFileset,
                                            workflow = testWorkflow,
                                            type = "Processing",
                                            split_algo = "FileBased")
            testSubscription.create()


        return

    def createReports(self, jobs, retryCount = 0):
        """
        _createReports_

        Create some dummy job reports for each job
        """


        report = Report()
        report.addStep('testStep', 0)

        for job in jobs:
            #reportPath = os.path.join(job['cache_dir'], 'Report.%i.pkl' % (retryCount))
            reportPath = job['fwjr_path']
            if os.path.exists(reportPath):
                os.remove(reportPath)
            report.save(reportPath)


        return

    def testA_StraightThrough(self):
        """
        _StraightThrough_

        Just run everything straight through without any variations
        """
        # Do pre-submit job check
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, 0, "User currently has %i running jobs.  Test will not continue" % (nRunning))

        myThread = threading.currentThread()
        workload = self.createTestWorkload()
        config   = self.getConfig()


        name         = 'WMAgent_Test1'
        site         = self.sites[0]
        nSubs        = 5
        nFiles       = 10
        workloadPath = os.path.join(self.testDir, 'workloadTest',
                                    'TestWorkload', 'WMSandbox',
                                    'WMWorkload.pkl')

        # Create a collection of files
        self.createFileCollection(name = name, nSubs = nSubs,
                                  nFiles = nFiles,
                                  workflowURL = workloadPath,
                                  site = site)

        ############################################################
        # Test the JobCreator


        config.Agent.componentName = 'JobCreator'
        testJobCreator = JobCreatorPoller(config = config)

        testJobCreator.algorithm()
        time.sleep(5)


        # Did all jobs get created?
        getJobsAction = self.daoFactory(classname = "Jobs.GetAllJobs")
        result = getJobsAction.execute(state = 'Created', jobType = "Processing")
        self.assertEqual(len(result), nSubs*nFiles)


        # Count database objects
        result = myThread.dbi.processData('SELECT * FROM wmbs_sub_files_acquired')[0].fetchall()
        self.assertEqual(len(result), nSubs * nFiles)


        # Find the test directory
        testDirectory = os.path.join(self.testDir, 'TestWorkload', 'ReReco')
        self.assertTrue('JobCollection_1_0' in os.listdir(testDirectory))
        self.assertTrue(len(os.listdir(testDirectory)) <= 20)

        groupDirectory = os.path.join(testDirectory, 'JobCollection_1_0')

        # First job should be in here
        self.assertTrue('job_1' in os.listdir(groupDirectory))
        jobFile = os.path.join(groupDirectory, 'job_1', 'job.pkl')
        self.assertTrue(os.path.isfile(jobFile))
        # read the pickle in binary mode to be safe across protocols
        f = open(jobFile, 'rb')
        job = cPickle.load(f)
        f.close()


        self.assertEqual(job['workflow'], name)
        self.assertEqual(len(job['input_files']), 1)
        self.assertEqual(os.path.basename(job['sandbox']), 'TestWorkload-Sandbox.tar.bz2')

        ###############################################################
        # Now test the JobSubmitter

        config.Agent.componentName = 'JobSubmitter'
        testJobSubmitter = JobSubmitterPoller(config = config)


        testJobSubmitter.algorithm()


        # Check that jobs are in the right state
        result = getJobsAction.execute(state = 'Created', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        # Check assigned locations
        getLocationAction = self.daoFactory(classname = "Jobs.GetLocation")
        for id in result:
            loc = getLocationAction.execute(jobid = id)
            self.assertEqual(loc, [[site]])


        # Check to make sure we have running jobs
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, nFiles * nSubs)


        #################################################################
        # Now the JobTracker


        config.Agent.componentName = 'JobTracker'
        testJobTracker = JobTrackerPoller(config = config)
        testJobTracker.setup()

        testJobTracker.algorithm()

        # Running the algo without removing the jobs should do nothing
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)


        condorRM()
        time.sleep(1)

        # All jobs gone?
        nRunning = getCondorRunningJobs()
        self.assertEqual(nRunning, 0)


        testJobTracker.algorithm()
        time.sleep(5)

        # Running the algo without removing the jobs should do nothing
        result = getJobsAction.execute(state = 'Executing', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        #################################################################
        # Now the JobAccountant

        # First you need to load all jobs


        self.getFWJRAction = self.daoFactory(classname = "Jobs.GetFWJRByState")
        completeJobs       = self.getFWJRAction.execute(state = "complete")


        # Create reports for all jobs
        self.createReports(jobs = completeJobs, retryCount = 0)

        config.Agent.componentName = 'JobAccountant'
        testJobAccountant = JobAccountantPoller(config = config)
        testJobAccountant.setup()


        # It should do something with the jobs
        testJobAccountant.algorithm()


        # All the jobs should be done now
        result = getJobsAction.execute(state = 'Complete', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Success', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)

        #######################################################################
        # Now the JobArchiver


        config.Agent.componentName = 'JobArchiver'
        testJobArchiver = JobArchiverPoller(config = config)


        testJobArchiver.algorithm()

        # All the jobs should be cleaned up
        result = getJobsAction.execute(state = 'Success', jobType = "Processing")
        self.assertEqual(len(result), 0)
        result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
        self.assertEqual(len(result), nSubs * nFiles)


        logDir = os.path.join(self.testDir, 'logs')

        for job in completeJobs:
            self.assertFalse(os.path.exists(job['fwjr_path']))
            jobFolder = 'JobCluster_%i' \
                    % (int(job['id']/config.JobArchiver.numberOfJobsToCluster))
            jobPath = os.path.join(logDir, jobFolder, 'Job_%i.tar' %(job['id']))
            self.assertTrue(os.path.isfile(jobPath))
            self.assertTrue(os.path.getsize(jobPath) > 0)

        ###########################################################################
        # Now the TaskArchiver


        config.Agent.componentName = 'TaskArchiver'
        testTaskArchiver = TaskArchiverPoller(config = config)


        testTaskArchiver.algorithm()


        result = getJobsAction.execute(state = 'Cleanout', jobType = "Processing")
        self.assertEqual(len(result), 0)


        for jdict in completeJobs:
            job = Job(id = jdict['id'])
            self.assertFalse(job.exists())

        if os.path.isdir('testDir'):
            shutil.rmtree('testDir')
        shutil.copytree('%s' %self.testDir, os.path.join(os.getcwd(), 'testDir'))

        return
Beispiel #58
0
class RuntimeTest(unittest.TestCase):
    """
    _RuntimeTest_

    A unittest to test the WMRuntime/WMSpec/Storage/etc tree
    """


    # This is an integration test
    __integration__ = "Any old bollocks"


    def setUp(self):
        """
        Basic setUp

        """

        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()


        self.testDir = self.testInit.generateWorkDir()

        # Random variables
        self.workloadDir = None
        self.unpackDir   = None
        self.initialDir  = os.getcwd()
        self.origPath    = sys.path


        # Create some dirs
        os.makedirs(os.path.join(self.testDir, 'packages'))

        return


    def tearDown(self):
        """
        _tearDown_

        Remove any references you put directly into the modules
        """

        self.testInit.delWorkDir()

        # Clean up imports
        if 'WMSandbox' in sys.modules.keys():
            del sys.modules['WMSandbox']
        if 'WMSandbox.JobIndex' in sys.modules.keys():
            del sys.modules['WMSandbox.JobIndex']


        return


    def createTestWorkload(self, workloadName = 'Test', emulator = True):
        """
        _createTestWorkload_

        Creates a test workload for us to run on, hold the basic necessities.
        """

        workloadDir = os.path.join(self.testDir, workloadName)

        #arguments = getTestArguments()

        #workload = rerecoWorkload("Tier1ReReco", arguments)
        #rereco = workload.getTask("ReReco")

        workload = testWorkload(emulation = emulator)
        rereco = workload.getTask("ReReco")

        # Set environment and site-local-config
        siteConfigPath = os.path.join(workloadDir, 'SITECONF/local/JobConfig/')
        if not os.path.exists(siteConfigPath):
            os.makedirs(siteConfigPath)
        shutil.copy('site-local-config.xml', siteConfigPath)
        environment = rereco.data.section_('environment')
        environment.CMS_PATH = workloadDir

        taskMaker = TaskMaker(workload, workloadDir)
        taskMaker.skipSubscription = True
        taskMaker.processWorkload()

        workload.save(workloadName)

        return workload

    def unpackComponents(self, workload):
        """
        Run the unpacker to build the directories
        IMPORTANT NOTE:
          This is not how we do things on the worker node
          On the worker node we do not run multiple tasks
          So here we create multiple tasks in different directories
          To mimic running on multiple systems

        """

        listOfTasks = getListOfTasks(workload = workload)

        self.unpackDir = os.path.join(self.testDir, 'unpack')

        if not os.path.exists(self.unpackDir):
            os.makedirs(self.unpackDir)

        os.chdir(self.unpackDir)

        sandbox  = workload.data.sandbox

        for task in listOfTasks:
            # We have to create a directory, unpack in it, and then get out
            taskName = task.name()
            taskDir = os.path.join(self.unpackDir, taskName)
            if not os.path.exists(taskDir):
            # Well then we have to make it
                os.makedirs(taskDir)
            os.chdir(taskDir)
            # Now that we're here, run the unpacker

            package  = os.path.join(self.testDir, 'packages', '%sJobPackage.pkl' % (taskName))
            jobIndex = 1

            RunUnpacker(sandbox = sandbox, package = package,
                        jobIndex = jobIndex, jobname = taskName)

            # And go back to where we started
            os.chdir(self.unpackDir)


        os.chdir(self.initialDir)

        return


    def createWMBSComponents(self, workload):
        """
        Create the WMBS Components for this job

        """

        listOfTasks = []
        listOfSubs  = []

        rerecoTask  = None

        for primeTask in workload.taskIterator():
            # There should only be one prime task, and it should be the rerecoTask
            rerecoTask = primeTask
            for task in primeTask.taskIterator():
                listOfTasks.append(task)

        for task in listOfTasks:
            fileset = self.getFileset()
            sub = self.createSubscriptions(task = task,
                                           fileset = fileset)
            #listOfSubs.append(sub)

        return

    def createSubscriptions(self, task, fileset):
        """
        Create a subscription based on a task


        """
        type = task.taskType()
        work = task.makeWorkflow()

        sub = Subscription(fileset = fileset,
                           workflow = work,
                           split_algo = "FileBased",
                           type = type)

        package = self.createWMBSJobs(subscription = sub,
                                      task = task)

        packName = os.path.join(self.testDir, 'packages',
                                '%sJobPackage.pkl' %(task.name()))
        package.save(packName)

        return sub

    def createWMBSJobs(self, subscription, task):
        """
        Create the jobs for WMBS Components
        Send a subscription/task, get back a package.

        """

        splitter = SplitterFactory()
        geneFac  = GeneratorFactory()
        jobfactory = splitter(subscription = subscription,
                              package = "WMCore.DataStructs",
                              generators = geneFac.makeGenerators(task))
        params = task.jobSplittingParameters()
        jobGroups = jobfactory(**params)

        jobID = 1
        package = JobPackage()
        for group in jobGroups:
            for job in group.jobs:
                job['id'] = jobID
                jobID += 1
                package[job['id']] = job

        return package

    def getFileset(self):
        """
        Get a fileset based on the task

        """

        # the original formatted the builtin 'type' into the name; use a
        # UUID instead so each generated fileset gets a unique name
        fileset = Fileset(name = 'Merge%s' % makeUUID())


        for i in range(0, random.randint(15,25)):
            # Use the testDir to generate a random lfn
            inpFile = File(lfn = "%s/%s.root" %(self.testDir, makeUUID()),
                           size = random.randint(200000, 1000000),
                           events = random.randint(1000,2000) )
            inpFile.setLocation('Megiddo')
            fileset.addFile(inpFile)


        return fileset

    def runJobs(self, workload):
        """
        This might actually run the job.  Who knows?


        """
        listOfTasks = []

        for primeTask in workload.taskIterator():
            listOfTasks.append(primeTask)
            # Only run primeTasks for now


        for task in listOfTasks:
            jobName = task.name()
            taskDir = os.path.join(self.unpackDir, jobName, 'job')
            os.chdir(taskDir)
            sys.path.append(taskDir)

            # Scream, run around in panic, blow up machine
            print "About to run jobs"
            print taskDir
            miniStartup(dir = taskDir)


            # When exiting, go back to where you started
            os.chdir(self.initialDir)
            sys.path.remove(taskDir)

        return

    @attr('integration')
    def testA_CreateWorkload(self):
        """
        _CreateWorkload_

        Create a workload
        Unpack the workload
        Check for consistency
        """

        workloadName = 'basicWorkload'
        workload     = self.createTestWorkload(workloadName = workloadName)

        self.createWMBSComponents(workload = workload)

        taskNames = []
        for task in getListOfTasks(workload = workload):
            taskNames.append(task.name())

        workloadPath  = os.path.join(self.testDir, workloadName, "TestWorkload")
        siteConfigDir = os.path.join(self.testDir, workloadName, 'SITECONF/local/JobConfig/')


        # Pre-run checks

        # Does it have the right directories?
        dirList = os.listdir(workloadPath)
        self.assertEqual(dirList, ['WMSandbox', 'TestWorkload-Sandbox.tar.bz2'])
        dirList = os.listdir(os.path.join(workloadPath, 'WMSandbox'))
        for taskName in taskNames:
            self.assertTrue(taskName in dirList)

        # Do we have job packages
        for task in taskNames:
            self.assertTrue('%sJobPackage.pkl' % (task) in os.listdir(os.path.join(self.testDir, 'packages')))


        # Does it have the SITECONF?
        self.assertTrue('site-local-config.xml' in os.listdir(siteConfigDir))

        # Now actually see if you can unpack it.
        self.unpackComponents(workload = workload)


        # Check for proper unpacking
        # Check that the task has the right directories,
        # and that the PSetTweaks and WMSandbox directories
        # have the right contents
        taskContents = ['WMSandbox', 'WMCore', 'PSetTweaks']
        PSetContents = ['PSetTweak.pyc', 'CVS', 'PSetTweak.py',
                        '__init__.pyc', 'WMTweak.py', '__init__.py']
        taskSandbox  = ['JobPackage.pcl', 'JobIndex.py', '__init__.py', 'WMWorkload.pkl']
        taskSandbox.extend(taskNames)  # Should have a directory for each task

        for task in taskNames:
            self.assertTrue(task in os.listdir(os.path.join(self.testDir, 'unpack')))
            taskDir = os.path.join(self.testDir, 'unpack', task, 'job')
            self.assertTrue(os.path.isdir(taskDir))
            # list.sort() returns None, so the original assertions always
            # passed trivially; compare sorted copies instead
            self.assertEqual(sorted(os.listdir(taskDir)), sorted(taskContents))
            self.assertEqual(sorted(os.listdir(os.path.join(taskDir, 'WMSandbox'))),
                             sorted(taskSandbox))
            self.assertEqual(sorted(os.listdir(os.path.join(taskDir, 'PSetTweaks'))),
                             sorted(PSetContents))


        # And we're done.
        # Assume if we got this far everything is good


        # At the end, copy the directory
        #if os.path.exists('tmpDir'):
        #    shutil.rmtree('tmpDir')
        #shutil.copytree(self.testDir, 'tmpDir')

        return

    @attr('integration')
    def testB_EmulatorTest(self):
        """
        _EmulatorTest_

        This is where things get scary.  We need to not only unpack the job,
        but also ascertain whether it can run locally in emulator mode.

        This requires...uh...emulator emulation.
        """


        # Assume all this works, because we tested it in testA
        workloadName = 'basicWorkload'
        workload     = self.createTestWorkload(workloadName = workloadName)

        self.createWMBSComponents(workload = workload)

        self.unpackComponents(workload = workload)


        self.runJobs(workload = workload)

        # Check the report
        taskDir = os.path.join(self.testDir, 'unpack/ReReco/job/WMTaskSpace')
        report = Report()
        report.load(os.path.join(taskDir, 'Report.0.pkl'))
        cmsReport = report.data.cmsRun1

        # Now validate the report
        self.assertEqual(report.data.ceName, socket.gethostname())
        self.assertEqual(report.data.seName, 'cmssrm.fnal.gov')
        self.assertEqual(report.data.siteName, 'T1_US_FNAL')
        self.assertEqual(report.data.hostName, socket.gethostname())
        self.assertTrue(report.data.completed)

        # Should have status 0 (emulator job)
        self.assertEqual(cmsReport.status, 0)

        # Should have one output module
        self.assertEqual(cmsReport.outputModules, ['TestOutputModule'])

        # It should have one file for input and output
        self.assertEqual(cmsReport.input.PoolSource.files.fileCount, 1)
        self.assertEqual(cmsReport.output.TestOutputModule.files.fileCount, 1)

        # So, um, I guess we're done


        # At the end, copy the directory
        #if os.path.exists('tmpDir'):
        #    shutil.rmtree('tmpDir')
        #shutil.copytree(self.testDir, 'tmpDir')

        return
Beispiel #59
0
    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules=["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package="T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="T0.WMBS",
                                logger=logging,
                                dbinterface=myThread.dbi)

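        # minimal site and SE records the splitter needs for job placement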
        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """,
                                 transaction=False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE')
                                    """,
                                 transaction=False)

        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE2')
                                    """,
                                 transaction=False)

        insertRunDAO = daoFactory(classname="RunConfig.InsertRun")
        insertRunDAO.execute(binds={
            'RUN': 1,
            'TIME': int(time.time()),
            'HLTKEY': "someHLTKey"
        },
                             transaction=False)

        insertLumiDAO = daoFactory(classname="RunConfig.InsertLumiSection")
        for lumi in [1, 2, 3, 4]:
            insertLumiDAO.execute(binds={
                'RUN': 1,
                'LUMI': lumi
            },
                                  transaction=False)

        insertStreamDAO = daoFactory(classname="RunConfig.InsertStream")
        insertStreamDAO.execute(binds={'STREAM': "A"}, transaction=False)

        insertStreamFilesetDAO = daoFactory(
            classname="RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "A", "TestFileset1")

        self.fileset1 = Fileset(name="TestFileset1")
        self.fileset1.load()

        workflow1 = Workflow(spec="spec.xml",
                             owner="hufnagel",
                             name="TestWorkflow1",
                             task="Test")
        workflow1.create()

        self.subscription1 = Subscription(fileset=self.fileset1,
                                          workflow=workflow1,
                                          split_algo="Repack",
                                          type="Repack")
        self.subscription1.create()

        # keep for later
        self.insertClosedLumiDAO = daoFactory(
            classname="RunLumiCloseout.InsertClosedLumi")
        self.currentTime = int(time.time())

        # default split parameters
        self.splitArgs = {}
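        # size thresholds are in bytes (20 GB single lumi, 10 GB multi lumi)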
        self.splitArgs['maxSizeSingleLumi'] = 20 * 1024 * 1024 * 1024
        self.splitArgs['maxSizeMultiLumi'] = 10 * 1024 * 1024 * 1024
        self.splitArgs['maxInputEvents'] = 500000
        self.splitArgs['maxInputFiles'] = 1000

        return
Beispiel #60
0
class ConditionTest(unittest.TestCase):
    """
    _ExpressTest_

    Test for Express job splitter
    """

    def setUp(self):
        """
        _setUp_

        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()

        self.testInit.setSchema(customModules = ["T0.WMBS"])

        self.splitterFactory = SplitterFactory(package = "T0.JobSplitting")

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package = "T0.WMBS",
                                logger = logging,
                                dbinterface = myThread.dbi)

        wmbsDaoFactory = DAOFactory(package = "WMCore.WMBS",
                                    logger = logging,
                                    dbinterface = myThread.dbi)

        myThread.dbi.processData("""INSERT INTO wmbs_location
                                    (id, site_name, state)
                                    VALUES (1, 'SomeSite', 1)
                                    """, transaction = False)
        myThread.dbi.processData("""INSERT INTO wmbs_location_senames
                                    (location, se_name)
                                    VALUES (1, 'SomeSE')
                                    """, transaction = False)

        insertRunDAO = daoFactory(classname = "RunConfig.InsertRun")
        insertRunDAO.execute(binds = { 'RUN' : 1,
                                       'TIME' : int(time.time()),
                                       'HLTKEY' : "someHLTKey" },
                             transaction = False)

        insertLumiDAO = daoFactory(classname = "RunConfig.InsertLumiSection")
        insertLumiDAO.execute(binds = { 'RUN' : 1,
                                        'LUMI' : 1 },
                              transaction = False)

        insertStreamDAO = daoFactory(classname = "RunConfig.InsertStream")
        insertStreamDAO.execute(binds = { 'STREAM' : "Express" },
                                transaction = False)

        insertStreamFilesetDAO = daoFactory(classname = "RunConfig.InsertStreamFileset")
        insertStreamFilesetDAO.execute(1, "Express", "TestFileset1")

        insertStreamerDAO = daoFactory(classname = "RunConfig.InsertStreamer")
        insertStreamerDAO.execute(binds = { 'RUN' : 1,
                                            'LUMI' : 1,
                                            'STREAM' : "Express",
                                            'TIME' : int(time.time()),
                                            'LFN' : "/streamer",
                                            'FILESIZE' : 0,
                                            'EVENTS' : 0 },
                                  transaction = False)

        insertPromptCalibrationDAO = daoFactory(classname = "RunConfig.InsertPromptCalibration")
        insertPromptCalibrationDAO.execute( { 'RUN' : 1,
                                              'STREAM' : "Express" },
                                            transaction = False)

        self.fileset1 = Fileset(name = "TestFileset1")
        self.fileset1.create()

        workflow1 = Workflow(spec = "spec.xml", owner = "hufnagel", name = "TestWorkflow1", task="Test")
        workflow1.create()

        self.subscription1  = Subscription(fileset = self.fileset1,
                                           workflow = workflow1,
                                           split_algo = "Condition",
                                           type = "Condition")
        self.subscription1.create()

        # set parentage chain and sqlite fileset
        alcaRecoFile = File("/alcareco", size = 0, events = 0)
        alcaRecoFile.addRun(Run(1, *[1]))
        alcaRecoFile.setLocation("SomeSE", immediateSave = False)
        alcaRecoFile.create()
        alcaPromptFile = File("/alcaprompt", size = 0, events = 0)
        alcaPromptFile.addRun(Run(1, *[1]))
        alcaPromptFile.setLocation("SomeSE", immediateSave = False)
        alcaPromptFile.create()
        sqliteFile = File("/sqlite", size = 0, events = 0)
        sqliteFile.create()
        self.fileset1.addFile(sqliteFile)
        self.fileset1.commit()

        results = myThread.dbi.processData("""SELECT lfn FROM wmbs_file_details
                                              """,
                                           transaction = False)[0].fetchall()

        setParentageDAO = wmbsDaoFactory(classname = "Files.SetParentage")
        setParentageDAO.execute(binds = [ { 'parent' : "/streamer",
                                            'child' : "/alcareco" },
                                          { 'parent' : "/alcareco",
                                            'child' : "/alcaprompt" },
                                          { 'parent' : "/alcaprompt",
                                            'child' : "/sqlite" } ],
                                transaction = False)

        # default split parameters
        self.splitArgs = {}
        self.splitArgs['runNumber'] = 1
        self.splitArgs['streamName'] = "Express"

        return

    def tearDown(self):
        """
        _tearDown_

        """
        self.testInit.clearDatabase()

        return

    def isPromptCalibFinished(self):
        """
        _isPromptCalibFinished_

        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT finished
                                             FROM prompt_calib
                                             """,
                                          transaction = False)[0].fetchall()[0][0]

        return result

    def countPromptCalibFiles(self):
        """
        _countPromptCalibFiles_

        """
        myThread = threading.currentThread()

        result = myThread.dbi.processData("""SELECT COUNT(*)
                                             FROM prompt_calib_file
                                             """,
                                          transaction = False)[0].fetchall()[0][0]

        return result

    def test00(self):
        """
        _test00_

        Make sure the job splitter behaves correctly.

        Just make sure the job splitter does nothing
        when the fileset is open and populates t0ast
        data structures when it's closed. In the latter
        case all input files should be marked as
        acquired without creating a job.

        """
        mySplitArgs = self.splitArgs.copy()

        jobFactory = self.splitterFactory(package = "WMCore.WMBS",
                                          subscription = self.subscription1)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 0,
                         "ERROR: there should be no prompt_calib_file")

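        # first pass with the fileset still open: the streamer is recorded
        # in prompt_calib_file but prompt_calib must not be finished yet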
        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(self.isPromptCalibFinished(), 0,
                         "ERROR: prompt_calib should not be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        self.fileset1.markOpen(False)

        jobGroups = jobFactory(**mySplitArgs)

        self.assertEqual(len(jobGroups), 0,
                         "ERROR: JobFactory should have returned no JobGroup")

        self.assertEqual(self.isPromptCalibFinished(), 1,
                         "ERROR: prompt_calib should be finished")

        self.assertEqual(self.countPromptCalibFiles(), 1,
                         "ERROR: there should be one prompt_calib_file")

        return