Example #1
    def testF_DBSUploadQueueSizeCheckForAlerts(self):
        """
        Test will not trigger a real alert being sent unless doing some
        mocking of the methods used during DBSUploadPoller.algorithm() ->
        DBSUploadPoller.uploadBlocks() method.
        As done here, it probably can't be deterministic, yet the feature
        shall be checked.

        """
        sizeLevelToTest = 1
        myThread = threading.currentThread()
        config = self.createConfig()
        # threshold / value to check
        config.DBSUpload.alertUploadQueueSize = sizeLevelToTest

        # without these files, uploadBlocks would return immediately
        name = "ThisIsATest_%s" % (makeUUID())
        tier = "RECO"
        nFiles = sizeLevelToTest + 1
        files = self.getFiles(name=name, tier=tier, nFiles=nFiles)
        datasetPath = "/%s/%s/%s" % (name, name, tier)

        # load components that are necessary to check status
        # (this seems necessary, else some previous tests started failing)
        factory = WMFactory("dbsUpload", "WMComponent.DBSUpload.Database.Interface")
        dbinterface = factory.loadObject("UploadToDBS")

        dbsInterface = DBSInterface(config=config)
        localAPI = dbsInterface.getAPIRef()
        globeAPI = dbsInterface.getAPIRef(globalRef=True)
        testDBSUpload = DBSUploadPoller(config)
        # this is finally where the action (alert) should be triggered from
        testDBSUpload.algorithm()

        return
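
A deterministic variant of this check is possible with mocking, as the docstring
suggests. Below is a minimal sketch that patches the poller's alert emission and
asserts it fired; `sendAlert` is a hypothetical hook name, since the snippet does
not show which method DBSUploadPoller actually uses to emit the alert.

    from unittest import mock  # on Python 2, use the external `mock` package

    def testF_deterministicAlertCheck(self):
        config = self.createConfig()
        config.DBSUpload.alertUploadQueueSize = 1
        name = "ThisIsATest_%s" % makeUUID()
        # one more file than the threshold, so the alert condition must hold
        self.getFiles(name=name, tier="RECO", nFiles=2)

        testDBSUpload = DBSUploadPoller(config)
        # `sendAlert` is an assumed name; patch whatever hook the poller
        # really calls when the upload queue exceeds the threshold
        with mock.patch.object(testDBSUpload, "sendAlert", create=True) as alert:
            testDBSUpload.algorithm()
        self.assertTrue(alert.called)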
Example #2
    def testC_FailTest(self):
        """
        _FailTest_

        THIS TEST IS DANGEROUS!
        Figure out what happens when we trigger rollbacks
        """
        myThread = threading.currentThread()
        config = self.createConfig()
        config.DBSUpload.abortStepTwo = True

        originalOut = sys.stdout
        originalErr = sys.stderr

        dbsInterface = DBSInterface(config = config)
        localAPI     = dbsInterface.getAPIRef()
        globeAPI     = dbsInterface.getAPIRef(globalRef = True)

        name = "ThisIsATest_%s" % (makeUUID())
        tier = "RECO"
        nFiles = 12
        files = self.getFiles(name = name, tier = tier, nFiles = nFiles)
        datasetPath = '/%s/%s/%s' % (name, name, tier)

        testDBSUpload = DBSUploadPoller(config = config)

        try:
            testDBSUpload.algorithm()
        except Exception as ex:
            pass
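
The bare `except ... pass` above swallows whatever the rollback raises, so a
passing run proves little. If algorithm() does propagate the abort as an
exception (an assumption; the original never checks), the expectation can be
made explicit:

    def testC_FailTestExplicit(self):
        config = self.createConfig()
        config.DBSUpload.abortStepTwo = True
        name = "ThisIsATest_%s" % makeUUID()
        self.getFiles(name=name, tier="RECO", nFiles=12)

        testDBSUpload = DBSUploadPoller(config=config)
        # assumes the aborted second step surfaces as an exception
        self.assertRaises(Exception, testDBSUpload.algorithm)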
Example #3
    def testE_NoMigration(self):
        """
        _NoMigration_

        Test the DBSUpload system with no global migration
        """
        myThread = threading.currentThread()
        config = self.createConfig()
        config.DBSInterface.DBSBlockMaxTime   = 3
        config.DBSInterface.doGlobalMigration = False
        config.DBSUpload.pollInterval         = 4

        name = "ThisIsATest_%s" % (makeUUID())
        tier = "RECO"
        nFiles = 12
        files = self.getFiles(name = name, tier = tier, nFiles = nFiles)
        datasetPath = '/%s/%s/%s' % (name, name, tier)


        # Load components that are necessary to check status
        factory     = WMFactory("dbsUpload", "WMComponent.DBSUpload.Database.Interface")
        dbinterface = factory.loadObject("UploadToDBS")

        dbsInterface = DBSInterface(config = config)
        localAPI     = dbsInterface.getAPIRef()
        globeAPI     = dbsInterface.getAPIRef(globalRef = True)

        # In the first round we should create blocks for the first dataset
        # The child dataset should not be handled until the parent is uploaded
        testDBSUpload = DBSUploadPoller(config = config)
        testDBSUpload.algorithm()

        # First, see if there are any blocks
        # One in DBS, one not in DBS
        result = myThread.dbi.processData("SELECT status FROM dbsbuffer_block")[0].fetchall()
        self.assertEqual(len(result), 2)
        self.assertEqual(result, [('InGlobalDBS',), ('Open',)])


        result = myThread.dbi.processData("SELECT status FROM dbsbuffer_file WHERE dataset_algo = 1")[0].fetchall()
        for r in result:
            self.assertEqual(r[0], 'GLOBAL')


        return
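
Note that the block-status assertion above compares against a fixed row order,
but the query has no ORDER BY, so SQL does not guarantee that order. An
order-independent form of the same check:

    result = myThread.dbi.processData(
        "SELECT status FROM dbsbuffer_block")[0].fetchall()
    self.assertEqual(sorted(r[0] for r in result), ['InGlobalDBS', 'Open'])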
Example #4
    def testB_AlgoMigration(self):
        """
        _AlgoMigration_

        Test our ability to migrate multiple algos to global

        Do this by creating, mid-poll, two separate batches of files
        One with the same dataset but a different algo
        One with the same algo, but a different dataset
        See that they both get to global
        """
        # raise nose.SkipTest
        myThread = threading.currentThread()
        config = self.createConfig()
        config.DBSInterface.DBSBlockMaxTime = 20

        name = "ThisIsATest_%s" % (makeUUID())
        tier = "RECO"
        nFiles = 12
        files = self.getFiles(name=name, tier=tier, nFiles=nFiles)
        datasetPath = "/%s/%s/%s" % (name, name, tier)

        # Load components that are necessary to check status
        factory = WMFactory("dbsUpload", "WMComponent.DBSUpload.Database.Interface")
        dbinterface = factory.loadObject("UploadToDBS")

        dbsInterface = DBSInterface(config=config)
        localAPI = dbsInterface.getAPIRef()
        globeAPI = dbsInterface.getAPIRef(globalRef=True)

        testDBSUpload = DBSUploadPoller(config=config)
        testDBSUpload.algorithm()

        # There should now be one block
        result = listBlocks(apiRef=globeAPI, datasetPath=datasetPath)
        self.assertEqual(len(result), 1)

        # Okay, by now, the first migration should have gone through.
        # Now create a second batch of files with the same dataset
        # but a different algo.
        for i in range(0, nFiles):
            testFile = DBSBufferFile(
                lfn="%s-batch2-%i" % (name, i), size=1024, events=20, checksums={"cksum": 1}, locations="malpaquet"
            )
            testFile.setAlgorithm(
                appName="cmsRun",
                appVer="CMSSW_3_1_1",
                appFam=tier,
                psetHash="GIBBERISH_PART2",
                configContent=self.configURL,
            )
            testFile.setDatasetPath(datasetPath)
            testFile.addRun(Run(1, *[46]))
            testFile.create()

        # Have to do things twice to get parents
        testDBSUpload.algorithm()
        testDBSUpload.algorithm()

        # There should now be two blocks
        result = listBlocks(apiRef=globeAPI, datasetPath=datasetPath)
        self.assertEqual(len(result), 2)

        # Now create another batch of files with the original algo
        # But in a different dataset
        for i in range(0, nFiles):
            testFile = DBSBufferFile(
                lfn="%s-batch3-%i" % (name, i), size=1024, events=20, checksums={"cksum": 1}, locations="malpaquet"
            )
            testFile.setAlgorithm(
                appName=name, appVer="CMSSW_3_1_1", appFam=tier, psetHash="GIBBERISH", configContent=self.configURL
            )
            testFile.setDatasetPath("/%s/%s_3/%s" % (name, name, tier))
            testFile.addRun(Run(1, *[46]))
            testFile.create()

        # Do it twice for parentage.
        testDBSUpload.algorithm()
        testDBSUpload.algorithm()

        # There should now be one block
        result = listBlocks(apiRef=globeAPI, datasetPath="/%s/%s_3/%s" % (name, name, tier))
        self.assertEqual(len(result), 1)

        # Well, all the blocks got there, so we're done
        return
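
The two batch loops above differ only in LFN prefix, appName, psetHash, and
dataset path. A helper along these lines (hypothetical, not part of the
original suite) would remove the duplication:

    def createBatch(self, name, tier, datasetPath, batch, appName, psetHash,
                    nFiles=12):
        # factors out the repeated DBSBufferFile creation from the loops above
        for i in range(nFiles):
            testFile = DBSBufferFile(lfn="%s-%s-%i" % (name, batch, i),
                                     size=1024, events=20,
                                     checksums={"cksum": 1},
                                     locations="malpaquet")
            testFile.setAlgorithm(appName=appName, appVer="CMSSW_3_1_1",
                                  appFam=tier, psetHash=psetHash,
                                  configContent=self.configURL)
            testFile.setDatasetPath(datasetPath)
            testFile.addRun(Run(1, *[46]))
            testFile.create()

The second batch then becomes
self.createBatch(name, tier, datasetPath, "batch2", "cmsRun", "GIBBERISH_PART2").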
Example #5
    def testA_basicUploadTest(self):
        """
        _basicUploadTest_

        Do everything simply once
        Create dataset, algo, files, blocks,
        upload them,
        mark as done, finish them, migrate them
        Also check the timeout
        """
        myThread = threading.currentThread()
        config = self.createConfig()
        config.DBSInterface.DBSBlockMaxTime = 3
        config.DBSUpload.pollInterval = 4

        name = "ThisIsATest_%s" % (makeUUID())
        tier = "RECO"
        nFiles = 12
        files = self.getFiles(name=name, tier=tier, nFiles=nFiles)
        datasetPath = "/%s/%s/%s" % (name, name, tier)

        # Load components that are necessary to check status
        factory = WMFactory("dbsUpload", "WMComponent.DBSUpload.Database.Interface")
        dbinterface = factory.loadObject("UploadToDBS")

        dbsInterface = DBSInterface(config=config)
        localAPI = dbsInterface.getAPIRef()
        globeAPI = dbsInterface.getAPIRef(globalRef=True)

        # In the first round we should create blocks for the first dataset
        # The child dataset should not be handled until the parent is uploaded
        testDBSUpload = DBSUploadPoller(config=config)
        testDBSUpload.algorithm()

        # First, see if there are any blocks
        # One in DBS, one not in DBS
        result = myThread.dbi.processData("SELECT status FROM dbsbuffer_block")[0].fetchall()
        self.assertEqual(len(result), 2)
        self.assertEqual(result, [("InGlobalDBS",), ("Open",)])

        # Check to see if datasets and algos are in local DBS
        result = listAlgorithms(apiRef=localAPI, patternExe=name)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]["ExecutableName"], name)
        result = listPrimaryDatasets(apiRef=localAPI, match=name)
        self.assertEqual(result, [name])
        result = listProcessedDatasets(apiRef=localAPI, primary=name, dataTier="*")

        # Then check and see that the closed block made it into local DBS
        affectedBlocks = listBlocks(apiRef=localAPI, datasetPath=datasetPath)
        if affectedBlocks[0]["OpenForWriting"] == "0":
            self.assertEqual(affectedBlocks[1]["OpenForWriting"], "1")
            self.assertEqual(affectedBlocks[0]["NumberOfFiles"], 10)
            self.assertEqual(affectedBlocks[1]["NumberOfFiles"], 2)
        else:
            self.assertEqual(affectedBlocks[0]["OpenForWriting"], "1")
            self.assertEqual(affectedBlocks[1]["NumberOfFiles"], 10)
            self.assertEqual(affectedBlocks[0]["NumberOfFiles"], 2)

        # Check to make sure all the files are in local
        result = listDatasetFiles(apiRef=localAPI, datasetPath=datasetPath)
        fileLFNs = [x["lfn"] for x in files]
        for lfn in fileLFNs:
            self.assertTrue(lfn in result)

        # Make sure the child files aren't there
        flag = False
        try:
            listDatasetFiles(apiRef=localAPI, datasetPath="/%s/%s_2/%s" % (name, name, tier))
        except Exception as ex:
            flag = True
        self.assertTrue(flag)
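
The flag pattern can be stated directly with assertRaises, which also fails
with a clearer message when no exception is thrown:

    # equivalent, more direct form of the flag check above
    self.assertRaises(Exception, listDatasetFiles, apiRef=localAPI,
                      datasetPath="/%s/%s_2/%s" % (name, name, tier))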
Example #6
    seName = blockLocation[newBlockName]
    (datasetPath, junk) = newBlockName.split("#", 1)
    dbsApi.insertBlock(datasetPath, newBlockName, storage_element_list = [seName])

    blockRef = dbsApi.listBlocks(dataset = datasetPath, block_name = newBlockName)[0]
    print(blockRef)

    newFiles = []
    for newFileLFN in badFiles[newBlockName]:
        localFile = DBSBufferFile(lfn = newFileLFN)
        localFile.load(parentage = 1)

        (primaryDS, procDS, tier) = datasetPath[1:].split("/", 3)
        primary = DbsPrimaryDataset(Name = primaryDS, Type = "mc")
        algo = DbsAlgorithm(ExecutableName = localFile["appName"],
                            ApplicationVersion = localFile["appVer"],
                            ApplicationFamily = localFile["appFam"],
                            ParameterSetID = psetInstance)
        processed = DbsProcessedDataset(PrimaryDataset = primary,
                                        AlgoList = [algo],
                                        Name = procDS,
                                        TierList = [tier],
                                        ParentList = [],
                                        PhysicsGroup = "NoGroup",
                                        Status = "VALID",
                                        GlobalTag = "")
        newFiles.append(DBSInterface.createDBSFileFromBufferFile(localFile, processed))

    dbsApi.insertFiles(datasetPath, newFiles, blockRef)
    dbsApi.closeBlock(block = newBlockName)
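
This snippet relies on names defined earlier in the repair script: dbsApi,
newBlockName, blockLocation, badFiles, and psetInstance. A minimal setup
sketch, assuming the DBS2 client layout; every value is a placeholder:

    from DBSAPI.dbsApi import DbsApi
    from DBSAPI.dbsQueryableParameterSet import DbsQueryableParameterSet

    dbsApi = DbsApi({"url": "https://example.cern.ch/DBSServlet"})  # placeholder URL
    newBlockName = "/Primary/Processed/RECO#block-uuid"             # block to re-insert
    blockLocation = {newBlockName: "srm.example.org"}               # block -> SE name
    badFiles = {newBlockName: ["/store/example/file.root"]}         # block -> LFNs
    psetInstance = DbsQueryableParameterSet(Hash="NO_PSET_HASH")    # placeholder pset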
Example #7
    def testC_MultipleSites(self):
        """
        _MultipleSites_

        See if it opens blocks in multiple sites.
        """

        # Skip this test as it is now obsolete
        # keep it around for legacy value (i.e., I need to know what's in it)
        return


        files = []

        config = self.createConfig()

        name   = "ThisIsATest_%s" %(makeUUID())
        tier   = "RECO"
        nFiles = 12
        datasetPath = '/%s/%s/%s' % (name, name, tier)
        files.extend(self.getFiles(name = name, tier = tier, nFiles = nFiles, site = 'Ramilles'))
        files.extend(self.getFiles(name = name, tier = tier, nFiles = nFiles, site = 'Blenheim'))


        dbsInterface = DBSInterface(config)
        localAPI     = dbsInterface.getAPIRef()
        globeAPI     = dbsInterface.getAPIRef(globalRef = True)

        algo, dataset = self.createAlgoDataset(name = name, datasetPath = datasetPath)

        affectedBlocks = dbsInterface.runDBSBuffer(algo = algo, dataset = dataset)
        # files = files
        result  = listAlgorithms(apiRef = localAPI, patternExe = name)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['ExecutableName'], name)
        result  = listPrimaryDatasets(apiRef = localAPI, match = name)
        self.assertEqual(result, [name])
        result    = listProcessedDatasets(apiRef = localAPI, primary = name, dataTier = "*")
        self.assertEqual(result, [name])


        # Should have four blocks, two for each site
        self.assertEqual(len(affectedBlocks), 4)
        RamBlocks = []
        BleBlocks = []
        for block in affectedBlocks:
            if block['StorageElementList'][0]['Name'] == 'Blenheim':
                BleBlocks.append(block)
            elif block['StorageElementList'][0]['Name'] == 'Ramilles':
                RamBlocks.append(block)

        self.assertEqual(len(RamBlocks), 2)
        self.assertEqual(len(BleBlocks), 2)

        self.assertEqual(RamBlocks[0]['NumberOfFiles'], 10)
        self.assertEqual(BleBlocks[0]['NumberOfFiles'], 10)
        self.assertEqual(RamBlocks[1]['NumberOfFiles'], 2)
        self.assertEqual(BleBlocks[1]['NumberOfFiles'], 2)


        # We should have two blocks in global
        # Both should have ten files, and both should be closed
        result    = listBlocks(apiRef = globeAPI, datasetPath = datasetPath)
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0]['OpenForWriting'], '0')
        self.assertEqual(result[0]['NumberOfFiles'], 10)


        return
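
The per-site grouping above can be expressed more compactly; a sketch of the
same block-count check with collections.Counter:

    from collections import Counter

    siteCounts = Counter(block['StorageElementList'][0]['Name']
                         for block in affectedBlocks)
    self.assertEqual(siteCounts, Counter({'Blenheim': 2, 'Ramilles': 2}))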
Example #8
    def testB_DBSInterfaceSimple(self):
        """
        _DBSInterfaceSimple_

        Do some simple checks using the DBSInterface methods instead
        of the individual functions.
        """

        # Skip this test due to block format change
        # It's tested by DBSUploadPoller_t
        # I'd like to keep it in the code so I can look
        # at it, and maybe bring it back one day.
        return



        config = self.createConfig()

        name   = "ThisIsATest_%s" %(makeUUID())
        tier   = "RECO"
        nFiles = 12
        datasetPath = '/%s/%s/%s' % (name, name, tier)
        files  = self.getFiles(name = name, tier = tier, nFiles = nFiles)


        dbsInterface = DBSInterface(config)
        localAPI     = dbsInterface.getAPIRef()
        globeAPI     = dbsInterface.getAPIRef(globalRef = True)

        algo, dataset = self.createAlgoDataset(name = name, datasetPath = datasetPath)



        # Can we insert a dataset algo?
        affectedBlocks = dbsInterface.runDBSBuffer(algo = algo, dataset = dataset,
                                                   files = files)
        result  = listAlgorithms(apiRef = localAPI, patternExe = name)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['ExecutableName'], name)
        result  = listPrimaryDatasets(apiRef = localAPI, match = name)
        self.assertEqual(result, [name])
        result    = listProcessedDatasets(apiRef = localAPI, primary = name, dataTier = "*")
        self.assertEqual(result, [name])


        result = listDatasetFiles(apiRef = localAPI, datasetPath = datasetPath)
        self.assertEqual(len(result), 12)
        self.assertEqual(len(affectedBlocks), 2)
        # Create two blocks, one open, one closed, one with ten files, one with two
        self.assertEqual(affectedBlocks[0]['OpenForWriting'], '0')
        self.assertEqual(affectedBlocks[1]['OpenForWriting'], '1')
        self.assertEqual(affectedBlocks[0]['NumberOfFiles'], 10)
        self.assertEqual(affectedBlocks[1]['NumberOfFiles'], 2)


        # There should be one block in global
        # It should have ten files and be closed
        result    = listBlocks(apiRef = globeAPI, datasetPath = datasetPath)
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0]['OpenForWriting'], '0')
        self.assertEqual(result[0]['NumberOfFiles'], 10)


        return
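
Returning at the top keeps the body as dead code and the test is reported as
passed rather than skipped. Raising nose.SkipTest, which testB_AlgoMigration
already hints at in a comment, makes the skip visible in the test output:

    import nose

    def testB_DBSInterfaceSimple(self):
        # surfaces as a skip in the runner instead of a silent pass
        raise nose.SkipTest("Block format changed; covered by DBSUploadPoller_t")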
Example #9
    def testA_directReadWrite(self):
        """
        Test whether you can read and write directly into DBS using DBSInterface

        """

        config = self.createConfig()

        name   = "ThisIsATest_%s" %(makeUUID())
        tier   = "RECO"
        nFiles = 12
        datasetPath = '/%s/%s/%s' % (name, name, tier)
        files  = self.getFiles(name = name, tier = tier, nFiles = nFiles)

        dbsInterface = DBSInterface(config)
        localAPI = dbsInterface.getAPIRef()



        # Can we create an algo?
        algo = createAlgorithm(apiRef = localAPI, appName = name,
                               appVer = "CMSSW_3_1_1", appFam = "RECO")
        result  = listAlgorithms(apiRef = localAPI, patternExe = name)
        self.assertEqual(len(result), 1)  # Should only be one algo
        self.assertEqual(result[0]['ExecutableName'], name)
        

        # Can we create a primary dataset?
        primary = createPrimaryDataset(primaryName = name, apiRef = localAPI)
        result  = listPrimaryDatasets(apiRef = localAPI, match = name)
        self.assertEqual(result, [name])


        # Can we create a processed dataset?
        processed = createProcessedDataset(algorithm = algo, apiRef = localAPI,
                                           primary = primary, processedName = name,
                                           dataTier = tier)
        result    = listProcessedDatasets(apiRef = localAPI, primary = name, dataTier = "*")
        self.assertEqual(result, [name])


        # Can we create file blocks?
        fileBlock = createFileBlock(apiRef = localAPI, datasetPath = datasetPath, seName = 'test')
        result    = listBlocks(apiRef = localAPI, datasetPath = datasetPath)
        self.assertEqual(len(result), 1)  # There should be only one result
        self.assertEqual(result[0]['Path'], datasetPath)
        self.assertEqual(fileBlock['newFiles'], [])
        self.assertEqual(fileBlock['NumberOfFiles'], 0)
        self.assertEqual(fileBlock['OpenForWriting'], '1')


        # Can we create files?
        dbsfiles = []
        for bfile in files:
            for run in bfile.getRuns():
                insertDBSRunsFromRun(apiRef = localAPI, dSRun = run)
            dbsfiles.append(createDBSFileFromBufferFile(procDataset = processed, bufferFile = bfile))
        #print dbsfiles
        insertFiles(apiRef = localAPI, datasetPath = datasetPath, files = dbsfiles, block = fileBlock)
        result = listDatasetFiles(apiRef = localAPI, datasetPath = datasetPath)
        self.assertEqual(len(result), nFiles)
        for filename in result:
            self.assertTrue(re.search(name, filename))


        # Can we close blocks?
        closeBlock(apiRef = localAPI, block = fileBlock)
        result    = listBlocks(apiRef = localAPI, datasetPath = datasetPath)
        self.assertEqual(len(result), 1) # Only got one block back
        self.assertEqual(result[0]['OpenForWriting'], '0')



        return