Example #1
    def listFilesInBlockWithParents(self, fileBlockName):
        """
        _listFilesInBlockWithParents_

        Get a list of files in the named fileblock, including
        the parents of those files.

        """
        try:
            files = self.dbs.listFiles(
                "",  # path
                "",  #primary
                "",  # processed
                [],  #tier_list
                "",  #analysisDataset
                fileBlockName,
                details=None,
                retriveList=['retrive_parent'])

        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlockWithParents(%s)\n" % (
                fileBlockName, )
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #2
    def listFilesInBlockWithParents(self, fileBlockName):
        """
        _listFilesInBlockWithParents_

        Get a list of files in the named fileblock, including
        the parents of those files.

        """
        try:
            files = self.dbs.listFiles(
                 "", # path
                 "", #primary
                 "", # processed
                 [], #tier_list
                 "", #analysisDataset
                 fileBlockName,
                 details = None,
                 retriveList = ['retrive_parent' ])
              
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlockWithParents(%s)\n" % (
                fileBlockName, )
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #3
 def __init__(self, url, **contact):
     args = {"url": url, "level": 'ERROR'}
     args.update(contact)
     try:
         self.dbs = DbsApi(args)
     except DbsException, ex:
         msg = "Error in DBSReader with DbsApi\n"
         msg += "%s\n" % formatEx(ex)
         raise DBSReaderError(msg)
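
A minimal construction sketch. Assumptions: DBSReader is importable from the module these snippets come from, and the URL below (the global DBS endpoint quoted in Example #40) is reachable; the **contact kwargs are simply merged into the DbsApi args.

    url = "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet"
    try:
        reader = DBSReader(url)
    except DBSReaderError, ex:
        print "could not contact DBS: %s" % str(ex)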
Example #4
 def __init__(self, url,  **contact):
     args = { "url" : url, "level" : 'ERROR'}
     args.update(contact)
     try:
          self.dbs = DbsApi(args)
     except DbsException, ex:
         msg = "Error in DBSWriterError with DbsApi\n"
         msg += "%s\n" % formatEx(ex)
         raise DBSWriterError(msg)
Example #5
 def getFileBlocksInfo(self, dataset, onlyClosedBlocks=False):
     """
     """
     self.checkDatasetPath(dataset)
     try:
         blocks = self.dbs.listBlocks(dataset)
     except DbsException, ex:
         msg = "Error in DBSReader.listFileBlocks(%s)\n" % dataset
         msg += "%s\n" % formatEx(ex)
         raise DBSReaderError(msg)
Example #6
 def getFileBlocksInfo(self, dataset, onlyClosedBlocks = False):
     """
     """
     self.checkDatasetPath(dataset)
     try:
          blocks = self.dbs.listBlocks(dataset)
     except DbsException, ex:
         msg = "Error in DBSReader.listFileBlocks(%s)\n" % dataset
         msg += "%s\n" % formatEx(ex)
         raise DBSReaderError(msg)
Example #7
    def importParentDataset(self, globalDBS, datasetpath):
        """
           WARNING: it works only with DBS_2_0_9_patch_6
        """

        args = {'url': globalDBS}
        try:
            api_reader = DbsApi(args)
        except DbsApiException, ex:
            msg = "%s\n" % formatEx(ex)
            raise CrabException(msg)
Example #8
    def importParentDataset(self, globalDBS, datasetpath):
        """
           WARNING: it works only with DBS_2_0_9_patch_6
        """

        args = {'url': globalDBS}
        try:
            api_reader = DbsApi(args)
        except DbsApiException, ex:
            msg = "%s\n" % formatEx(ex)
            raise CrabException(msg)
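
A hedged call sketch for the two snippets above. The dataset path is a placeholder, and `publisher` stands for an already-constructed Publisher instance (see Example #40); the URL is the global DBS endpoint used as the default there.

    globalDBS = "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet"
    publisher.importParentDataset(globalDBS,
                                  "/MyPrimary/MyProcessed-v1/GEN-SIM-RECO")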
Example #9
    def matchProcessedDatasets(self, primary, tier, process):
        """
        _matchProcessedDatasets_

        return a list of Processed datasets 
        """
        try:
            result = self.dbs.listProcessedDatasets(primary, tier, process)
        except DbsException, ex:
            msg = "Error in DBSReader.listProcessedDatasets(%s)\n" % primary
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #10
    def listOpenFileBlocks(self, dataset):
        """
        _listOpenFileBlocks_

        Retrieve a list of open fileblock names for a dataset

        """
        self.checkDatasetPath(dataset)
        try:
            blocks = self.dbs.listBlocks(dataset)
        except DbsException, ex:
            msg = "Error in DBSReader.listFileBlocks(%s)\n" % dataset
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #11
    def listOpenFileBlocks(self, dataset):
        """
        _listOpenFileBlocks_

        Retrieve a list of open fileblock names for a dataset

        """
        self.checkDatasetPath(dataset)
        try:
             blocks = self.dbs.listBlocks(dataset)
        except DbsException, ex:
            msg = "Error in DBSReader.listFileBlocks(%s)\n" % dataset
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #12
    def createDatasets(self, workflowSpec):
        """
        _createDatasets_

        Create all the output datasets found in the workflow spec instance
        provided

        """
        try:
            workflowSpec.payload.operate(
                _CreateDatasetOperator(self.dbs, workflowSpec))
        except DbsException, ex:
            msg = "Error in DBSWriter.createDatasets\n"
            msg += "For Workflow: %s\n" % workflowSpec.workflowName()
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
Example #13
 def listPrimaryDatasets(self, match = None):
     """
     _listPrimaryDatasets_
     
     return a list of primary datasets matching the glob expression.
     If no expression is provided, all datasets are returned
     """
     arg = "*"
     if match != None:
         arg = match
     try:
         result = self.dbs.listPrimaryDatasets(arg)
     except DbsException, ex:
         msg = "Error in DBSReader.listPrimaryDataset(%s)\n" % arg
         msg += "%s\n" % formatEx(ex)
         raise DBSReaderError(msg)
Example #14
 def listPrimaryDatasets(self, match=None):
     """
     _listPrimaryDatasets_
     
     return a list of primary datasets matching the glob expression.
     If no expression is provided, all datasets are returned
     """
     arg = "*"
     if match != None:
         arg = match
     try:
         result = self.dbs.listPrimaryDatasets(arg)
     except DbsException, ex:
         msg = "Error in DBSReader.listPrimaryDataset(%s)\n" % arg
         msg += "%s\n" % formatEx(ex)
         raise DBSReaderError(msg)
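
A usage sketch of the glob behaviour described in the docstring. The dataset names are placeholders, and `reader` is a constructed DBSReader (Example #3):

    all_primaries = reader.listPrimaryDatasets()           # match defaults to "*"
    cosmics_only = reader.listPrimaryDatasets("Cosmics*")  # glob narrows the list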
Example #15
    def createDatasets(self, workflowSpec):
        """
        _createDatasets_

        Create all the output datasets found in the workflow spec instance
        provided

        """
        try:
            workflowSpec.payload.operate(
                _CreateDatasetOperator(self.dbs, workflowSpec)
                )
        except DbsException, ex:
            msg = "Error in DBSWriter.createDatasets\n"
            msg += "For Workflow: %s\n" % workflowSpec.workflowName()
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
Example #16
    def blockToDatasetPath(self, blockName):
        """
        _blockToDatasetPath_

        Given a block name, get the dataset Path associated with that
        Block.

        Returns the dataset path, or None if not found

        """
        self.checkBlockName(blockName)
        try:
            blocks = self.dbs.listBlocks(block_name = blockName)
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.blockToDataset(%s)\n" % blockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #17
    def listProcessedDatasets(self, primary, dataTier = None):
        """
        _listProcessedDatasets_

        return a list of Processed datasets for the primary and optional
        data tier value

        """
        tier = "*"
        if dataTier != None:
            tier = dataTier

        try:
            result = self.dbs.listProcessedDatasets(primary, tier)
        except DbsException, ex:
            msg = "Error in DBSReader.listProcessedDatasets(%s)\n" % primary
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #18
    def listProcessedDatasets(self, primary, dataTier=None):
        """
        _listProcessedDatasets_

        return a list of Processed datasets for the primary and optional
        data tier value

        """
        tier = "*"
        if dataTier != None:
            tier = dataTier

        try:
            result = self.dbs.listProcessedDatasets(primary, tier)
        except DbsException, ex:
            msg = "Error in DBSReader.listProcessedDatasets(%s)\n" % primary
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
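
A usage sketch (names are placeholders; `reader` as in Example #3): omitting dataTier searches every tier, since the argument defaults to "*".

    every_tier = reader.listProcessedDatasets("MyPrimary")
    reco_only = reader.listProcessedDatasets("MyPrimary", dataTier="RECO")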
Example #19
    def blockToDatasetPath(self, blockName):
        """
        _blockToDatasetPath_

        Given a block name, get the dataset Path associated with that
        Block.

        Returns the dataset path, or None if not found

        """
        self.checkBlockName(blockName)
        try:
            blocks = self.dbs.listBlocks(block_name=blockName)
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.blockToDataset(%s)\n" % blockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #20
    def blockExists(self, fileBlockName):
        """
        _blockExists_

        Check to see if block with name provided exists in the DBS
        Instance.

        Return True if exists, False if not

        """
        self.checkBlockName(fileBlockName)
        try:
            blocks = self.dbs.listBlocks(block_name=fileBlockName)
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.blockExists(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #21
    def blockExists(self, fileBlockName):
        """
        _blockExists_

        Check to see if block with name provided exists in the DBS
        Instance.

        Return True if exists, False if not

        """
        self.checkBlockName(fileBlockName)
        try:
            blocks = self.dbs.listBlocks(block_name = fileBlockName)
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.blockExists(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
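
A combined sketch of blockExists with blockToDatasetPath from Example #16. The block name is a placeholder; DBS fileblock names have the form "<dataset path>#<suffix>", and `reader` is a constructed DBSReader:

    block = "/MyPrimary/MyProcessed-v1/GEN-SIM-RECO#0123-abcd-placeholder"
    if reader.blockExists(block):
        print reader.blockToDatasetPath(block)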
Example #22
    def createMergeDatasets(self, workflowSpec, fastMerge=True):
        """
        _createMergeDatasets_

        Create merge output datasets for a workflow Spec
        Expects a Processing workflow spec from which it will generate the
        merge datasets automatically.

        
        """
        mergeSpec = createMergeDatasetWorkflow(workflowSpec, fastMerge)
        try:
            mergeSpec.payload.operate(
                _CreateMergeDatasetOperator(self.dbs, workflowSpec))

        except DbsException, ex:
            msg = "Error in DBSWriter.createMergeDatasets\n"
            msg += "For Workflow: %s\n" % workflowSpec.workflowName()
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
Example #23
    def lfnsInBlock(self, fileBlockName):
        """
        _lfnsInBlock_

        LFN list only for the block; details=False makes for a faster query
        
        """
        try:
            files = self.dbs.listFiles(
                "", # path
                "", #primary
                "", # processed
                [], #tier_list
                "", #analysisDataset
                fileBlockName, details="False")
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.lfnsInBlock(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #24
    def createMergeDatasets(self, workflowSpec, fastMerge = True):
        """
        _createMergeDatasets_

        Create merge output datasets for a workflow Spec
        Expects a Processing workflow spec from which it will generate the
        merge datasets automatically.

        
        """
        mergeSpec = createMergeDatasetWorkflow(workflowSpec, fastMerge)        
        try:
            mergeSpec.payload.operate(
                _CreateMergeDatasetOperator(self.dbs, workflowSpec)
                )
            
        except DbsException, ex:
            msg = "Error in DBSWriter.createMergeDatasets\n"
            msg += "For Workflow: %s\n" % workflowSpec.workflowName()
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
Example #25
    def listFilesInBlock(self, fileBlockName):
        """
        _listFilesInBlock_

        Get a list of files in the named fileblock

        """
        try:
            files = self.dbs.listFiles(
                 "", # path
                 "", #primary
                 "", # processed
                 [], #tier_list
                 "", #analysisDataset
                 fileBlockName, details="True")
            
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlock(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #26
    def lfnsInBlock(self, fileBlockName):
        """
        _lfnsInBlock_

        LFN list only for the block; details=False makes for a faster query
        
        """
        try:
            files = self.dbs.listFiles(
                "",  # path
                "",  #primary
                "",  # processed
                [],  #tier_list
                "",  #analysisDataset
                fileBlockName,
                details="False")
        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.lfnsInBlock(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
Example #27
    def listFilesInBlock(self, fileBlockName):
        """
        _listFilesInBlock_

        Get a list of files in the named fileblock

        """
        try:
            files = self.dbs.listFiles(
                "",  # path
                "",  #primary
                "",  # processed
                [],  #tier_list
                "",  #analysisDataset
                fileBlockName,
                details="True")  # note the string "True", not a boolean

        except DbsException, ex:
            msg = "Error in "
            msg += "DBSReader.listFilesInBlock(%s)\n" % fileBlockName
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)
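
The snippets above are truncated before their return statement; assuming the full method returns the files list, a sketch for extracting LFNs (the 'LogicalFileName' key is the one used by the merged-file code in Example #39):

    files = reader.listFilesInBlock(block)
    lfns = [f['LogicalFileName'] for f in files]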
Example #28
    def importDataset(self, sourceDBS, sourceDatasetPath, targetDBS,
                      onlyClosed=True, skipNoSiteError=False):
        """
        _importDataset_

        Import a dataset into the local scope DBS with its full parentage
        hierarchy (this stays reasonably fast because branch info is
        dropped). Parents are also imported. This method imports block by
        block, and each time a block is imported, its parent blocks are
        imported first.

        - *sourceDBS* : URL for input DBS instance

        - *sourceDatasetPath* : Dataset Path to be imported

        - *targetDBS* : URL for DBS to have dataset imported to

        - *onlyClosed* : Only closed blocks will be imported if set to True

        - *skipNoSiteError* : If this is True, this method won't raise an
                              Exception if a block has no site information in
                              sourceDBS.

        """
        reader = DBSReader(sourceDBS)
        inputBlocks = reader.getFileBlocksInfo(sourceDatasetPath, onlyClosed)
        blkCounter = 0
        for inputBlock in inputBlocks:
            block = inputBlock['Name']
            #  //
            # // Test block does not exist in target
            #//
            blkCounter = blkCounter + 1
            msg = "Importing block %s of %s: %s " % (blkCounter, len(inputBlocks), block)
            logging.debug(msg)
            if self.reader.blockExists(block):
                #  //
                # // block exists
                #//  If block is closed don't attempt transfer
                if str(inputBlock['OpenForWriting']) != '1':
                    msg = "Block already exists in target DBS and is closed:\n"
                    msg += " ==> %s\n" % block
                    msg += "Skipping Import of that block"
                    logging.warning(msg)
                    locations = reader.listFileBlockLocation(block)
                    # only empty file blocks can have no location
                    if not locations and str(inputBlock['NumberOfFiles']) != "0":
                        # we don't skip the error raising
                        if not skipNoSiteError:
                            msg = "Error in DBSWriter.importDataset\n"
                            msg += "Block has no locations defined: %s" % block
                            raise DBSWriterError(msg)
                        msg = "Block has no locations defined: %s" % block
                        logging.info(msg)
                    logging.info("Update block locations to:")
                    for sename in locations:
                        self.dbs.addReplicaToBlock(block,sename)
                        logging.info(sename)
                    continue

            try:

                self.dbs.dbsMigrateBlock(sourceDBS, targetDBS, block_name=block)
            except DbsException, ex:
                msg = "Error in DBSWriter.importDataset\n"
                msg += "Could not write content of dataset:\n ==> %s\n" % (
                    sourceDatasetPath,)
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
                    
            locations = reader.listFileBlockLocation(block)
            # only empty file blocks can have no location
            if not locations and str(inputBlock['NumberOfFiles']) != "0":
                # we don't skip the error raising
                if not skipNoSiteError:
                    msg = "Error in DBSWriter.importDataset\n"
                    msg += "Block has no locations defined: %s" % block
                    raise DBSWriterError(msg)
                msg = "Block has no locations defined: %s" % block
                logging.info(msg)
            for sename in locations:
                self.dbs.addReplicaToBlock(block,sename)
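
A hedged call sketch (URLs and dataset path are placeholders; `writer` stands for a constructed DBSWriter):

    writer.importDataset(
        "https://source-dbs.example.cern.ch/servlet/DBSServlet",
        "/MyPrimary/MyProcessed-v1/GEN-SIM-RECO",
        "https://target-dbs.example.cern.ch/servlet/DBSServlet",
        onlyClosed=True,
        skipNoSiteError=True)  # log blocks without locations instead of raising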
Example #29
    def migrateDatasetBlocks(self, inputDBSUrl, datasetPath, blocks):
        """
        _migrateDatasetBlocks_

        Migrate the list of fileblocks provided by blocks, belonging
        to the dataset specified by the dataset path to this DBS instance
        from the inputDBSUrl provided

        - *inputDBSUrl* : URL for connection to input DBS
        - *datasetPath* : Name of dataset in input DBS (must exist in input
                          DBS)
        - *blocks*      : list of block names to be migrated (must exist
                          in input DBS)

        """
        if len(blocks) == 0:
            msg = "FileBlocks not provided.\n"
            msg += "You must provide the name of at least one fileblock\n"
            msg += "to be migrated"
            raise DBSWriterError(msg)
        #  //
        # // Hook onto input DBSUrl and verify that the dataset & blocks
        #//  exist
        reader = DBSReader(inputDBSUrl)

        inputBlocks = reader.listFileBlocks(datasetPath)

        for block in blocks:
            #  //
            # // Test block exists at source
            #//
            if block not in inputBlocks:
                msg = "Block name:\n ==> %s\n" % block
                msg += "Not found in input dataset:\n ==> %s\n" % datasetPath
                msg += "In DBS Instance:\n ==> %s\n" % inputDBSUrl
                raise DBSWriterError(msg)

            #  //
            # // Test block does not exist in target
            #//
            if self.reader.blockExists(block):
                #  //
                # // block exists
                #//  If block is closed don't attempt transfer
                if not self.reader.blockIsOpen(block):
                    msg = "Block already exists in target DBS and is closed:\n"
                    msg += " ==> %s\n" % block
                    msg += "Skipping Migration of that block"
                    logging.warning(msg)
                    continue

            try:
                xferData = reader.dbs.listDatasetContents(datasetPath, block)
            except DbsException, ex:
                msg = "Error in DBSWriter.migrateDatasetBlocks\n"
                msg += "Could not read content of dataset:\n ==> %s\n" % (
                    datasetPath, )
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            xferData = _remapBlockParentage(datasetPath, xferData)

            try:
                self.dbs.insertDatasetContents(xferData)
            except DbsException, ex:
                msg = "Error in DBSWriter.migrateDatasetBlocks\n"
                msg += "Could not write content of dataset:\n ==> %s\n" % (
                    datasetPath, )
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
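
A hedged call sketch (all names are placeholders). Passing an empty block list raises DBSWriterError before any DBS call is made:

    writer.migrateDatasetBlocks(
        "https://source-dbs.example.cern.ch/servlet/DBSServlet",
        "/MyPrimary/MyProcessed-v1/GEN-SIM-RECO",
        ["/MyPrimary/MyProcessed-v1/GEN-SIM-RECO#blk-0001"])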
Example #30
    def importDatasetWithExistingParents(self,
                                         sourceDBS,
                                         sourceDatasetPath,
                                         targetDBS,
                                         onlyClosed=True):
        """
        _importDataset_

        Import a dataset into the local scope DBS.
        It complains if the parent datasets are not there!

        - *sourceDBS* : URL for input DBS instance

        - *sourceDatasetPath* : Dataset Path to be imported
        
        - *targetDBS* : URL for DBS to have dataset imported to

        """
        reader = DBSReader(sourceDBS)
        inputBlocks = reader.getFileBlocksInfo(sourceDatasetPath, onlyClosed)
        for inputBlock in inputBlocks:
            block = inputBlock['Name']
            #  //
            # // Test block does not exist in target
            #//
            if self.reader.blockExists(block):
                #  //
                # // block exists
                #//  If block is closed don't attempt transfer
                if str(inputBlock['OpenForWriting']) != '1':
                    msg = "Block already exists in target DBS and is closed:\n"
                    msg += " ==> %s\n" % block
                    msg += "Skipping Import of that block"
                    logging.warning(msg)
                    locations = reader.listFileBlockLocation(block)
                    # only empty file blocks can have no location
                    if not locations and str(
                            inputBlock['NumberOfFiles']) != "0":
                        msg = "Error in DBSWriter.importDatasetWithExistingParents\n"
                        msg += "Block has no locations defined: %s" % block
                        raise DBSWriterError(msg)
                    logging.info("Update block locations to:")
                    for sename in locations:
                        self.dbs.addReplicaToBlock(block, sename)
                        logging.info(sename)
                    continue

            try:
                xferData = reader.dbs.listDatasetContents(
                    sourceDatasetPath, block)
            except DbsException, ex:
                msg = "Error in DBSWriter.importDatasetWithExistingParents\n"
                msg += "Could not read content of dataset:\n ==> %s\n" % (
                    sourceDatasetPath, )
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
            try:
                self.dbs.insertDatasetContents(xferData)
            except DbsException, ex:
                msg = "Error in DBSWriter.importDatasetWithExistingParents\n"
                msg += "Could not write content of dataset:\n ==> %s\n" % (
                    sourceDatasetPath, )
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
Example #31
    def insertFilesForDBSBuffer(self, files, procDataset, algos, jobType = "NotMerge", insertDetectorData = False):
        """
        _insertFiles_

        list of files inserted in DBS
        """
        # TODO: What's the purpose of insertDetectorData?

        if len(files) < 1:
            return
        affectedBlocks = set()
        insertFiles = []
        addedRuns = []
        seName = None
        
        # Get the algos in insertable form
        ialgos = [DBSWriterObjects.createAlgorithmForInsert(dict(algo)) for algo in algos]
       
        for outFile in files:
            #  //
            # // Convert each file into a DBS File object
            #//
            lumiList = []

            # Something similar should be the real deal when multiple runs/lumis could be returned from a WMBS file

            for runlumiinfo in outFile.getRuns():
                lrun=long(runlumiinfo.run)
                run = DbsRun(
                    RunNumber = lrun,
                    NumberOfEvents = 0,
                    NumberOfLumiSections = 0,
                    TotalLuminosity = 0,
                    StoreNumber = 0,
                    StartOfRun = 0,
                    EndOfRun = 0,
                    )
                #Only added if not added by another file in this loop, why waste a call to DBS
                if lrun not in addedRuns:
                    self.dbs.insertRun(run)
                    addedRuns.append(lrun)  # save it so we do not try to add it again to DBS
                    logging.debug("run %s added to DBS " % str(lrun))
                for alsn in runlumiinfo:
                    lumi = DbsLumiSection(
                        LumiSectionNumber = long(alsn),
                        StartEventNumber = 0,
                        EndEventNumber = 0,
                        LumiStartTime = 0,
                        LumiEndTime = 0,
                        RunNumber = lrun,
                    )
                    lumiList.append(lumi)

            logging.debug("lumi list created for the file")

            dbsfile = DbsFile(
                              Checksum = str(outFile['cksum']),
                              NumberOfEvents = outFile['events'],
                              LogicalFileName = outFile['lfn'],
                              FileSize = int(outFile['size']),
                              Status = "VALID",
                              ValidationStatus = 'VALID',
                              FileType = 'EDM',
                              Dataset = procDataset,
                              TierList = DBSWriterObjects.makeTierList(procDataset['Path'].split('/')[3]),
                              AlgoList = ialgos,
                              LumiList = lumiList,
                              ParentList = outFile.getParentLFNs(),
                              #BranchHash = outFile['BranchHash'],
                            )
            # This check comes from ProdAgent; not sure if it's required
            if len(outFile["locations"]) > 0:
                seName = list(outFile["locations"])[0]
                logging.debug("SEname associated to file is: %s" % seName)
            else:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to file"
                #print "FAKING seName for now"
                #seName="cmssrm.fnal.gov"
                raise DBSWriterError(msg)
            insertFiles.append(dbsfile)
        #  //Processing Jobs: 
        # // Insert the lists of sorted files into the appropriate
        #//  fileblocks
       
        try:
            fileBlock = DBSWriterObjects.getDBSFileBlock(
                    self.dbs,
                    procDataset,
                    seName)
        except DbsException, ex:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "Cannot retrieve FileBlock for dataset:\n"
            msg += " %s\n" % procDataset['Path']
            #msg += "In Storage Element:\n %s\n" % insertFiles.seName
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
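
The method only touches a handful of keys and two methods on each file record. A stand-in sketch showing that contract (the FakeFile class is hypothetical, for illustration only; real records come from WMBS):

    class FakeFile(dict):
        """Minimal stand-in for the WMBS file objects consumed above."""
        def getRuns(self):
            return []  # run/lumi objects would come from WMBS
        def getParentLFNs(self):
            return []

    f = FakeFile(cksum="1234", events=100, lfn="/store/data/test.root",
                 size=2048, locations=["srm.example.cern.ch"])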
Example #32
    def importDataset(self,
                      sourceDBS,
                      sourceDatasetPath,
                      targetDBS,
                      onlyClosed=True,
                      skipNoSiteError=False):
        """
        _importDataset_

        Import a dataset into the local scope DBS with its full parentage
        hierarchy (this stays reasonably fast because branch info is
        dropped). Parents are also imported. This method imports block by
        block, and each time a block is imported, its parent blocks are
        imported first.

        - *sourceDBS* : URL for input DBS instance

        - *sourceDatasetPath* : Dataset Path to be imported

        - *targetDBS* : URL for DBS to have dataset imported to

        - *onlyClosed* : Only closed blocks will be imported if set to True

        - *skipNoSiteError* : If this is True, this method won't raise an
                              Exception if a block has no site information in
                              sourceDBS.

        """
        reader = DBSReader(sourceDBS)
        inputBlocks = reader.getFileBlocksInfo(sourceDatasetPath, onlyClosed)
        blkCounter = 0
        for inputBlock in inputBlocks:
            block = inputBlock['Name']
            #  //
            # // Test block does not exist in target
            #//
            blkCounter = blkCounter + 1
            msg = "Importing block %s of %s: %s " % (blkCounter,
                                                     len(inputBlocks), block)
            logging.debug(msg)
            if self.reader.blockExists(block):
                #  //
                # // block exists
                #//  If block is closed don't attempt transfer
                if str(inputBlock['OpenForWriting']) != '1':
                    msg = "Block already exists in target DBS and is closed:\n"
                    msg += " ==> %s\n" % block
                    msg += "Skipping Import of that block"
                    logging.warning(msg)
                    locations = reader.listFileBlockLocation(block)
                    # only empty file blocks can have no location
                    if not locations and str(
                            inputBlock['NumberOfFiles']) != "0":
                        # we don't skip the error raising
                        if not skipNoSiteError:
                            msg = "Error in DBSWriter.importDataset\n"
                            msg += "Block has no locations defined: %s" % block
                            raise DBSWriterError(msg)
                        msg = "Block has no locations defined: %s" % block
                        logging.info(msg)
                    logging.info("Update block locations to:")
                    for sename in locations:
                        self.dbs.addReplicaToBlock(block, sename)
                        logging.info(sename)
                    continue

            try:

                self.dbs.dbsMigrateBlock(sourceDBS,
                                         targetDBS,
                                         block_name=block)
            except DbsException, ex:
                msg = "Error in DBSWriter.importDataset\n"
                msg += "Could not write content of dataset:\n ==> %s\n" % (
                    sourceDatasetPath, )
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            locations = reader.listFileBlockLocation(block)
            # only empty file blocks can have no location
            if not locations and str(inputBlock['NumberOfFiles']) != "0":
                # we don't skip the error raising
                if not skipNoSiteError:
                    msg = "Error in DBSWriter.importDataset\n"
                    msg += "Block has no locations defined: %s" % block
                    raise DBSWriterError(msg)
                msg = "Block has no locations defined: %s" % block
                logging.info(msg)
            for sename in locations:
                self.dbs.addReplicaToBlock(block, sename)
Example #33
    def insertFiles(self, fwkJobRep, insertDetectorData = False):
        """
        _insertFiles_

        Process the files in the FwkJobReport instance and insert
        them into the associated datasets

        A list of affected fileblock names is returned for both merged
        and unmerged fileblocks. Only merged blocks will have to be managed;
        the list is kept for merged fileblocks to facilitate management of
        those blocks. It is not populated for processing jobs, since we
        don't really care about the processing-job blocks.

        """

        insertLists = {}
        orderedHashes = []
        affectedBlocks = set()

        if len(fwkJobRep.files) <= 0:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "No files found in FrameWorkJobReport for:\n"
            msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
            msg += " Workflow: %s" % fwkJobRep.workflowSpecId
            raise DBSWriterError(msg)


        for outFile in fwkJobRep.sortFiles():
            #  //
            # // Convert each file into a DBS File object
            #//
            seName = None
            if outFile.has_key("SEName"):
                if outFile['SEName']:
                    seName = outFile['SEName']
                    logging.debug("SEname associated to file is: %s" % seName)
## remove the fallback to site se-name if no SE is associated to File
## because it's likely that there is some stage out problem if there
## is no SEName associated to the file.
#            if not seName:
#                if fwkJobRep.siteDetails.has_key("se-name"):
#                   seName = fwkJobRep.siteDetails['se-name']
#                   seName = str(seName)
#                   logging.debug("site SEname: %s"%seName) 
            if not seName:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to files in FrameWorkJobReport for "
#                msg += "No SEname found in FrameWorkJobReport for "
                msg += "==> JobSpecId: %s"%fwkJobRep.jobSpecId
                msg += " Workflow: %s"%fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)
            try:
                if ( insertDetectorData ):
                    dbsFiles = DBSWriterObjects.createDBSFiles(outFile,
                                                               fwkJobRep.jobType,
                                                               self.dbs)
                else:
                    dbsFiles = DBSWriterObjects.createDBSFiles(outFile,
                                                               fwkJobRep.jobType)
            except DbsException, ex:
                msg = "Error in DBSWriter.insertFiles:\n"
                msg += "Error creating DbsFile instances for file:\n"
                msg += "%s\n" % outFile['LFN']
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            if len(dbsFiles) <= 0:
                msg = "No DbsFile instances created. Not enough info in the FrameWorkJobReport for"
                msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
                msg += " Workflow: %s" % fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)

            for f in dbsFiles:
                datasetName = makeDBSDSName(f)
                hashName = "%s-%s" % (seName, datasetName)
                
                if not insertLists.has_key(hashName):
                    insertLists[hashName] = _InsertFileList(seName,
                                                            datasetName)
                insertLists[hashName].append(f)
                
                if not orderedHashes.count(hashName):
                    orderedHashes.append(hashName)
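
The grouping at the end is worth isolating: files are bucketed by a "<seName>-<datasetName>" key while orderedHashes preserves first-seen order. A self-contained sketch with placeholder values (plain lists stand in for the _InsertFileList objects used above):

    insertLists, orderedHashes = {}, []
    for seName, datasetName, f in [("se1", "/A/B/C", "f1"),
                                   ("se1", "/A/B/C", "f2"),
                                   ("se2", "/A/B/C", "f3")]:
        hashName = "%s-%s" % (seName, datasetName)
        insertLists.setdefault(hashName, []).append(f)
        if hashName not in orderedHashes:
            orderedHashes.append(hashName)
    # insertLists now has two buckets; orderedHashes == ["se1-/A/B/C", "se2-/A/B/C"]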
Example #34
    def insertFilesForDBSBuffer(self,
                                files,
                                procDataset,
                                algos,
                                jobType="NotMerge",
                                insertDetectorData=False):
        """
        _insertFiles_

        list of files inserted in DBS
        """
        # TODO: What's the purpose of insertDetectorData?

        if len(files) < 1:
            return
        affectedBlocks = set()
        insertFiles = []
        addedRuns = []
        seName = None

        #Get the algos in insertable form
        ialgos = [
            DBSWriterObjects.createAlgorithmForInsert(dict(algo))
            for algo in algos
        ]

        for outFile in files:
            #  //
            # // Convert each file into a DBS File object
            #//
            lumiList = []

            # Something similar should be the real deal when multiple runs/lumis could be returned from a WMBS file

            for runlumiinfo in outFile.getRuns():
                lrun = long(runlumiinfo.run)
                run = DbsRun(
                    RunNumber=lrun,
                    NumberOfEvents=0,
                    NumberOfLumiSections=0,
                    TotalLuminosity=0,
                    StoreNumber=0,
                    StartOfRun=0,
                    EndOfRun=0,
                )
                #Only added if not added by another file in this loop, why waste a call to DBS
                if lrun not in addedRuns:
                    self.dbs.insertRun(run)
                    addedRuns.append(
                        lrun)  #save it so we do not try to add it again to DBS
                    logging.debug("run %s added to DBS " % str(lrun))
                for alsn in runlumiinfo:
                    lumi = DbsLumiSection(
                        LumiSectionNumber=long(alsn),
                        StartEventNumber=0,
                        EndEventNumber=0,
                        LumiStartTime=0,
                        LumiEndTime=0,
                        RunNumber=lrun,
                    )
                    lumiList.append(lumi)

            logging.debug("lumi list created for the file")

            dbsfile = DbsFile(
                Checksum=str(outFile['cksum']),
                NumberOfEvents=outFile['events'],
                LogicalFileName=outFile['lfn'],
                FileSize=int(outFile['size']),
                Status="VALID",
                ValidationStatus='VALID',
                FileType='EDM',
                Dataset=procDataset,
                TierList=DBSWriterObjects.makeTierList(
                    procDataset['Path'].split('/')[3]),
                AlgoList=ialgos,
                LumiList=lumiList,
                ParentList=outFile.getParentLFNs(),
                #BranchHash = outFile['BranchHash'],
            )
            # This check comes from ProdAgent; not sure if it's required
            if len(outFile["locations"]) > 0:
                seName = list(outFile["locations"])[0]
                logging.debug("SEname associated to file is: %s" % seName)
            else:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to file"
                #print "FAKING seName for now"
                #seName="cmssrm.fnal.gov"
                raise DBSWriterError(msg)
            insertFiles.append(dbsfile)
        #  //Processing Jobs:
        # // Insert the lists of sorted files into the appropriate
        #//  fileblocks

        try:
            fileBlock = DBSWriterObjects.getDBSFileBlock(
                self.dbs, procDataset, seName)
        except DbsException, ex:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "Cannot retrieve FileBlock for dataset:\n"
            msg += " %s\n" % procDataset['Path']
            #msg += "In Storage Element:\n %s\n" % insertFiles.seName
            msg += "%s\n" % formatEx(ex)
            raise DBSWriterError(msg)
Example #35
    def migrateDatasetBlocks(self, inputDBSUrl, datasetPath, blocks):
        """
        _migrateDatasetBlocks_

        Migrate the list of fileblocks provided by blocks, belonging
        to the dataset specified by the dataset path to this DBS instance
        from the inputDBSUrl provided

        - *inputDBSUrl* : URL for connection to input DBS
        - *datasetPath* : Name of dataset in input DBS (must exist in input
                          DBS)
        - *blocks*      : list of block names to be migrated (must exist
                          in input DBS)

        """
        if len(blocks) == 0:
            msg = "FileBlocks not provided.\n"
            msg += "You must provide the name of at least one fileblock\n"
            msg += "to be migrated"
            raise DBSWriterError(msg)
        #  //
        # // Hook onto input DBSUrl and verify that the dataset & blocks
        #//  exist
        reader = DBSReader(inputDBSUrl)
        
        inputBlocks = reader.listFileBlocks(datasetPath)
        
        for block in blocks:
            #  //
            # // Test block exists at source
            #// 
            if block not in inputBlocks:
                msg = "Block name:\n ==> %s\n" % block
                msg += "Not found in input dataset:\n ==> %s\n" % datasetPath
                msg += "In DBS Instance:\n ==> %s\n" % inputDBSUrl
                raise DBSWriterError(msg)

            #  //
            # // Test block does not exist in target
            #//
            if self.reader.blockExists(block):
                #  //
                # // block exists
                #//  If block is closed don't attempt transfer
                if not self.reader.blockIsOpen(block):
                    msg = "Block already exists in target DBS and is closed:\n"
                    msg += " ==> %s\n" % block
                    msg += "Skipping Migration of that block"
                    logging.warning(msg)
                    continue
                
            try:
                xferData = reader.dbs.listDatasetContents(datasetPath, block)
            except DbsException, ex:
                msg = "Error in DBSWriter.migrateDatasetBlocks\n"
                msg += "Could not read content of dataset:\n ==> %s\n" % (
                    datasetPath,)
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
            
            xferData = _remapBlockParentage(datasetPath, xferData)
            
            try:
                self.dbs.insertDatasetContents(xferData)
            except DbsException, ex:
                msg = "Error in DBSWriter.migrateDatasetBlocks\n"
                msg += "Could not write content of dataset:\n ==> %s\n" % (
                    datasetPath,)
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
Example #36
    def importDatasetWithExistingParents(self, sourceDBS, sourceDatasetPath, targetDBS,
                      onlyClosed = True):
        """
        _importDataset_

        Import a dataset into the local scope DBS.
        It complains if the parent datasets are not there!

        - *sourceDBS* : URL for input DBS instance

        - *sourceDatasetPath* : Dataset Path to be imported
        
        - *targetDBS* : URL for DBS to have dataset imported to

        """
        reader = DBSReader(sourceDBS)
        inputBlocks = reader.getFileBlocksInfo(sourceDatasetPath, onlyClosed)
        for inputBlock in inputBlocks:
            block = inputBlock['Name']
            #  //
            # // Test block does not exist in target
            #//
            if self.reader.blockExists(block):
                #  //
                # // block exists
                #//  If block is closed don't attempt transfer
                if str(inputBlock['OpenForWriting']) != '1':
                    msg = "Block already exists in target DBS and is closed:\n"
                    msg += " ==> %s\n" % block
                    msg += "Skipping Import of that block"
                    logging.warning(msg)
                    locations = reader.listFileBlockLocation(block)
                    # only empty file blocks can have no location
                    if not locations and str(inputBlock['NumberOfFiles']) != "0":
                        msg = "Error in DBSWriter.importDatasetWithExistingParents\n"
                        msg += "Block has no locations defined: %s" % block
                        raise DBSWriterError(msg)
                    logging.info("Update block locations to:")
                    for sename in locations:
                        self.dbs.addReplicaToBlock(block,sename)
                        logging.info(sename)
                    continue

            
            try:
                xferData = reader.dbs.listDatasetContents(
                    sourceDatasetPath, block
                    )
            except DbsException, ex:
                msg = "Error in DBSWriter.importDatasetWithExistingParents\n"
                msg += "Could not read content of dataset:\n ==> %s\n" % (
                    sourceDatasetPath,)
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
            try:
                self.dbs.insertDatasetContents(xferData)
            except DbsException, ex:
                msg = "Error in DBSWriter.importDatasetWithExistingParents\n"
                msg += "Could not write content of dataset:\n ==> %s\n" % (
                    sourceDatasetPath,)
                msg += "Block name:\n ==> %s\n" % block
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)
Example #37
    def insertFiles(self, fwkJobRep, insertDetectorData=False):
        """
        _insertFiles_

        Process the files in the FwkJobReport instance and insert
        them into the associated datasets

        A list of affected fileblock names is returned for both merged
        and unmerged fileblocks. Only merged blocks will have to be managed;
        the list is kept for merged fileblocks to facilitate management of
        those blocks. It is not populated for processing jobs, since we
        don't really care about the processing-job blocks.

        """

        insertLists = {}
        orderedHashes = []
        affectedBlocks = set()

        if len(fwkJobRep.files) <= 0:
            msg = "Error in DBSWriter.insertFiles\n"
            msg += "No files found in FrameWorkJobReport for:\n"
            msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
            msg += " Workflow: %s" % fwkJobRep.workflowSpecId
            raise DBSWriterError(msg)

        for outFile in fwkJobRep.sortFiles():
            #  //
            # // Convert each file into a DBS File object
            #//
            seName = None
            if outFile.has_key("SEName"):
                if outFile['SEName']:
                    seName = outFile['SEName']
                    logging.debug("SEname associated to file is: %s" % seName)
## remove the fallback to site se-name if no SE is associated to File
## because it's likely that there is some stage out problem if there
## is no SEName associated to the file.
#            if not seName:
#                if fwkJobRep.siteDetails.has_key("se-name"):
#                   seName = fwkJobRep.siteDetails['se-name']
#                   seName = str(seName)
#                   logging.debug("site SEname: %s"%seName)
            if not seName:
                msg = "Error in DBSWriter.insertFiles\n"
                msg += "No SEname associated to files in FrameWorkJobReport for "
                #                msg += "No SEname found in FrameWorkJobReport for "
                msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
                msg += " Workflow: %s" % fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)
            try:
                if (insertDetectorData):
                    dbsFiles = DBSWriterObjects.createDBSFiles(
                        outFile, fwkJobRep.jobType, self.dbs)
                else:
                    dbsFiles = DBSWriterObjects.createDBSFiles(
                        outFile, fwkJobRep.jobType)
            except DbsException, ex:
                msg = "Error in DBSWriter.insertFiles:\n"
                msg += "Error creating DbsFile instances for file:\n"
                msg += "%s\n" % outFile['LFN']
                msg += "%s\n" % formatEx(ex)
                raise DBSWriterError(msg)

            if len(dbsFiles) <= 0:
                msg = "No DbsFile instances created. Not enough info in the FrameWorkJobReport for"
                msg += "==> JobSpecId: %s" % fwkJobRep.jobSpecId
                msg += " Workflow: %s" % fwkJobRep.workflowSpecId
                raise DBSWriterError(msg)

            for f in dbsFiles:
                datasetName = makeDBSDSName(f)
                hashName = "%s-%s" % (seName, datasetName)

                if not insertLists.has_key(hashName):
                    insertLists[hashName] = _InsertFileList(
                        seName, datasetName)
                insertLists[hashName].append(f)

                if not orderedHashes.count(hashName):
                    orderedHashes.append(hashName)
Example #38
                        self.dbs.getServerInfo()['InstanceName'] == 'GLOBAL':
            dlsType = 'DLS_TYPE_PHEDEX'
            dlsUrl = 'https://cmsweb.cern.ch/phedex/datasvc/xml/prod'
        else:
            dlsType = 'DLS_TYPE_DBS'
            dlsUrl = url
        try:
            self.dls = dlsClient.getDlsApi(dls_type=dlsType,
                                           dls_endpoint=dlsUrl)
        except DlsApiError, ex:
            msg = "Error in DBSReader with DlsApi\n"
            msg += "%s\n" % str(ex)
            raise DBSReaderError(msg)
        except DbsException, ex:
            msg = "Error in DBSReader with DbsApi\n"
            msg += "%s\n" % formatEx(ex)
            raise DBSReaderError(msg)

    def listPrimaryDatasets(self, match=None):
        """
        _listPrimaryDatasets_
        
        return a list of primary datasets matching the glob expression.
        If no expression is provided, all datasets are returned
        """
        arg = "*"
        if match != None:
            arg = match
        try:
            result = self.dbs.listPrimaryDatasets(arg)
        except DbsException, ex:
Example #39
                affectedBlocks.add(fileBlock['Name'])
                msg="calling: self.dbs.insertMergedFile(%s, %s)" % (str(mergedFile['ParentList']),str(mergedFile))
                logging.debug(msg)
                try:
                    # NOTE To Anzar From Anzar (File cloning as in the DBS API
                    # can be done here, and then I can use Bulk insert on
                    # Merged files as well)
                    self.dbs.insertMergedFile(mergedFile['ParentList'],
                                              mergedFile)

                except DbsException, ex:
                    msg = "Error in DBSWriter.insertFiles\n"
                    msg += "Cannot insert merged file:\n"
                    msg += "  %s\n" % mergedFile['LogicalFileName']
                    msg += "%s\n" % formatEx(ex)
                    raise DBSWriterError(msg)
                logging.debug("Inserted merged file: %s to FileBlock: %s"%(mergedFile['LogicalFileName'],fileBlock['Name']))
        else:
                #  //
                # // Processing files
                #//
                affectedBlocks.add(fileBlock['Name'])
                msg="calling: self.dbs.insertFiles(%s, %s, %s)" % (str(procDataset['Path']),str(insertFiles),str(fileBlock))
                logging.debug(msg)

                try:
                    self.dbs.insertFiles(procDataset, insertFiles,
                                         fileBlock)
                except DbsException, ex:
                    msg = "Error in DBSWriter.insertFiles\n"
Example #40
class Publisher(Actor):
    def __init__(self, cfg_params):
        """
        Publisher class:

        - parses CRAB FrameworkJobReport on UI
        - returns <file> section of xml in dictionary format for each xml file in crab_0_xxxx/res directory
        - publishes output data on DBS and DLS
        """

        self.cfg_params = cfg_params
        self.fjrDirectory = cfg_params.get('USER.outputdir',
                                           common.work_space.resDir()) + '/'

        if not cfg_params.has_key('USER.publish_data_name'):
            raise CrabException(
                'Cannot publish output data, because you did not specify USER.publish_data_name parameter in the crab.cfg file'
            )
        self.userprocessedData = cfg_params['USER.publish_data_name']
        self.processedData = None

        if (not cfg_params.has_key('USER.copy_data') or int(cfg_params['USER.copy_data']) != 1 ) or \
            (not cfg_params.has_key('USER.publish_data') or int(cfg_params['USER.publish_data']) != 1 ):
            msg = 'You cannot publish data because you did not select\n'
            msg += '\t*** copy_data = 1 and publish_data = 1 *** in the crab.cfg file'
            raise CrabException(msg)

        if not cfg_params.has_key('CMSSW.pset'):
            raise CrabException(
                'Cannot publish output data, because you did not specify the psetname in [CMSSW] of your crab.cfg file'
            )
        self.pset = cfg_params['CMSSW.pset']

        self.globalDBS = cfg_params.get(
            'CMSSW.dbs_url',
            "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet")

        #if not cfg_params.has_key('USER.dbs_url_for_publication'):
        #    msg = "Warning. The [USER] section does not have 'dbs_url_for_publication'"
        #    msg = msg + " entry, necessary to publish the data.\n"
        #    msg = msg + "Use the command **crab -publish -USER.dbs_url_for_publication=dbs_url_for_publication*** \nwhere dbs_url_for_publication is your local dbs instance."
        #    raise CrabException(msg)

        self.DBSURL = cfg_params.get('USER.dbs_url_for_publication',
                                     'DBS3/phys03')
        #common.logger.info('<dbs_url_for_publication> = '+self.DBSURL)
        if (
                self.DBSURL ==
                "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet"
        ) or (self.DBSURL ==
              "https://cmsdbsprod.cern.ch:8443/cms_dbs_prod_global_writer/servlet/DBSServlet"
              ):
            msg = "You can not publish your data in the globalDBS = " + self.DBSURL + "\n"
            msg = msg + "Please write your local one in the [USER] section 'dbs_url_for_publication'"
            raise CrabException(msg)

        self.content = file(self.pset).read()
        self.resDir = common.work_space.resDir()

        self.dataset_to_import = []

        self.datasetpath = cfg_params['CMSSW.datasetpath']
        if (self.datasetpath.upper() != 'NONE'):
            self.dataset_to_import.append(self.datasetpath)

        ### Added PU dataset
        tmp = cfg_params.get('CMSSW.dataset_pu', None)
        if tmp:
            datasets = tmp.split(',')
            for dataset in datasets:
                dataset = string.strip(dataset)
                self.dataset_to_import.append(dataset)
        ###

        self.import_all_parents = cfg_params.get(
            'USER.publish_with_import_all_parents', 1)

        if (int(self.import_all_parents) == 0):
            common.logger.info(
                "WARNING: The option USER.publish_with_import_all_parents=0 has been deprecated. The import of parents is compulsory and done by default"
            )
        self.skipOcheck = cfg_params.get('CMSSW.publish_zero_event', 1)
        if (int(self.skipOcheck) == 0):
            common.logger.info(
                "WARNING: The option CMSSW.publish_zero_event has been deprecated. The publication is done by default also for files with 0 events"
            )
        self.SEName = ''
        self.CMSSW_VERSION = ''
        self.exit_status = ''
        self.time = time.strftime('%y%m%d_%H%M%S', time.localtime(time.time()))
        self.problemFiles = []
        self.noEventsFiles = []
        self.noLFN = []

        #### FEDE to allow publication without input data in <file>
        if cfg_params.has_key('USER.no_inp'):
            self.no_inp = cfg_params['USER.no_inp']
        else:
            self.no_inp = 0
        ############################################################
    def importParentDataset(self, globalDBS, datasetpath):
        """
           WARNING: it works only with DBS_2_0_9_patch_6
        """

        args = {'url': globalDBS}
        try:
            api_reader = DbsApi(args)
        except DbsApiException, ex:
            msg = "%s\n" % formatEx(ex)
            raise CrabException(msg)

        args = {'url': self.DBSURL}
        try:
            api_writer = DbsApi(args)
        except DbsApiException, ex:
            msg = "%s\n" % formatEx(ex)
            raise CrabException(msg)
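
A sketch of a cfg_params mapping that satisfies the constructor's checks above. Values are placeholders; the real object is CRAB's configuration wrapper, and actually instantiating Publisher also needs the CRAB runtime (e.g. common.work_space) plus an existing pset file:

    cfg_params = {
        'USER.publish_data_name': 'MyProcessed',
        'USER.copy_data': '1',
        'USER.publish_data': '1',
        'CMSSW.pset': 'pset.py',  # must exist: the constructor reads it
        'USER.dbs_url_for_publication': 'https://localdbs.example.cern.ch/servlet/DBSServlet',
        'CMSSW.datasetpath': 'NONE',
    }
    publisher = Publisher(cfg_params)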