Example #1
    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return
        datasets = getOutputDatasetsWithPSet(pnode, sorted=True)
        cfgMeta = None
        try:
            cfgInt = pnode.cfgInterface
            cfgMeta = cfgInt.configMetadata
            cfgMeta['Type'] = self.workflow.parameters["RequestCategory"]
        except Exception as ex:
            msg = "Unable to extract cfg data from workflow: "
            msg += str(ex)
            logging.error(msg)
            return
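
A note on the guard above: the bare except Exception hides which step failed. A narrower sketch of the same block, sitting inside the same __call__ method and assuming nothing beyond the attribute and parameter names already used in the example:

        try:
            cfgMeta = pnode.cfgInterface.configMetadata
            cfgMeta['Type'] = self.workflow.parameters["RequestCategory"]
        except (AttributeError, KeyError, TypeError) as ex:
            # Catch only the plausible failures: a node without a
            # cfgInterface/configMetadata, or a workflow missing the
            # "RequestCategory" parameter.
            logging.error("Unable to extract cfg data from workflow: %s", ex)
            return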
Example #2
    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return

        # Can't take datasetInfo from getOutputDatasetsWithPSet, as that
        # changes the AppFamily to the processing configuration, so file
        # insertions would fail due to a missing algo.
        # WARNING: this relies on both helpers returning datasets in the
        # same order.
        glblTags = [
            x['Conditions']
            for x in getOutputDatasetsWithPSet(pnode, sorted=True)
        ]
        for dataset, globalTag in zip(getOutputDatasets(pnode, sorted=True),
                                      glblTags):

            dataset['Conditions'] = globalTag

            primary = DBSWriterObjects.createPrimaryDataset(
                dataset, self.apiRef)

            mergeAlgo = DBSWriterObjects.createMergeAlgorithm(
                dataset, self.apiRef)
            DBSWriterObjects.createProcessedDataset(primary, mergeAlgo,
                                                    dataset, self.apiRef)

            inputDataset = dataset.get('ParentDataset', None)
            if inputDataset is None:
                continue
            processedDataset = dataset["ProcessedDataset"]
            self.apiRef.insertMergedDataset(inputDataset, processedDataset,
                                            mergeAlgo)

            # algorithm used when processing jobs produce merged files
            # directly; it doesn't contain the pset content, which is taken
            # from the processing algorithm (same hash)
            mergeDirectAlgo = DBSWriterObjects.createAlgorithm(
                dataset, None, self.apiRef)
            self.apiRef.insertAlgoInPD(makeDSName2(dataset), mergeDirectAlgo)

            logging.debug("ProcessedDataset: %s" % processedDataset)
            logging.debug("inputDataset: %s" % inputDataset)
            logging.debug("mergeAlgo: %s" % mergeAlgo)
        return
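
The WARNING in the comment above is worth emphasizing: the zip() pairing silently attaches the wrong global tag to a dataset if the two helpers ever disagree on ordering. A defensive sketch of the same pairing, using only names from the example (it assumes makeDSName2 yields the same key for the dicts returned by both helpers):

        # Key the conditions by dataset name instead of list position, so
        # the pairing no longer depends on identical sort order.
        conditionsByName = dict(
            (makeDSName2(d), d['Conditions'])
            for d in getOutputDatasetsWithPSet(pnode, sorted=True)
        )
        for dataset in getOutputDatasets(pnode, sorted=True):
            dataset['Conditions'] = conditionsByName[makeDSName2(dataset)]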
Example #3
import os

from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec
from ProdCommon.MCPayloads.DatasetTools import getOutputDatasetsWithPSet
# Assumed import path for the CMSSWAPILoader used at the end of the script:
from ProdCommon.CMSConfigTools.ConfigAPI.CMSSWAPILoader import CMSSWAPILoader

specfile = "/uscms/home/gutsche/CSA08-JetET110-CSA08_S43_S43_rereco_may19_PIC_v1-Workflow.xml"

rawCfgFile = "%s.raw.cfg" % os.path.basename(specfile)
origCfgFile = "%s.orig.cfg" % os.path.basename(specfile)
dbsCfgFile = "%s.dbs.cfg" % os.path.basename(specfile)

spec = WorkflowSpec()
spec.load(specfile)

rawCfg = spec.payload.cfgInterface.rawCfg
originalCfg = spec.payload.cfgInterface.originalCfg

dbsDatasets = getOutputDatasetsWithPSet(spec.payload)

handle = open(dbsCfgFile, 'w')
handle.write(dbsDatasets[0]['PSetContent'])
handle.close()

handle = open(origCfgFile, 'w')
handle.write(originalCfg)
handle.close()

loader = CMSSWAPILoader(os.environ['SCRAM_ARCH'],
                        spec.payload.application['Version'],
                        os.environ['CMS_PATH'])

loader.load()
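
With all three file names computed, a natural follow-up (not part of the original script; difflib and sys are standard library, the file names come from the code above) is to diff the config stored in the workflow spec against the PSet content recorded for DBS:

import difflib
import sys

# Any diff output signals that the DBS copy of the config has drifted
# from the original stored in the workflow spec.
origLines = open(origCfgFile).readlines()
dbsLines = open(dbsCfgFile).readlines()
for line in difflib.unified_diff(origLines, dbsLines,
                                 fromfile=origCfgFile, tofile=dbsCfgFile):
    sys.stdout.write(line)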
