Example #1
    def __call__(self, pnode):
        """
        Create DBS entries for the output datasets of a CMSSW payload
        node: primary dataset, merge algorithm, processed dataset, and
        the merged-dataset link to the parent dataset when one exists.
        """
        if pnode.type != "CMSSW":
            return

        # Can't take the dataset info from getOutputDatasetsWithPSet, as that
        # changes the AppFamily to the processing configuration, so file
        # insertions would fail due to a missing algo.
        # WARNING: relies on both calls returning datasets in the same order.
        glblTags = [
            x['Conditions']
            for x in getOutputDatasetsWithPSet(pnode, sorted=True)
        ]
        for dataset, globalTag in zip(getOutputDatasets(pnode, sorted=True),
                                      glblTags):

            dataset['Conditions'] = globalTag

            primary = DBSWriterObjects.createPrimaryDataset(
                dataset, self.apiRef)

            mergeAlgo = DBSWriterObjects.createMergeAlgorithm(
                dataset, self.apiRef)
            DBSWriterObjects.createProcessedDataset(primary, mergeAlgo,
                                                    dataset, self.apiRef)

            # Unparented datasets have no merge relationship to record.
            inputDataset = dataset.get('ParentDataset')
            if inputDataset is None:
                continue
            processedDataset = dataset["ProcessedDataset"]
            self.apiRef.insertMergedDataset(inputDataset, processedDataset,
                                            mergeAlgo)

            # Algorithm used when processing jobs produce merged files
            # directly; it doesn't contain the PSet content, which is
            # taken from the processing configuration (same hash).
            mergeDirectAlgo = DBSWriterObjects.createAlgorithm(
                dataset, None, self.apiRef)
            self.apiRef.insertAlgoInPD(makeDSName2(dataset), mergeDirectAlgo)

            logging.debug("ProcessedDataset: %s" % processedDataset)
            logging.debug("inputDataset: %s" % inputDataset)
            logging.debug("mergeAlgo: %s" % mergeAlgo)
        return
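
For orientation, here is a minimal sketch of how an operator like this might be driven over a workflow's payload tree. PayloadNode, walk, and DatasetHandler below are illustrative stand-ins, not the real ProdCommon/DBS classes; the only behavior carried over from the example is the early return on non-CMSSW nodes, which makes it safe to apply the handler to every node.

# Hypothetical sketch: none of these names come from the example above.

class PayloadNode:
    """Stand-in for a workflow payload node with a type and children."""
    def __init__(self, nodeType, children=None):
        self.type = nodeType            # e.g. "CMSSW", "StageOut"
        self.children = children or []

def walk(node):
    """Visit a node and all of its descendants, depth first."""
    yield node
    for child in node.children:
        for descendant in walk(child):
            yield descendant

class DatasetHandler:
    """Stub with the same call signature as the operator above."""
    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return                      # non-CMSSW nodes are skipped
        print("would register datasets for a %s node" % pnode.type)

tree = PayloadNode("CMSSW", [PayloadNode("StageOut"),
                             PayloadNode("CMSSW")])
handler = DatasetHandler()
for node in walk(tree):
    handler(node)                       # safe on every node type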