def __call__(self, pnode):
    if pnode.type != "CMSSW":
        return
    # Can't take datasetInfo from getOutputDatasetsWithPSet as that changes
    # the AppFamily to the processing configuration, hence file insertions
    # fail due to a missing algo. WARNING: relies on identical dataset order
    glblTags = [x['Conditions'] for x in
                getOutputDatasetsWithPSet(pnode, sorted=True)]
    for dataset, globalTag in zip(getOutputDatasets(pnode, sorted=True),
                                  glblTags):
        dataset['Conditions'] = globalTag
        primary = DBSWriterObjects.createPrimaryDataset(dataset, self.apiRef)
        mergeAlgo = DBSWriterObjects.createMergeAlgorithm(dataset,
                                                          self.apiRef)
        DBSWriterObjects.createProcessedDataset(primary, mergeAlgo,
                                                dataset, self.apiRef)
        inputDataset = dataset.get('ParentDataset', None)
        if inputDataset is None:
            continue
        processedDataset = dataset["ProcessedDataset"]
        self.apiRef.insertMergedDataset(inputDataset, processedDataset,
                                        mergeAlgo)
        # Algorithm used when processing jobs produce merged files directly;
        # doesn't contain PSet content - taken from processing (same hash).
        mergeDirectAlgo = DBSWriterObjects.createAlgorithm(dataset, None,
                                                           self.apiRef)
        self.apiRef.insertAlgoInPD(makeDSName2(dataset), mergeDirectAlgo)
        logging.debug("ProcessedDataset: %s" % processedDataset)
        logging.debug("inputDataset: %s" % inputDataset)
        logging.debug("mergeAlgo: %s" % mergeAlgo)
    return
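# For context: the __call__ above makes the merge-dataset operator callable
# on a single workflow node; in ProdCommon-style code a writer walks the
# workflow payload tree and applies the operator to every node, and only
# "CMSSW" nodes pass the type check. A minimal sketch of that dispatch
# pattern follows. SimpleNode and walk are illustrative stand-ins, NOT the
# real WorkflowSpec API.

class SimpleNode:
    def __init__(self, nodeType, children=None):
        self.type = nodeType            # e.g. "CMSSW" or "StageOut"
        self.children = children or []

def walk(node, operator):
    """Apply operator to node, then recurse into its children."""
    operator(node)
    for child in node.children:
        walk(child, operator)

# Hypothetical wiring (apiRef and workflow come from the surrounding code,
# and the payload attribute is an assumption about the workflow object):
# walk(workflow.payload, mergeDatasetOperator)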
class _CreateDatasetOperator:
    """
    _CreateDatasetOperator_

    Operator for creating datasets from a workflow node

    """
    def __init__(self, apiRef, workflow):
        self.apiRef = apiRef
        self.workflow = workflow

    def __call__(self, pnode):
        if pnode.type != "CMSSW":
            return
        datasets = getOutputDatasetsWithPSet(pnode, sorted=True)
        cfgMeta = None
        try:
            cfgInt = pnode.cfgInterface
            cfgMeta = cfgInt.configMetadata
            cfgMeta['Type'] = self.workflow.parameters["RequestCategory"]
        except Exception, ex:
            msg = "Unable to extract cfg data from workflow: "
            msg += str(ex)
            logging.error(msg)
            return
        for dataset in datasets:
            primary = DBSWriterObjects.createPrimaryDataset(
                dataset, self.apiRef)
            algo = DBSWriterObjects.createAlgorithm(
                dataset, cfgMeta, self.apiRef)
            processed = DBSWriterObjects.createProcessedDataset(
                primary, algo, dataset, self.apiRef)
        return
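# Both operators treat each entry returned by getOutputDatasetsWithPSet /
# getOutputDatasets as a dictionary of dataset attributes. The sketch below
# lists only the keys this section actually reads or writes; the values are
# invented placeholders, not real CMS data.

datasetInfo = {
    'PrimaryDataset': 'ExamplePrimary',      # read by createPrimaryDataset
    'ProcessedDataset': 'ExampleProcessed',  # read back after createProcessedDataset
    'DataTier': 'GEN-SIM',
    'Conditions': 'EXAMPLE_GT::All',         # global tag copied by the merge operator
    'ParentDataset': None,                   # merge parent; may be absent entirely
    'ApplicationName': 'cmsRun',             # consumed by createAlgorithm
    'ApplicationVersion': 'CMSSW_X_Y_Z',
    'ApplicationFamily': 'Output',
    'PSetHash': 'NA',
    'PSetContent': 'NA',
}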
# Import paths below assume the ProdCommon package layout; adjust if the
# local checkout differs.
from ProdCommon.DataMgmt.DBS.DBSWriter import DBSWriter
from ProdCommon.DataMgmt.DBS.DBSErrors import DBSWriterError
from ProdCommon.DataMgmt.DBS import DBSWriterObjects

datasetStrmr = jobReportFile.newDataset()
datasetStrmr['PrimaryDataset'] = primaryDataset
datasetStrmr['PrimaryDatasetType'] = 'data'
datasetStrmr['ProcessedDataset'] = processedDataset
datasetStrmr['DataTier'] = dataTier
jobReportFile['TotalEvents'] = nEvents
jobReportFile['SEName'] = "srm.cern.ch"
##jobReport.write('FrameworkJobReport.xml')

localDbsUrl = "https://cmst0dbs.cern.ch:8443/cms_dbs_prod_tier0_writer/servlet/DBSServlet"
dbswriter = DBSWriter(localDbsUrl, level='ERROR')

primary = DBSWriterObjects.createPrimaryDataset(datasetStrmr, dbswriter.dbs)

datasetStrmr['ApplicationName'] = appName
datasetStrmr['ApplicationVersion'] = appVersion
datasetStrmr['ApplicationFamily'] = 'DAQ'
datasetStrmr['PSetHash'] = 'NA'
datasetStrmr['PSetContent'] = 'NA'
algo = DBSWriterObjects.createAlgorithm(datasetStrmr, None, dbswriter.dbs)

processed = DBSWriterObjects.createProcessedDataset(primary, algo,
                                                    datasetStrmr,
                                                    dbswriter.dbs)

try:
    blocks = dbswriter.insertFiles(jobReport, insertDetectorData=True)
except DBSWriterError, ex:
    print "%s" % ex
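# The snippet above assumes a framework job report and one output file are
# already in scope. The sketch below shows one way to build them; the
# FwkJobReport module path and the newFile() call are best-guess ProdCommon
# names, and every value is a placeholder.
from ProdCommon.FwkJobRep.FwkJobReport import FwkJobReport

jobReport = FwkJobReport("StreamerInsert")   # report name is arbitrary
jobReportFile = jobReport.newFile()          # the 'jobReportFile' used above
jobReportFile['LFN'] = '/store/data/example/streamer.dat'  # placeholder LFN

# Placeholder values for the free names referenced by the snippet:
primaryDataset = 'ExampleStreamerPrimary'
processedDataset = 'Run000001-Raw'
dataTier = 'RAW'
nEvents = 1000
appName, appVersion = 'cmsRun', 'CMSSW_2_0_8'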