Example 1
        runDict['datasetUsed'] = useableDatasets[0]
        if 'HIExpress' in useableDatasets[0] :
            runDict['HIrun'] = '1'
        else :
            runDict['HIrun'] = '0'
    else :
        print 'Removing run %d due to no Express FEVT datasets available' % runNo
        runsNoDataset.append(runNo)

# Drop every run that had no usable Express FEVT dataset; map() is eager in
# Python 2, so newRuns.pop() is called for each run number in runsNoDataset.
map(newRuns.pop, runsNoDataset)

print 'Found %d new runs!' % len(newRuns)

# Guard against runs being too new for DAS: walk the new runs newest-first,
# dropping any that have no files yet, and stop at the first run that does.
for runNo in sorted(newRuns.keys(), reverse=True) :
    files = util.getFilesForRun(runNo, newRuns[runNo]['datasetUsed'])
    if len(files) > 0 :
        break
    print 'Removing run %d due to no files present in DAS yet' % runNo
    newRuns.pop(runNo)

runCache.update(newRuns)

with open('runCache.json', 'w') as cacheFile :
    json.dump(runCache, cacheFile, indent=4, sort_keys=True)

with open('batchSubmit.sh', 'w') as outFile :
    outFile.write('''#!/bin/bash

# Batch file for submission. Will be overwritten by getNewRuns.py!
mkdir -p logs
Example 2
    runDict['batchSubmitted'] = True
    availableDatasets = util.runGetDatasetsAvailable(runNo)
    # Keep only Express FEVT datasets (dataset names matching /Express.*/FEVT)
    useableDatasets = filter(lambda d : re.match('/Express.*/FEVT', d), availableDatasets)
    if len(useableDatasets) > 0 :
        runDict['datasetUsed'] = useableDatasets[0]
    else :
        print 'Removing run %d due to no Express FEVT datasets available' % runNo
        runsNoDataset.append(runNo)

# Drop every run that had no usable Express FEVT dataset; map() is eager in
# Python 2, so newRuns.pop() is called for each run number in runsNoDataset.
map(newRuns.pop, runsNoDataset)

print 'Found %d new runs!' % len(newRuns)

# Guard against runs being too new for DAS: walk the new runs newest-first,
# dropping any that have no files yet, and stop at the first run that does.
for runNo in sorted(newRuns.keys(), reverse=True) :
    files = util.getFilesForRun(runNo, newRuns[runNo]['datasetUsed'])
    if len(files) > 0 :
        break
    print 'Removing run %d due to no files present in DAS yet' % runNo
    newRuns.pop(runNo)

runCache.update(newRuns)

with open('runCache.json', 'w') as cacheFile :
    json.dump(runCache, cacheFile, indent=4, sort_keys=True)

with open('batchSubmit.sh', 'w') as outFile :
    outFile.write('''#!/bin/bash

# Batch file for submission. Will be overwritten by getNewRuns.py!
mkdir -p logs
Example 3
options.register('runNumber', 0, VarParsing.multiplicity.singleton, VarParsing.varType.int, 'Run to analyze')
options.register('lumis', '1-max', VarParsing.multiplicity.singleton, VarParsing.varType.string, 'Lumis')
options.register('dataStream', '/ExpressPhysics/Run2015D-Express-v3/FEVT', VarParsing.multiplicity.singleton, VarParsing.varType.string, 'Dataset to look for run in')
options.register('inputFiles', [], VarParsing.multiplicity.list, VarParsing.varType.string, 'Manual file list input, will query DAS if empty')
options.register('inputFileList', '', VarParsing.multiplicity.singleton, VarParsing.varType.string, 'Manual file list input, will query DAS if empty')
options.register('useORCON', False, VarParsing.multiplicity.singleton, VarParsing.varType.bool, 'Use ORCON for conditions. This is necessary for very recent runs where conditions have not propagated to Frontier')
options.parseArguments()

def formatLumis(lumistring, run) :
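    # Expand 'a-b,c-d' into run-qualified lumi ranges, e.g. (with an arbitrary run number)
    # formatLumis('1-20,25-max', 251883) -> ['251883:1-251883:20', '251883:25-251883:max']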
    lumis = (lrange.split('-') for lrange in lumistring.split(','))
    runlumis = (['%d:%s' % (run,lumi) for lumi in lrange] for lrange in lumis)
    return ['-'.join(l) for l in runlumis]

print 'Getting files for run %d...' % options.runNumber
if len(options.inputFiles) == 0 and options.inputFileList == '' :
    inputFiles = util.getFilesForRun(options.runNumber, options.dataStream)
elif len(options.inputFileList) > 0 :
    with open(options.inputFileList) as f :
        inputFiles = list((line.strip() for line in f))
else :
    inputFiles = cms.untracked.vstring(options.inputFiles)
if len(inputFiles) == 0 :
    raise Exception('No files found for dataset %s run %d' % (options.dataStream, options.runNumber))
print 'Ok, time to analyze'
process = cms.Process("RCTofflineTEST")

process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet( reportEvery = cms.untracked.int32(1000) )
process.MessageLogger.cerr.WARNING = cms.untracked.PSet( limit = cms.untracked.int32(100) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
Example 4
options = VarParsing()
options.register('runNumber', 0, VarParsing.multiplicity.singleton, VarParsing.varType.int, 'Run to analyze')
options.register('lumis', '1-max', VarParsing.multiplicity.singleton, VarParsing.varType.string, 'Lumis')
options.register('dataStream', '/ExpressPhysics/Run2015D-Express-v3/FEVT', VarParsing.multiplicity.singleton, VarParsing.varType.string, 'Dataset to look for run in')
options.register('inputFiles', [], VarParsing.multiplicity.list, VarParsing.varType.string, 'Manual file list input, will query DAS if empty')
options.register('useORCON', False, VarParsing.multiplicity.singleton, VarParsing.varType.bool, 'Use ORCON for conditions. This is necessary for very recent runs where conditions have not propagated to Frontier')
options.parseArguments()

def formatLumis(lumistring, run) :
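    # Expand 'a-b,c-d' into run-qualified lumi ranges, e.g. (with an arbitrary run number)
    # formatLumis('1-20,25-max', 251883) -> ['251883:1-251883:20', '251883:25-251883:max']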
    lumis = (lrange.split('-') for lrange in lumistring.split(','))
    runlumis = (['%d:%s' % (run,lumi) for lumi in lrange] for lrange in lumis)
    return ['-'.join(l) for l in runlumis]

print 'Getting files for run %d...' % options.runNumber
if len(options.inputFiles) == 0 :
    inputFiles = util.getFilesForRun(options.runNumber, options.dataStream)
else :
    inputFiles = cms.untracked.vstring(options.inputFiles)
if len(inputFiles) == 0 :
    raise Exception('No files found for dataset %s run %d' % (options.dataStream, options.runNumber))
print 'Ok, time to analyze'
process = cms.Process("RCTofflineTEST")

process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport = cms.untracked.PSet( reportEvery = cms.untracked.int32(1000) )
process.MessageLogger.cerr.WARNING = cms.untracked.PSet( limit = cms.untracked.int32(100) )
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )

process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_condDBv2_cff')
process.GlobalTag.globaltag = '74X_dataRun2_Express_v1'
if options.useORCON :