# --- PAT-tuple cms.Process setup fragment (Python 2 / CMSSW) ---
# Validates the global tag, registers event counters, stamps build metadata
# into the provenance, and prepends the event counter to every skim path.
# NOTE(review): newlines were lost in this chunk; line structure below is
# reconstructed — tokens are unchanged.
if options.globalTag == "":
    raise RuntimeError( "Global tag not specified! Try sourcing environment.sh\n")
else:
    print 'Using globalTag: %s' % options.globalTag
process.GlobalTag.globaltag = cms.string(options.globalTag)

# Count events at the beginning of the pat tuplization
process.load("FinalStateAnalysis.RecoTools.eventCount_cfi")
process.load("FinalStateAnalysis.RecoTools.dqmEventCount_cfi")

# Hack meta information about this PAT tuple in the provenance.
process.eventCount.uwMeta = cms.PSet(
    # The git commit
    commit=cms.string(fsa_version()),
    # Username and UTC build timestamp recorded alongside the commit.
    user=cms.string(get_user()),
    date=cms.string(time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())),
)

process.schedule = cms.Schedule()

# Load all of our skim paths
process.load("FinalStateAnalysis.RecoTools.uwSkims_cfi")

# PAT tuplize all skim paths
for skim_path in process.skimConfig.paths:
    print "Building skim path:", skim_path
    the_path = getattr(process, skim_path)
    # Count every event, even the ones that fail the skim
    the_path.insert(0, process.eventCount)
# NOTE(review): chunk is truncated here — the body of this conditional (and
# whether it belongs inside the loop above) is not in view. TODO confirm.
if options.isMC and not options.embedded:
# --- CRAB submission-script fragment (Python 2) ---
# Parses a job ID plus optional sample wildcards, then iterates the sample
# catalogue, skipping samples that match none of the requested patterns.
# NOTE(review): `argparse`, `fnmatch`, and `datadefs` are used below but not
# imported/defined in this chunk — presumably bound earlier in the file.
from FinalStateAnalysis.Utilities.version import fsa_version
from FinalStateAnalysis.PatTools.pattuple_option_configurator import \
    configure_pat_tuple
import os
import sys

parser = argparse.ArgumentParser(description='Build PAT Tuple CRAB submission')
parser.add_argument('jobid', help='Job ID identifier')
parser.add_argument('--samples', nargs='+', type=str, required=False,
                    help='Filter samples using list of patterns (shell style)')
args = parser.parse_args()

cfg = 'patTuple_cfg.py'
jobId = args.jobid

# Output is meant to be piped into a shell (see the TERMCAP export below).
print " # Job ID: %s Version: %s" % (jobId, fsa_version())
print 'export TERMCAP=screen'
for sample in sorted(datadefs.keys()):
    sample_info = datadefs[sample]

    passes_filter = True
    # Filter by sample wildcards
    if args.samples:
        passes_wildcard = False
        for pattern in args.samples:
            # Case-sensitive shell-style match (fnmatchcase).
            if fnmatch.fnmatchcase(sample, pattern):
                passes_wildcard = True
        passes_filter = passes_wildcard and passes_filter
    # NOTE(review): chunk is truncated after this statement — the rest of the
    # loop body is not in view.
    if not passes_filter:
        continue
# --- CRAB submission-script fragment, variant with xrootd/run-range flags ---
# Completes argument parsing, defines a wildcard helper, and collects the
# sample keys selected by --samples into `to_be_used`.
# NOTE(review): `parser`, `fnmatch`, `fsa_version`, and `datadefs` are bound
# earlier in the file, outside this chunk.
parser.add_argument('--xrootd', action='store_true', required=False,
                    default=False,
                    help='fetch files from remote tiers using xrootd')
parser.add_argument('--ignoreRunRange', action='store_true', required=False,
                    default=False,
                    help='ignores the run range passed from datadefs')
args = parser.parse_args()

cfg = 'patTuple_cfg.py'
jobId = args.jobid

# Output is meant to be piped into a shell.
print " # Job ID: %s Version: %s" % (jobId, fsa_version())
print 'export TERMCAP=screen'


def any_matches(regexes, string):
    """Return True if `string` matches any shell-style pattern in `regexes`.

    Uses case-sensitive fnmatch semantics despite the `regexes` name.
    """
    for regex in regexes:
        if fnmatch.fnmatchcase(string, regex):
            return True
    return False


# Collect the dataset keys the user asked for.
to_be_used = []
for key, info in datadefs.iteritems():
    # NOTE(review): chunk is truncated after the append — handling of samples
    # when --samples is absent is not in view.
    if args.samples:
        if any_matches(args.samples, key):
            to_be_used.append(key)
# --- crab.cfg emission fragment (Python 2) ---
# Writes one CRAB configuration stanza for `sample` into the open file `f`,
# then dumps the per-sample CMSSW config by invoking patTuple_cfg.py.
# NOTE(review): `f`, `sample`, `sample_info`, and `jobId` are bound outside
# this chunk; reconstructed line structure, tokens unchanged.
options = configure_pat_tuple(sample, sample_info)
f.write('CMSSW.datasetpath = '+sample_info['datasetpath']+'\n')
f.write('CMSSW.pset = ')
f.write(sample+'_cfg.py\n')
if 'data' not in sample and 'embedded' not in sample:
    # MC: split jobs by event count.
    f.write('CMSSW.total_number_of_events = -1\nCMSSW.events_per_job = 7000\n')
else:
    # Data/embedded: split by lumi section and apply a certification mask.
    f.write('CMSSW.total_number_of_lumis = -1\nCMSSW.lumis_per_job = 30\n')
    lumi_mask_fip = sample_info['lumi_mask']
    lumi_mask_path = os.path.join(os.environ['CMSSW_BASE'], 'src',
                                  lumi_mask_fip)
    f.write('CMSSW.lumi_mask = %s\n' % lumi_mask_path)
    # Apply a run selection
    if 'firstRun' in sample_info:
        f.write('CMSSW.runselection = %i-%i\n' % (sample_info['firstRun'],
                                                  sample_info['lastRun']))
# Ask patTuple_cfg.py to dump the fully-resolved config for this sample.
options.append('dumpCfg='+jobId+'/'+sample+'_cfg.py')
opts= ' '.join(options)
print "python patTuple_cfg.py "+opts
os.system("python patTuple_cfg.py "+opts)
f.write('USER.publish_data_name = '+sample+"_%s-%s\n" % (jobId, fsa_version()))
if 'dbs' in sample_info:
    # NOTE(review): no space after '=' in the emitted line — presumably CRAB's
    # cfg parser tolerates this; confirm before changing.
    f.write('CMSSW.dbs_url =http://cmsdbsprod.cern.ch/'+sample_info['dbs']+'/servlet/DBSServlet\n')
f.write('\n\n')
f.close()
# --- tuple-listing fragment with sample/subsample wildcard filtering ---
# NOTE(review): this chunk begins mid-statement — the string being %-formatted
# with os.environ['CMSSW_BASE'] is outside this view; left as found.
%os.environ['CMSSW_BASE']
jobId = args.jobid


def filter_sample(filter, sample):
    """Return True if `sample` matches any shell-style pattern in `filter`.

    An empty/None `filter` accepts every sample.
    NOTE(review): parameter `filter` shadows the builtin of the same name.
    """
    if filter:
        passes_wildcard = False
        for pattern in filter:
            if fnmatch.fnmatchcase(sample, pattern):
                passes_wildcard = True
        return passes_wildcard
    else:
        return True


print " # Job ID: %s Version: %s" % (jobId, fsa_version())
print 'export TERMCAP=screen'
for sample in sorted(allTuples.keys()):
    subsamples = allTuples[sample]
    passes_filter = filter_sample(args.samples, sample)
    if not passes_filter:
        continue
    for subsample in subsamples.keys():
        # Skip the bookkeeping keys; everything else is a real subsample.
        if (subsample != 'tupleName' and subsample != 'tupleDate' and
                subsample != 'tupleRoot'):
            passes_filter = filter_sample(args.subsamples, subsample)
            # NOTE(review): chunk is truncated after this statement — the rest
            # of the inner loop body is not in view.
            if not passes_filter:
                continue
# --- PAT-tuple cms.Process setup fragment (PEP8-spaced duplicate of the same
# logic seen elsewhere in this file) ---
# Validates the global tag, registers event counters, stamps build metadata
# into the provenance, and prepends the event counter to every skim path.
# NOTE(review): newlines were lost in this chunk; line structure below is
# reconstructed — tokens are unchanged.
if options.globalTag == "":
    raise RuntimeError("Global tag not specified! Try sourcing environment.sh\n")
else:
    print 'Using globalTag: %s'%options.globalTag
process.GlobalTag.globaltag = cms.string(options.globalTag)

# Count events at the beginning of the pat tuplization
process.load("FinalStateAnalysis.RecoTools.eventCount_cfi")
process.load("FinalStateAnalysis.RecoTools.dqmEventCount_cfi")

# Hack meta information about this PAT tuple in the provenance.
process.eventCount.uwMeta = cms.PSet(
    # The git commit
    commit = cms.string(fsa_version()),
    # Username and UTC build timestamp recorded alongside the commit.
    user = cms.string(get_user()),
    date = cms.string(time.strftime("%d %b %Y %H:%M:%S +0000", time.gmtime())),
)

process.schedule = cms.Schedule()

# Load all of our skim paths
process.load("FinalStateAnalysis.RecoTools.uwSkims_cfi")

# PAT tuplize all skim paths
for skim_path in process.skimConfig.paths:
    print "Building skim path:", skim_path
    the_path = getattr(process, skim_path)
    # Count every event, even the ones that fail the skim
    the_path.insert(0, process.eventCount)
# NOTE(review): chunk is truncated here — the body of this conditional (and
# whether it belongs inside the loop above) is not in view. TODO confirm.
if options.isMC and not options.embedded:
# --- CRAB submission-script fragment, variant adding --dbsnames/--lumimask ---
# Completes argument parsing, defines a wildcard helper, and collects the
# dataset keys selected by --samples (and, presumably, by --dbsnames).
# NOTE(review): `parser`, `fnmatch`, `fsa_version`, and `datadefs` are bound
# earlier in the file, outside this chunk.
parser.add_argument('--samples', nargs='+', type=str, required=False,
                    help='Filter samples using list of patterns (shell style)')
parser.add_argument('--dbsnames', nargs='+', type=str, required=False,
                    help='use full DBS names')
parser.add_argument('--lumimask', type=str, required=False,
                    help='Optionally override the lumi mask used.')
parser.add_argument('--xrootd', action='store_true', required=False,
                    default=False,
                    help='fetch files from remote tiers using xrootd')
parser.add_argument('--ignoreRunRange', action='store_true', required=False,
                    default=False,
                    help='ignores the run range passed from datadefs')
args = parser.parse_args()

cfg = 'patTuple_cfg.py'
jobId = args.jobid

# Output is meant to be piped into a shell.
print " # Job ID: %s Version: %s" % (jobId, fsa_version())
print 'export TERMCAP=screen'


def any_matches(regexes, string):
    """Return True if `string` matches any shell-style pattern in `regexes`.

    Uses case-sensitive fnmatch semantics despite the `regexes` name.
    """
    for regex in regexes:
        if fnmatch.fnmatchcase(string, regex):
            return True
    return False


to_be_used = []
for key, info in datadefs.iteritems():
    if args.samples:
        if any_matches(args.samples, key):
            to_be_used.append(key)
    # NOTE(review): nesting of this branch relative to `if args.samples:` is
    # ambiguous in the flattened source, and the chunk is truncated after the
    # assignment — TODO confirm against the original file.
    if 'datasetpath' in info and args.dbsnames:
        dbs = info['datasetpath']
# --- crab.cfg emission fragment (PEP8-spaced variant of the writer seen
# elsewhere in this file) ---
# Writes the remainder of one CRAB stanza for `sample` into the open file `f`,
# then dumps the per-sample CMSSW config by invoking patTuple_cfg.py.
# NOTE(review): this chunk begins mid-sequence — `f`, `sample`, `sample_info`,
# `options`, and `jobId` are bound outside this view.
f.write('CMSSW.pset = ')
f.write(sample + '_cfg.py\n')
if 'data' not in sample and 'embedded' not in sample:
    # MC: split jobs by event count.
    f.write(
        'CMSSW.total_number_of_events = -1\nCMSSW.events_per_job = 7000\n')
else:
    # Data/embedded: split by lumi section and apply a certification mask.
    f.write('CMSSW.total_number_of_lumis = -1\nCMSSW.lumis_per_job = 30\n')
    lumi_mask_fip = sample_info['lumi_mask']
    lumi_mask_path = os.path.join(os.environ['CMSSW_BASE'], 'src',
                                  lumi_mask_fip)
    f.write('CMSSW.lumi_mask = %s\n' % lumi_mask_path)
    # Apply a run selection
    if 'firstRun' in sample_info:
        f.write('CMSSW.runselection = %i-%i\n' % (sample_info['firstRun'],
                                                  sample_info['lastRun']))
# Ask patTuple_cfg.py to dump the fully-resolved config for this sample.
options.append('dumpCfg=' + jobId + '/' + sample + '_cfg.py')
opts = ' '.join(options)
print "python patTuple_cfg.py " + opts
os.system("python patTuple_cfg.py " + opts)
f.write('USER.publish_data_name = ' + sample + "_%s-%s\n" % (jobId,
                                                             fsa_version()))
if 'dbs' in sample_info:
    # NOTE(review): no space after '=' in the emitted line — presumably CRAB's
    # cfg parser tolerates this; confirm before changing.
    f.write('CMSSW.dbs_url =http://cmsdbsprod.cern.ch/' + sample_info['dbs'] +
            '/servlet/DBSServlet\n')
f.write('\n\n')
f.close()