# Per-dataset CRAB submission tail (Collisions15 variant).
# NOTE(review): presumably runs inside a loop over `dataset` entries; `config`,
# `pyCfgParams`, `isData`, `dryrun`, `tasklistFile`, `submit_newthread` are
# defined earlier in the file — confirm against full source.
config.JobType.priority = dataset.priority
config.Data.inputDBS = dataset.inputDBS
label = dataset.label

# MC specific settings:
if not isData:
    pyCfgParams.append('data=0')

# Data specific settings: flag as data and apply the golden-JSON lumi mask.
# NOTE(review): this URL is plain http:// while the other variants of this
# script use https:// — confirm the server still serves it over http.
if isData:
    pyCfgParams.append('data=1')
    config.Data.lumiMask = 'http://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions15/13TeV/Cert_246908-260627_13TeV_PromptReco_Collisions15_25ns_JSON_v2.txt'
    #pyCfgParams.append('steps=analyze')  # Only run analyze step on data (Applicable if datasets has been skimmed already)

# Label specific settings
pyCfgParams.append('sample=' + label)

# Use this to turn on different steps
# pyCfgParams.append('steps=skim')
# pyCfgParams.append('steps=analyze')

config.JobType.pyCfgParams = pyCfgParams

# Submit and record the expected output path; `res` only exists when we
# actually submitted, so everything that uses it stays under this guard.
if not dryrun:
    res = submit_newthread(config, dryrun=dryrun)
    # uniquerequestname looks like '<timestamp>:<user>_<task>'; keep the id part.
    taskId = res['uniquerequestname'].split(':')[0]
    filePath = '%s%s/%s/%s/' % (config.Data.outLFNDirBase, config.Data.inputDataset.split('/')[1], config.Data.outputDatasetTag, taskId)
    print 'filePath:'
    print filePath
    tasklistFile.write(filePath)
    tasklistFile.write('\n')

# NOTE(review): in the original layout this close is presumably after the
# dataset loop, not once per dataset — confirm indentation against full source.
if not dryrun:
    tasklistFile.close()
# Per-dataset CRAB submission tail (2016 ReReco variant with HLT/jet-filter
# toggles). NOTE(review): presumably runs inside a loop over `dataset` entries;
# `config`, `pyCfgParams`, `isData`, `dryrun`, `tasklistFile`,
# `submit_newthread` are defined earlier in the file — confirm against full source.
config.JobType.priority = dataset.priority
config.Data.inputDBS = dataset.inputDBS
label = dataset.label
doHLT = dataset.doHLT
doJetFilter = dataset.doJetFilter

# MC specific settings:
if not isData:
    pyCfgParams.append('data=0')

# Data specific settings: flag as data, apply the 23Sep2016 ReReco golden-JSON
# lumi mask, and run both the skim and analyze steps.
if isData:
    pyCfgParams.append('data=1')
    config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
    pyCfgParams.append('steps=skim,analyze')

# Label specific settings; boolean toggles are passed as 0/1 via %d.
pyCfgParams.append('sample='+label)
pyCfgParams.append('doHLT=%d' % doHLT)
pyCfgParams.append('doJetFilter=%d' % doJetFilter)

config.JobType.pyCfgParams = pyCfgParams

# Submit and record the expected output path; `res` only exists when we
# actually submitted, so everything that uses it stays under this guard.
if not dryrun:
    res = submit_newthread(config, dryrun=dryrun)
    # uniquerequestname looks like '<timestamp>:<user>_<task>'; keep the id part.
    taskId = res['uniquerequestname'].split(':')[0]
    filePath = '%s%s/%s/%s/' % (
        config.Data.outLFNDirBase,
        config.Data.inputDataset.split('/')[1],
        config.Data.outputDatasetTag,
        taskId
    )
    print 'filePath:'
    print filePath
    tasklistFile.write(filePath)
    tasklistFile.write('\n')

# NOTE(review): in the original layout this close is presumably after the
# dataset loop, not once per dataset — confirm indentation against full source.
if not dryrun:
    tasklistFile.close()
# Per-dataset CRAB submission tail (Collisions15 variant that also copies the
# isData flag and job splitting from the dataset entry).
# NOTE(review): presumably runs inside a loop over `dataset` entries; `config`,
# `pyCfgParams`, `dryrun`, `tasklistFile`, `submit_newthread` are defined
# earlier in the file — confirm against full source.
isData = dataset.isData
config.Data.splitting = dataset.splitting
config.JobType.priority = dataset.priority
config.Data.inputDBS = dataset.inputDBS
label = dataset.label

# MC specific settings:
if not isData:
    pyCfgParams.append('data=0')

# Data specific settings: flag as data and apply the golden-JSON lumi mask.
if isData:
    pyCfgParams.append('data=1')
    config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions15/13TeV/Cert_246908-260627_13TeV_PromptReco_Collisions15_25ns_JSON_v2.txt'

# Label specific settings
pyCfgParams.append('sample='+label)

# Use this to turn on different steps
# pyCfgParams.append('steps=skim')
# pyCfgParams.append('steps=analyze')

config.JobType.pyCfgParams = pyCfgParams

# Submit and record the expected output path; `res` only exists when we
# actually submitted, so everything that uses it stays under this guard.
# NOTE(review): unlike the other variants, submit_newthread is called here
# without the dryrun keyword — confirm this is intentional.
if not dryrun:
    res = submit_newthread(config)
    # uniquerequestname looks like '<timestamp>:<user>_<task>'; keep the id part.
    taskId = res['uniquerequestname'].split(':')[0]
    filePath = '%s%s/%s/%s/' % (
        config.Data.outLFNDirBase,
        config.Data.inputDataset.split('/')[1],
        config.Data.outputDatasetTag,
        taskId
    )
    print 'filePath:'
    print filePath
    tasklistFile.write(filePath)
    tasklistFile.write('\n')

# NOTE(review): in the original layout this close is presumably after the
# dataset loop, not once per dataset — confirm indentation against full source.
if not dryrun:
    tasklistFile.close()
# Per-dataset CRAB submission tail (Collisions15 variant that copies the job
# splitting from the dataset entry; `isData` is set elsewhere).
# NOTE(review): presumably runs inside a loop over `dataset` entries; `config`,
# `pyCfgParams`, `isData`, `dryrun`, `tasklistFile`, `submit_newthread` are
# defined earlier in the file — confirm against full source.
config.Data.splitting = dataset.splitting
config.JobType.priority = dataset.priority
config.Data.inputDBS = dataset.inputDBS
label = dataset.label

# MC specific settings:
if not isData:
    pyCfgParams.append('data=0')

# Data specific settings: flag as data and apply the golden-JSON lumi mask.
if isData:
    pyCfgParams.append('data=1')
    config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions15/13TeV/Cert_246908-260627_13TeV_PromptReco_Collisions15_25ns_JSON_v2.txt'

# Label specific settings
pyCfgParams.append('sample=' + label)

# Use this to turn on different steps
# pyCfgParams.append('steps=skim')
# pyCfgParams.append('steps=analyze')

config.JobType.pyCfgParams = pyCfgParams

# Submit and record the expected output path; `res` only exists when we
# actually submitted, so everything that uses it stays under this guard.
# NOTE(review): submit_newthread is called here without the dryrun keyword,
# unlike some other variants of this script — confirm this is intentional.
if not dryrun:
    res = submit_newthread(config)
    # uniquerequestname looks like '<timestamp>:<user>_<task>'; keep the id part.
    taskId = res['uniquerequestname'].split(':')[0]
    filePath = '%s%s/%s/%s/' % (config.Data.outLFNDirBase, config.Data.inputDataset.split('/')[1], config.Data.outputDatasetTag, taskId)
    print 'filePath:'
    print filePath
    tasklistFile.write(filePath)
    tasklistFile.write('\n')

# NOTE(review): in the original layout this close is presumably after the
# dataset loop, not once per dataset — confirm indentation against full source.
if not dryrun:
    tasklistFile.close()