def readInExternals(self):
    """Fill in missing dataset metadata from the Kappa dataset database.

    Resolves "NumberGeneratedEvents", "CrossSection" and "GeneratorWeight"
    for the sample identified by ``self._config["Nickname"]`` whenever the
    entry is absent (or negative) in ``self._config``.  For MC samples
    (``not isData(...)``) a missing event count or cross section is fatal:
    the error is logged and the process exits via ``sys.exit(1)``.
    Imports of the Kappa helpers are kept local so they only happen when a
    lookup is actually needed.
    """
    # Event count: fetch from the dataset DB when unset or negative.
    if "NumberGeneratedEvents" not in self._config or int(self._config["NumberGeneratedEvents"]) < 0:
        from Kappa.Skimming.registerDatasetHelper import get_n_generated_events_from_nick
        from Kappa.Skimming.datasetsHelper2015 import isData
        n_events_from_db = get_n_generated_events_from_nick(self._config["Nickname"])
        if n_events_from_db > 0:
            self._config["NumberGeneratedEvents"] = n_events_from_db
        elif not isData(self._config["Nickname"]):
            # Only fatal for MC; data samples may legitimately lack this entry.
            log.fatal("Number of Generated Events not set! Check your datasets.json for nick " + self._config["Nickname"])
            sys.exit(1)

    # Cross section: fetch from the dataset DB when unset or negative.
    # NOTE(review): unlike the event count above, the stored value is not
    # cast before the "< 0" comparison -- assumes it is already numeric.
    if "CrossSection" not in self._config or self._config["CrossSection"] < 0:
        from Kappa.Skimming.registerDatasetHelper import get_xsec
        from Kappa.Skimming.datasetsHelper2015 import isData
        xsec = get_xsec(self._config["Nickname"])
        if xsec > 0:
            self._config["CrossSection"] = xsec
        elif not isData(self._config["Nickname"]):
            log.fatal("Cross section for " + self._config["Nickname"] + " not set! Check your datasets.json")
            sys.exit(1)

    # Generator weight: optional; only adopt values in the range (0, 1].
    if "GeneratorWeight" not in self._config:
        from Kappa.Skimming.registerDatasetHelper import get_generator_weight
        generator_weight = get_generator_weight(self._config["Nickname"])
        if 0 < generator_weight <= 1.0:
            self._config["GeneratorWeight"] = generator_weight
	def readInExternals(self):
		"""Fill in missing dataset metadata from the Kappa dataset database.

		Resolves "NumberGeneratedEvents", "CrossSection" and "GeneratorWeight"
		for the sample named by self._config["Nickname"] whenever the entry is
		missing (or negative) in self._config.  For MC samples a missing event
		count or cross section is fatal and terminates the process.
		"""
		# Event count: look up in the dataset DB when unset or negative.
		if not "NumberGeneratedEvents" in self._config or (int(self._config["NumberGeneratedEvents"]) < 0):
			from Kappa.Skimming.registerDatasetHelper import get_n_generated_events_from_nick
			from Kappa.Skimming.datasetsHelper2015 import isData
			n_events_from_db = get_n_generated_events_from_nick(self._config["Nickname"])
			if(n_events_from_db > 0):
				self._config["NumberGeneratedEvents"] = n_events_from_db
			elif not isData(self._config["Nickname"]):
				# Only fatal for MC; data samples may legitimately lack this entry.
				log.fatal("Number of Generated Events not set! Check your datasets.json for nick " + self._config["Nickname"])
				sys.exit(1)

		# Cross section: look up in the dataset DB when unset or negative.
		if not ("CrossSection" in self._config) or (self._config["CrossSection"] < 0):
			from Kappa.Skimming.registerDatasetHelper import get_xsec
			from Kappa.Skimming.datasetsHelper2015 import isData
			xsec = get_xsec(self._config["Nickname"])
			if(xsec > 0):
				self._config["CrossSection"] = xsec
			elif not isData(self._config["Nickname"]):
				log.fatal("Cross section for " + self._config["Nickname"] + " not set! Check your datasets.json")
				sys.exit(1)

		# Generator weight: optional; only accept values in the range (0, 1].
		if not ("GeneratorWeight" in self._config):
			from Kappa.Skimming.registerDatasetHelper import get_generator_weight
			from Kappa.Skimming.datasetsHelper2015 import isData
			generator_weight = get_generator_weight(self._config["Nickname"])
			if(generator_weight > 0 and generator_weight <= 1.0):
				self._config["GeneratorWeight"] = generator_weight
# Example #3 (score: 0)
def submission():
	"""Create and submit one CRAB task for each Fall15 sample nickname."""
	from CRABClient.UserUtilities import config
	cfg = config()

	# Task bookkeeping: working area plus output/log staging.
	cfg.General.workArea = '/nfs/dust/cms/user/%s/crab_kappa_skim-%s'%(getUsernameFromSiteDB(), date)
	check_path(cfg.General.workArea)
	cfg.General.transferOutputs = True
	cfg.General.transferLogs = True
	cfg.User.voGroup = 'dcms'

	# Job payload: the kappa skimming cmsRun configuration.
	cfg.JobType.pluginName = 'Analysis'
	cfg.JobType.psetName = 'kSkimming_run2_cfg.py'
	cfg.JobType.allowUndistributedCMSSW = True

	# Data handling: one input file per job, no publication.
	cfg.Site.blacklist = ["T2_BR_SPRACE"]
	cfg.Data.inputDBS = 'global'
	cfg.Data.splitting = 'FileBased'
	cfg.Data.unitsPerJob = 1
	cfg.Data.outLFNDirBase = '/store/user/%s/higgs-kit/skimming/%s'%(getUsernameFromSiteDB(), date)
	cfg.Data.publication = False
	cfg.Site.storageSite = "T2_DE_DESY"

	# Flatten the gc-style sample configuration into a list of nicknames.
	nicknames = read_grid_control_includes(["samples/13TeV/Fall15_SM_Analysis.conf"])

	# One submission per dataset nickname, each run in a child process.
	for nickname in nicknames:
		cfg.General.requestName = nickname
		global_tag = 'globalTag=76X_dataRun2_16Dec2015_v0' if isData(nickname) else 'globalTag=76X_mcRun2_asymptotic_RunIIFall15DR76_v1'
		cfg.JobType.pyCfgParams = [global_tag, 'kappaTag=KAPPA_2_1_0', 'nickname=%s'%(nickname), 'outputfilename=kappa_%s.root'%(nickname), 'testsuite=False']
		cfg.JobType.outputFiles = ['kappa_%s.root'%(nickname)]
		cfg.Data.inputDataset = get_sample_by_nick(nickname)
		worker = Process(target=submit, args=(cfg,))
		worker.start()
		worker.join()
# Example #4 (score: 0)
def submission(events_per_job):
	from CRABClient.UserUtilities import config
	config = config()
	config.General.workArea = '/nfs/dust/cms/user/%s/kappa/crab_kappa_skim80X-%s'%(getUsernameFromSiteDB(), date)
	#config.General.workArea = '/net/scratch_cms/institut_3b/%s/kappa/crab_kappa_skim-%s'%(getUsernameFromSiteDB(), date)
	#config.General.workArea = '/nfs/dust/cms/user/<your-NAF-username>/kappa/crab_kappa_skim80X-%s'% date  #if CERN-username != NAF-username
	check_path(config.General.workArea)
	config.General.transferOutputs = True
	config.General.transferLogs = True
	config.User.voGroup = 'dcms'
	
	config.JobType.pluginName = 'Analysis'
	config.JobType.psetName = 'kSkimming_run2_cfg.py'
	#config.JobType.inputFiles = ['Spring16_25nsV6_DATA.db', 'Spring16_25nsV6_MC.db']
	config.JobType.allowUndistributedCMSSW = True
	config.Site.blacklist = ["T2_BR_SPRACE"]
	config.Data.splitting = 'FileBased'
	config.Data.unitsPerJob = 1
	config.Data.outLFNDirBase = '/store/user/%s/higgs-kit/skimming/80X_%s'%(getUsernameFromSiteDB(), date)
	config.Data.publication = False
	
	config.Site.storageSite = "T2_DE_DESY"
	# load nicknames form gc-style config files and write them to a flat nicknames list
	nicknames = read_grid_control_includes(["samples/13TeV/Summer16_SM_Analysis.conf"])
	#nicknames = read_grid_control_includes(["samples/13TeV/Spring16_SM_Higgs_CPmixing_2.conf"])
	#nicknames = read_grid_control_includes(["samples/13TeV/2016B_Data.conf"])
	#nicknames = ['SUSYGluGluToHToTauTauM160_RunIIFall15MiniAODv2_76X_13TeV_MINIAOD_pythia8']

	# loop over datasets and get repsective nicks
	for nickname in nicknames:
		config.General.requestName = nickname[:100]
		config.Data.inputDBS = get_inputDBS_by_nick(nickname)
		config.Data.unitsPerJob = 1
		nfiles = get_n_files_from_nick(nickname)
		if events_per_job:
			nevents = get_n_generated_events_from_nick(nickname)
			try:
				if int(nfiles) > 0 and int(nevents) > 0:
					files_per_job = int(events_per_job) * int(nfiles) / int(nevents)
					if files_per_job > 1:
						config.Data.unitsPerJob = int(files_per_job)
			except:
				print "Its not possilbe to make ",events_per_job," events/job for ",nickname," which has Nevents:",nevents," and Nfiles",nfiles," in the database. Just make one file per job"
		if float(config.Data.unitsPerJob) > 0 and float(nfiles)/float(config.Data.unitsPerJob) >= job_submission_limit:
			files_per_job = ceil(float(nfiles)/job_submission_limit)
			if files_per_job > 1:
				config.Data.unitsPerJob = int(files_per_job)

		config.JobType.pyCfgParams = ['globalTag=80X_dataRun2_2016SeptRepro_v7' if isData(nickname) else 'globalTag=80X_mcRun2_asymptotic_2016_TrancheIV_v8' if "PUMoriond17" in getScenario(nickname) else 'globalTag=80X_mcRun2_asymptotic_2016_miniAODv2_v1' ,'kappaTag=KAPPA_2_1_0','nickname=%s'%(nickname),'outputfilename=kappa_%s.root'%(nickname),'testsuite=False']
		config.JobType.outputFiles = ['kappa_%s.root'%(nickname)]
		config.Data.inputDataset = get_sample_by_nick(nickname)
		#config.Data.lumiMask = '/nfs/dust/cms/user/<NAF-username>/kappa/crab_kappa_skim80X-<campaign-date>/results/missingLumis.json' # for running of a subset of lumi sections
		p = Process(target=submit, args=(config,))
		p.start()
		p.join()
# Example #5 (score: 0)
def submission():
    """Build a CRAB configuration and submit a task per Fall15 nickname."""
    from CRABClient.UserUtilities import config
    crab_config = config()

    # General task settings: working area on the NAF and transfer options.
    crab_config.General.workArea = '/nfs/dust/cms/user/%s/crab_kappa_skim-%s' % (
        getUsernameFromSiteDB(), date)
    check_path(crab_config.General.workArea)
    crab_config.General.transferOutputs = True
    crab_config.General.transferLogs = True
    crab_config.User.voGroup = 'dcms'

    # Job payload: the kappa skimming cmsRun configuration.
    crab_config.JobType.pluginName = 'Analysis'
    crab_config.JobType.psetName = 'kSkimming_run2_cfg.py'
    crab_config.JobType.allowUndistributedCMSSW = True

    # Data handling: one input file per job, no publication.
    crab_config.Site.blacklist = ["T2_BR_SPRACE"]
    crab_config.Data.inputDBS = 'global'
    crab_config.Data.splitting = 'FileBased'
    crab_config.Data.unitsPerJob = 1
    crab_config.Data.outLFNDirBase = '/store/user/%s/higgs-kit/skimming/%s' % (
        getUsernameFromSiteDB(), date)
    crab_config.Data.publication = False
    crab_config.Site.storageSite = "T2_DE_DESY"

    # Flatten the gc-style sample configuration into a list of nicknames.
    nicknames = read_grid_control_includes(
        ["samples/13TeV/Fall15_SM_Analysis.conf"])

    # One submission per dataset nickname, each run in a child process.
    for nick in nicknames:
        crab_config.General.requestName = nick
        if isData(nick):
            tag = 'globalTag=76X_dataRun2_16Dec2015_v0'
        else:
            tag = 'globalTag=76X_mcRun2_asymptotic_RunIIFall15DR76_v1'
        crab_config.JobType.pyCfgParams = [
            tag,
            'kappaTag=KAPPA_2_1_0',
            'nickname=%s' % (nick),
            'outputfilename=kappa_%s.root' % (nick),
            'testsuite=False',
        ]
        crab_config.JobType.outputFiles = ['kappa_%s.root' % (nick)]
        crab_config.Data.inputDataset = get_sample_by_nick(nick)
        proc = Process(target=submit, args=(crab_config, ))
        proc.start()
        proc.join()
# Example #6 (score: 0)
def submission(events_per_job):
    from CRABClient.UserUtilities import config
    config = config()
    config.General.workArea = '/nfs/dust/cms/user/%s/kappa/crab_kappa_skim80X-%s' % (
        getUsernameFromSiteDB(), date)
    #config.General.workArea = '/net/scratch_cms/institut_3b/%s/kappa/crab_kappa_skim-%s'%(getUsernameFromSiteDB(), date)
    #config.General.workArea = '/nfs/dust/cms/user/<your-NAF-username>/kappa/crab_kappa_skim80X-%s'% date  #if CERN-username != NAF-username
    check_path(config.General.workArea)
    config.General.transferOutputs = True
    config.General.transferLogs = True
    config.User.voGroup = 'dcms'

    config.JobType.pluginName = 'Analysis'
    config.JobType.psetName = 'kSkimming_run2_cfg.py'
    #config.JobType.inputFiles = ['Spring16_25nsV6_DATA.db', 'Spring16_25nsV6_MC.db']
    config.JobType.allowUndistributedCMSSW = True
    config.Site.blacklist = ["T2_BR_SPRACE"]
    config.Data.splitting = 'FileBased'
    config.Data.unitsPerJob = 1
    config.Data.outLFNDirBase = '/store/user/%s/higgs-kit/skimming/80X_%s' % (
        getUsernameFromSiteDB(), date)
    config.Data.publication = False

    config.Site.storageSite = "T2_DE_DESY"
    # load nicknames form gc-style config files and write them to a flat nicknames list
    nicknames = read_grid_control_includes(
        ["samples/13TeV/Summer16_SM_Analysis.conf"])
    #nicknames = read_grid_control_includes(["samples/13TeV/Spring16_SM_Higgs_CPmixing_2.conf"])
    #nicknames = read_grid_control_includes(["samples/13TeV/2016B_Data.conf"])
    #nicknames = ['SUSYGluGluToHToTauTauM160_RunIIFall15MiniAODv2_76X_13TeV_MINIAOD_pythia8']

    # loop over datasets and get repsective nicks
    for nickname in nicknames:
        config.General.requestName = nickname[:100]
        config.Data.inputDBS = get_inputDBS_by_nick(nickname)
        config.Data.unitsPerJob = 1
        nfiles = get_n_files_from_nick(nickname)
        if events_per_job:
            nevents = get_n_generated_events_from_nick(nickname)
            try:
                if int(nfiles) > 0 and int(nevents) > 0:
                    files_per_job = int(events_per_job) * int(nfiles) / int(
                        nevents)
                    if files_per_job > 1:
                        config.Data.unitsPerJob = int(files_per_job)
            except:
                print "Its not possilbe to make ", events_per_job, " events/job for ", nickname, " which has Nevents:", nevents, " and Nfiles", nfiles, " in the database. Just make one file per job"
        if float(config.Data.unitsPerJob) > 0 and float(nfiles) / float(
                config.Data.unitsPerJob) >= job_submission_limit:
            files_per_job = ceil(float(nfiles) / job_submission_limit)
            if files_per_job > 1:
                config.Data.unitsPerJob = int(files_per_job)

        config.JobType.pyCfgParams = [
            'globalTag=80X_dataRun2_2016SeptRepro_v7' if isData(nickname) else
            'globalTag=80X_mcRun2_asymptotic_2016_TrancheIV_v8'
            if "PUMoriond17" in getScenario(nickname) else
            'globalTag=80X_mcRun2_asymptotic_2016_miniAODv2_v1',
            'kappaTag=KAPPA_2_1_0',
            'nickname=%s' % (nickname),
            'outputfilename=kappa_%s.root' % (nickname), 'testsuite=False'
        ]
        config.JobType.outputFiles = ['kappa_%s.root' % (nickname)]
        config.Data.inputDataset = get_sample_by_nick(nickname)
        #config.Data.lumiMask = '/nfs/dust/cms/user/<NAF-username>/kappa/crab_kappa_skim80X-<campaign-date>/results/missingLumis.json' # for running of a subset of lumi sections
        p = Process(target=submit, args=(config, ))
        p.start()
        p.join()