Example #1
cardDir += "_shape"    # guard condition elided in this excerpt
cardDir += "_lowUnc"
if args.WZtoPowheg:
    cardDir += "_WZreweight"
if args.expected:
    cardDir += "_expected"

## 2l setup ##
# not yet part of the game

## 3l setup ##
setup3l                 = Setup(year, nLeptons=3)
estimators3l            = estimatorList(setup3l)
setup3l.estimators      = estimators3l.constructEstimatorList(["WZ", "TTX", "TTW", "ZG", "rare", "ZZ"])
# with args.WZamc set, one would instead use constructEstimatorList(["WZ_amc", "TTX", "TTW", "ZG", "rare", "ZZ"])
setup3l.reweightRegions = regionsReweight
setup3l.channels        = [channel(-1,-1)] # == 'all'
setup3l.regions         = regionsE if not args.merged else noRegions
setup3l.year            = year

## 3l NP setup ##
setup3l_NP              = Setup(year, nLeptons=3, nonprompt=True)
setup3l_NP.channels     = [channel(-1,-1)] # == 'all'
setup3l_NP.regions      = regionsE if not args.merged else noRegions

## 4l setup ##
setup4l                 = Setup(year=year, nLeptons=4)
setup4l.parameters.update({'nJets':(1,-1), 'nBTags':(1,-1), 'zMassRange':20, 'zWindow2':"offZ"})
estimators4l            = estimatorList(setup4l)
setup4l.estimators      = estimators4l.constructEstimatorList(["ZZ", "rare","TTX"])
setup4l.reweightRegions = regionsReweight4l
setup4l.channels        = [channel(-1,-1)] # == 'all'
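
The (min, max) pairs used in parameters above (e.g. 'nJets':(1,-1)) appear to follow the convention that -1 means "no bound on that side", just as channel(-1,-1) stands for 'all'. A minimal sketch of that convention; cut_string is a hypothetical helper, not part of the analysis code:

# Minimal sketch of the (min, max) convention; cut_string is a hypothetical
# helper name, not part of the analysis package.
def cut_string(var, lo, hi):
    """Render a (lo, hi) requirement; -1 means 'no bound on this side'."""
    parts = []
    if lo >= 0:
        parts.append("%s>=%i" % (var, lo))
    if hi >= 0:
        parts.append("%s<=%i" % (var, hi))
    return "&&".join(parts) or "(1)"  # no bounds at all -> always true

print(cut_string("nJets", 1, -1))   # nJets>=1
print(cut_string("nBTags", 1, -1))  # nBTags>=1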
Example #2

jobs = []

# Drop the inclusive 'all' channel so it is not computed redundantly; 'all' is
# obtained later as the sum of the individual channels.
separateChannels3l = list(allTrilepChannels)  # copy, so pop() leaves the original list intact
allTrilepChannelNames = [c.name for c in allTrilepChannels]
separateChannels3l.pop(allTrilepChannelNames.index('all'))

separateChannels4l = list(allQuadlepChannels)  # same for the 4l channels
allQuadlepChannelNames4l = [c.name for c in allQuadlepChannels]
separateChannels4l.pop(allQuadlepChannelNames4l.index('all'))

if not options.skipCentral:
    # First run over the separate channels
    jobs.append((noRegions[0], channel(-1, -1), setupIncl3l))
    jobs.append((noRegions[0], channel(-1, -1), setupIncl4l))
    for var in variations:
        for c in separateChannels3l:
            jobs.append((noRegions[0], c,
                         setupIncl3l.systematicClone(sys={'reweight': [var]})))
        for c in separateChannels4l:
            jobs.append((noRegions[0], c,
                         setupIncl4l.systematicClone(sys={'reweight': [var]})))

## then one can sum up over all (currently done in the combine step)
#for var in variations:
#    jobs.append((noRegions[0], "all", setupIncl.systematicClone(sys={'reweight':[var]})))

if not options.combine:
    for region in regions:
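
The excerpt breaks off inside the per-region loop; by analogy with the channel loops above, it would append one (region, channel, setup) job per variation. What systematicClone has to guarantee for this pattern to be safe is that the original setup is never mutated. A minimal sketch of that contract, assuming it is implemented with a deep copy:

import copy

class SetupSketch(object):
    # Illustrative stand-in for the analysis Setup class; only the
    # systematicClone behaviour inferred from its call sites is modelled.
    def __init__(self, sys):
        self.sys = sys

    def systematicClone(self, sys=None):
        """Return a copy with the given sys entries overridden;
        the original setup is left untouched."""
        clone = copy.deepcopy(self)
        if sys:
            clone.sys.update(sys)
        return clone

s = SetupSketch({'weight': 'weight', 'reweight': []})
v = s.systematicClone(sys={'reweight': ['someVariation']})
assert s.sys['reweight'] == []                 # original unchanged
assert v.sys['reweight'] == ['someVariation']  # clone carries the variation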
Example #3
    def __init__(self, year=2017, nLeptons=3, nonprompt=False):
        self.name = "defaultSetup"
        self.channels = [channel(-1, -1)]
        self.resultsFile = 'calculatedLimits_%s.db' % self.name
        self.year = year
        self.nLeptons = nLeptons
        self.short = False

        if nLeptons == 1:
            self.tight_ID = "tight_3l"  # for parton shower study
            self.FO_ID = "FO_3l"
        elif nLeptons == 2:
            self.tight_ID = "tight_SS"
            self.FO_ID = "FO_SS"
        elif nLeptons == 3:
            self.tight_ID = "tight_3l"
            self.FO_ID = "FO_3l"
        elif nLeptons == 4:
            self.tight_ID = "tight_4l"
            self.FO_ID = "FO_4l"
        else:
            raise NotImplementedError("Can't handle %r leptons; only 1-4 are supported" % nLeptons)

        self.nonprompt = nonprompt
        self.leptonId = self.FO_ID if self.nonprompt else self.tight_ID

        self.default_sys = {
            'weight': 'weight',
            'reweight': ['reweightPU36fb', 'reweightBTagDeepCSV_SF'],
            'selectionModifier': None
        }  # trigger/lepton SF reweights ('reweightTrigger_%s' % self.leptonId, ...) are added per lepton multiplicity below
        if nLeptons == 1:
            # no trigger/lepton reweighting
            pass

        #elif nLeptons == 3:
        #    self.default_sys['reweight'] += ['reweightTrigger_tight_3l', 'reweightLeptonSF_tight_3l']
        #    if self.year == 2017: #in 2016 already included in leptonSF
        #        self.default_sys['reweight'] += ['reweightLeptonTrackingSF_tight_3l']
        #elif nLeptons == 4:
        #    self.default_sys['reweight'] += ['reweightTrigger_tight_4l', 'reweightLeptonSF_tight_4l']
        #    if self.year == 2017: #in 2016 already included in leptonSF
        #        self.default_sys['reweight'] += ['reweightLeptonTrackingSF_tight_4l']
        elif nLeptons == 3:
            self.default_sys['reweight'] += [
                'reweightTrigger_tight_3l', 'reweightLeptonSFSyst_tight_3l',
                'reweightEleSFStat_tight_3l', 'reweightMuSFStat_tight_3l',
                'reweightLeptonTrackingSF_tight_3l'
            ]
        elif nLeptons == 4:
            self.default_sys['reweight'] += [
                'reweightTrigger_tight_4l', 'reweightLeptonSFSyst_tight_4l',
                'reweightEleSFStat_tight_4l', 'reweightMuSFStat_tight_4l',
                'reweightLeptonTrackingSF_tight_4l'
            ]

        self.resultsColumns = [
            'signal', 'exp', 'obs', 'exp1up', 'exp1down', 'exp2up', 'exp2down',
            'NLL_prefit', 'dNLL_postfit_r1', 'dNLL_bestfit'
        ]
        self.uncertaintyColumns = ["region", "channel", "PDFset"]

        self.analysis_results = analysis_results
        self.prefixes = []
        self.externalCuts = []

        # Default cuts and requirements. The three attributes set below (parameters, sys, lumi) determine the key in the cache!
        self.parameters = copy.deepcopy(default_parameters)
        self.sys = self.default_sys
        if year == 2017:
            self.lumi = dataLumi2017
            self.dataLumi = dataLumi2017
        elif year == 2016:
            self.lumi = dataLumi2016
            self.dataLumi = dataLumi2016
        elif year == 20167:  # combined 2016+2017
            self.lumi = dataLumi201617
            self.dataLumi = dataLumi201617
        elif year == "run2":
            self.lumi = dataLumi20161718
            self.dataLumi = dataLumi20161718
        elif year == "HLLHC":
            self.lumi = dataHighLumi
            self.dataLumi = dataHighLumi
        else:
            raise NotImplementedError("Unknown year %r, no luminosity defined" % year)

        self.genSelection = "Sum$(GenJet_pt>30)>=3 && abs(Z_mass-91.2)<10 && (abs(Z_daughterPdg)==11 || abs(Z_daughterPdg)==13 || abs(Z_daughterPdg)==15)"
        self.WZselection = cutInterpreter.cutString(
            'trilep-Zcand-onZ-lepSelTTZ-njet1p')

        # Data: 2016 is the fallback for every non-2017 year setting
        if year == 2017:
            data = Run2017
        else:
            data = Run2016

        # MC
        if year == 2017:
            TTZSample = TTZtoLLNuNu_17
            WZSample = WZ_amcatnlo_17
            TTXSample = TTX_17
            TTWSample = TTW_17
            TZQSample = TZQ_17
            ZGSample = ZGTo2LG
            ZZSample = ZZ_17
            rareSample = rare_17
            nonpromptSample = nonpromptMC_17
            pseudoDataSample = pseudoData_17
            ttbarSample = TTLep_pow_17
        else:
            ## 2016 samples are the default (the combination happens at the card-file level)
            TTZSample = TTZtoLLNuNu
            WZSample = WZ_amcatnlo
            TTXSample = TTX
            TTWSample = TTW
            TZQSample = TZQ
            ZGSample = ZGTo2LG
            ZZSample = ZZ
            rareSample = rare
            nonpromptSample = nonpromptMC
            pseudoDataSample = pseudoData
            ttbarSample = TTLep_pow

        # removed the channel dependence.
        self.samples = {
            'TTZ': TTZSample,
            'WZ': WZSample,
            'TTX': TTXSample,
            'TTW': TTWSample,
            'TZQ': TZQSample,
            'ZG': ZGSample,
            'rare': rareSample,
            'ZZ': ZZSample,
            'nonprompt': nonpromptSample,
            'ttbar': ttbarSample,
            'pseudoData': pseudoDataSample,
            'Data': data,
        }
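
A brief usage sketch for the constructor above, assuming its module-level dependencies (samples, lumi constants, default_parameters, cutInterpreter, ...) resolve: nonprompt setups resolve leptonId to the FO ID, prompt ones to the tight ID.

# Usage sketch for the Setup class above; assumes the surrounding module
# is importable so the sample and lumi names resolve.
setup3l_NP = Setup(year=2017, nLeptons=3, nonprompt=True)
assert setup3l_NP.leptonId == "FO_3l"          # nonprompt -> FO ID

setup4l = Setup(year=2016, nLeptons=4)
assert setup4l.leptonId == "tight_4l"          # prompt -> tight ID
assert setup4l.samples['TTZ'] is TTZtoLLNuNu   # non-2017 falls back to the 2016 samples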
Example #4
for s in samples17:
    estimates17.append(MCBasedEstimate(name=s.name, sample=s))
    estimates17[-1].initCache(cacheDir)

jobs = []

def wrapper(args):
    e, r, c, setup = args
    res = e.cachedEstimate(r, c, setup, save=True, overwrite=options.overwrite)
    return (e.uniqueKey(r, c, setup), res)

print(regions)

for r in regions:
    for c in [channel(1,0), channel(0,1)]:
        logger.info("Working on 2016 results")
        for e in estimates16:
            jobs.append((e, r, c, setup16))
            #res = e.cachedEstimate(r, channel(-1,-1), setup16, save=True)
            #logger.info("Result: %s", res.val)
        logger.info("Working on 2017 results")
        for e in estimates17:
            jobs.append((e, r, c, setup17.systematicClone(sys={'reweight': ['PSweight_central']})))
            #res = e.cachedEstimate(r, channel(-1,-1), setup17, save=True)
            #logger.info("Result: %s", res.val)
            for weight in PS_indices:
                jobs.append((e, r, c, setup17.systematicClone(sys={'reweight': ['(%s)' % weight]})))
                #res = e.cachedEstimate(r, channel(-1,-1), setup17.systematicClone(sys={'reweight':['LHEweight_wgt[%s]/LHEweight_wgt[1080]'%weight]}), save=True)
                #logger.info("Result: %s", res.val)
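
cachedEstimate together with uniqueKey implies a straightforward key-value cache over (region, channel, setup). A minimal in-memory sketch of that contract; the real MCBasedEstimate persists results via initCache(cacheDir):

class CachedEstimateSketch(object):
    """Illustrative stand-in for MCBasedEstimate's caching contract."""
    def __init__(self, name):
        self.name = name
        self._cache = {}

    def uniqueKey(self, r, c, setup):
        # any stable, hashable digest of the inputs works as a key
        return (self.name, repr(r), repr(c), repr(setup))

    def cachedEstimate(self, r, c, setup, save=True, overwrite=False):
        # 'save' matters for an on-disk cache; ignored in this in-memory sketch
        key = self.uniqueKey(r, c, setup)
        if overwrite or key not in self._cache:
            self._cache[key] = self._compute(r, c, setup)  # the expensive step
        return self._cache[key]

    def _compute(self, r, c, setup):
        return 0.0  # placeholder for the actual yield calculation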
Example #5
    estimates17.append(MCBasedEstimate(name=s.name, sample=s))
    estimates17[-1].initCache(cacheDir)

jobs = []


def wrapper(args):
    e, r, c, setup = args
    res = e.cachedEstimate(r, c, setup, save=True, overwrite=options.overwrite)
    return (e.uniqueKey(r, c, setup), res)


print(regions)

for r in regions:
    for c in [channel(1, 0), channel(0, 1)]:
        logger.info("Working on 2016 results")
        for e in estimates16:
            jobs.append((e, r, c, setup16))
            #res = e.cachedEstimate(r, channel(-1,-1), setup16, save=True)
            #logger.info("Result: %s", res.val)
        logger.info("Working on 2017 results")
        for e in estimates17:
            jobs.append((e, r, c,
                         setup17.systematicClone(
                             sys={'reweight': ['PSweight_central']})))
            #res = e.cachedEstimate(r, channel(-1,-1), setup17, save=True)
            #logger.info("Result: %s", res.val)
            for weight in PS_indices:
                jobs.append((e, r, c,
                             setup17.systematicClone(
                                 sys={'reweight': ['(%s)' % weight]})))
Example #6
    estimates16.append(MCBasedEstimate(name=s.name, sample=s))
    estimates16[-1].initCache(cacheDir)

jobs = []


def wrapper(args):
    e, r, c, setup = args
    res = e.cachedEstimate(r, c, setup, save=True, overwrite=options.overwrite)
    return (e.uniqueKey(r, c, setup), res)


print(regions)

for r in noRegionsB:
    for c in [channel(1, 0), channel(0, 1)]:
        for e in estimates16:
            jobs.append((e, r, c, setup16))

for r in regions:
    for c in [channel(1, 0), channel(0, 1)]:
        logger.info("Working on 2016 results")
        for e in estimates16:
            jobs.append((e, r, c, setup16))
            #res = e.cachedEstimate(r, channel(-1,-1), setup16, save=True)
            #logger.info("Result: %s", res.val)

if options.noMultiThreading:
    results = map(wrapper, jobs)
else:
    from multiprocessing import Pool
    pool = Pool(processes=8)
    results = pool.map(wrapper, jobs)
    pool.close()
    pool.join()
Example #7
estimate = MCBasedEstimate(name='TTZ', sample=TTZ_CR)
estimate.initCache(cacheDir)

jobs = []


def wrapper(args):
    e, r, c, setup = args
    res = e.cachedEstimate(r, c, setup, save=True, overwrite=options.overwrite)
    return (e.uniqueKey(r, c, setup), res)


print(regions)

for r in regions:
    for c in [channel(3, 0), channel(2, 1), channel(1, 2), channel(0, 3)]:
        logger.info("Working on 2016 results")
        jobs.append((estimate, r, c, setup16))
        jobs.append(
            (estimate, r, c,
             setup16.systematicClone(sys={'reweight': ['reweightCR']})))

if options.noMultiThreading:
    results = map(wrapper, jobs)
else:
    from multiprocessing import Pool
    pool = Pool(processes=8)
    results = pool.map(wrapper, jobs)
    pool.close()
    pool.join()
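
The if/else around map vs. Pool.map recurs in Examples #2 through #7, and wrapper is deliberately a module-level function: Pool.map pickles its callable, so a closure or lambda would fail. A reusable sketch of the pattern; run_jobs is a hypothetical name:

def run_jobs(wrapper, jobs, multithreaded=True, processes=8):
    """Sketch of the serial/parallel dispatch used in the examples above."""
    if not multithreaded:
        return list(map(wrapper, jobs))   # serial fallback, handy for debugging
    from multiprocessing import Pool
    pool = Pool(processes=processes)
    try:
        return pool.map(wrapper, jobs)    # one (uniqueKey, result) pair per job
    finally:
        pool.close()
        pool.join()

Since wrapper returns (uniqueKey, result) pairs, the output drops straight into dict(results) for downstream lookup.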