Example #1
def getPlotter(self,analysis,region,runPeriod,mass,runTau,plotName,doFakes):
    # number of leptons in the final state
    nl = 3 if analysis=='Hpp3l' or analysis=='WZ' else 4
    ntuples = getNtupleDirectory(analysis,region,runPeriod)
    saves = '%s_%s_%iTeV' % (analysis,region,runPeriod)
    sigMap = getSigMap(nl,mass)
    intLumiMap = getIntLumiMap()
    mergeDict = getMergeDict(runPeriod)
    regionBackground = getChannelBackgrounds(runPeriod)
    channels, leptons = getChannels(nl,runTau=runTau)

    plotter = Plotter(region,ntupleDir=ntuples,saveDir=saves,period=runPeriod,rootName=plotName,mergeDict=mergeDict,scaleFactor=self.scalefactor)
    # when estimating fakes from data, skip initializing the MC backgrounds
    if not doFakes: plotter.initializeBackgroundSamples([sigMap[runPeriod][x] for x in regionBackground[analysis]])
    if runPeriod==8: plotter.initializeDataSamples([sigMap[runPeriod]['data']])
    plotter.setIntLumi(intLumiMap[runPeriod])

    return plotter
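
For context, a minimal call sketch; the argument values below are illustrative placeholders, not from the source:

# hypothetical call from inside the owning class (which must provide self.scalefactor)
plotter = self.getPlotter('Hpp3l','Hpp3l',8,500,runTau=False,plotName='myPlots',doFakes=False)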
Example #2
def calculateLeptonSystematic(mass,chanCuts,chanScales,**kwargs):
    do4l = kwargs.pop('do4l',False)
    analysis = 'Hpp3l'
    region = 'Hpp3l'
    runPeriod = 8
    nl = 3
    ntuples = getNtupleDirectory(analysis,region,runPeriod)
    saves = '%s_%s_%sTeV' % (analysis,region,runPeriod)
    sigMap = getSigMap(4,mass) if do4l else getSigMap(nl,mass)
    intLumiMap = getIntLumiMap()
    mergeDict = getMergeDict(runPeriod)
    regionBackground = {
        'Hpp3l' : ['T','TT', 'TTV','W','Z','VVV','WW','ZZ','WZ'],
        'Hpp4l' : ['TT','Z','DB']
    }
    channels, leptons = getChannels(nl)

    plotter = Plotter(analysis,ntupleDir=ntuples,saveDir=saves,period=runPeriod,rootName='systematics',mergeDict=mergeDict)
    plotter.initializeBackgroundSamples([sigMap[runPeriod][x] for x in regionBackground[analysis]])
    plotter.initializeSignalSamples([sigMap[runPeriod]['Sig']])
    plotter.initializeDataSamples([sigMap[runPeriod]['data']])
    plotter.setIntLumi(intLumiMap[runPeriod])

    # mass-dependent preselection; ZMASS is the Z boson mass constant defined elsewhere in the module
    fullCut = 'finalstate.mass>100 && finalstate.sT>1.1*%f+60. && fabs(z1.mass-%f)>80. && h1.dPhi<%f/600.+1.95' %(mass,ZMASS,mass)
    # signal region: symmetric mass window around the mass hypothesis
    finalSRCut = 'h1.mass>0.9*%f && h1.mass<1.1*%f' %(mass,mass)

    # sum the signal yield with nominal (scaleup=False) and scaled-up (scaleup=True)
    # lepton efficiencies; note the *BG names are historical -- these are signal yields
    totBG = 0
    totBG_scaled = 0
    for c,s in zip(chanCuts,chanScales):
        chanBG = s*plotter.getNumEntries('%s && %s && %s' %(c,fullCut,finalSRCut),plotter.signal[0],scaleup=False)
        chanBG_scaled = s*plotter.getNumEntries('%s && %s && %s' %(c,fullCut,finalSRCut),plotter.signal[0],scaleup=True)
        totBG += chanBG
        totBG_scaled += chanBG_scaled
    sigSelSys = (totBG_scaled-totBG)/totBG

    return sigSelSys+1  # 1 + relative shift, usable directly as an lnN nuisance size
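
A hedged usage sketch; the mass point, channel cut strings, and unit scales below are illustrative placeholders:

# hypothetical: lepton systematic for a 500 GeV mass point in two reco channels
chanCuts = ['channel=="eee"','channel=="eem"']
chanScales = [1.0,1.0]
print('lepton selection systematic (lnN): %0.3f' % calculateLeptonSystematic(500,chanCuts,chanScales))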
Example #3
def limit(analysis,region,period,mass,**kwargs):
    cut = kwargs.pop('cut','1')
    doChannels = kwargs.pop('doChannels',False)
    doAlphaTest = kwargs.pop('doAlphaTest',False)
    unblind = kwargs.pop('unblind',False)
    name = kwargs.pop('name','card')
    bp = kwargs.pop('bp','')
    directory = kwargs.pop('directory','')
    channelScales = kwargs.pop('channelScales',[1.0])
    channelCuts = kwargs.pop('channelCuts',['1'])
    recoChannels = kwargs.pop('recoChannels',['1'])
    genChannels = kwargs.pop('genChannels',['1'])
    mode = kwargs.pop('mode','sideband')
    scalefactor = kwargs.pop('scalefactor','event.gen_weight*event.pu_weight*event.lep_scale*event.trig_scale')
    datacardDir = kwargs.pop('datacardDir','./datacards')
    do4l = kwargs.pop('do4l',False)
    doBoth = kwargs.pop('doBoth',False)
    logging.info("Processing BP %s; mass-point %i; card name %s" % (bp,mass,name))

    # get the cut maps for each gen channel
    genCutMap = {}
    alphaGenCuts = {}
    for genChan in genChannels:
        genCutMap[genChan] = getChannelCutFlowMap(analysis,genChan,mass=mass)
        alphaGenCuts[genChan] = getChannelSidebandSignalRegion(analysis,genChan,mass=mass)

    # and the reco channel
    recoCutMap = {}
    alphaRecoCuts = {}
    for recoChan in recoChannels:
        recoCutMap[recoChan] = getChannelCutFlowMap(analysis,recoChan,mass=mass)
        alphaRecoCuts[recoChan] = getChannelSidebandSignalRegion(analysis,recoChan,mass=mass)

    # if we are testing a tau hypothesis, explicitly use a wider mass window and the tau cuts;
    # otherwise just use the reco channel cuts
    if bp in ['et100','mt100','tt100']:
        theCutMap = getChannelCutFlowMap(analysis,bp[:2]+bp[:2],mass=mass)
        theAlphaCuts = getChannelSidebandSignalRegion(analysis,bp[:2]+bp[:2],mass=mass)
    else:
        theCutMap = recoCutMap[recoChannels[0]]       # only support 1 reco channel at a time
        theAlphaCuts = alphaRecoCuts[recoChannels[0]]

    fullCut = ' && '.join(theCutMap['cuts'])

    # setup sideband stuff
    chanCuts = '(' + ' || '.join(channelCuts) + ')' # gen cuts for h++ ORed with the default for BG 'aaa'
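    # illustrative (hypothetical values): channelCuts=['genChannel=="eee"','genChannel=="aaa"']
    # would yield chanCuts = '(genChannel=="eee" || genChannel=="aaa")'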

    sbCut = theAlphaCuts['sbcut']
    srCut = theAlphaCuts['srcut']
    finalSRCut = theAlphaCuts['srcut']

    nl = 3 if analysis in ['Hpp3l', 'WZ'] else 4
    channels, leptons = getChannels(nl)
    sigMap = getSigMap(4,mass) if do4l else getSigMap(nl,mass)
    intLumiMap = getIntLumiMap()


    # setup for final selection
    myCut = cut

    sbcut = '%s && %s && %s' %(myCut,sbCut,chanCuts)
    srcut = '%s && %s && %s && %s' %(myCut,srCut,fullCut, chanCuts)
    base_selections = '%s && %s && %s' %(myCut, srCut, fullCut)
    if region=='WZ': srcut = '%s && %s && %s' %(myCut,srCut, chanCuts)
    if doAlphaTest:
        sbcut = '%s && finalstate.sT<150. && z1.mass<110. && h1.mass<130.' %(chanCuts)
        srcut = '%s && finalstate.sT<400. && finalstate.sT>150. && z1.mass<110. && h1.mass<130.' %(chanCuts)
        unblind = True

    logging.debug('Sideband cut: %s' % sbcut)
    logging.debug('Signal region cut: %s' % srcut)
    logging.debug('Base cut: %s' % base_selections)

    # create the limits object
    # base_selections is the cut applied on top of individual channel cuts
    # sbcut is the sideband selection for alpha calculation
    # srcut is the signal region selection for alpha calculation
    limits = Limits(analysis,region, period, base_selections, getNtupleDirectory(analysis,region,period),
                    '%s/%s_%itev_%s/%s/%s' % (datacardDir, analysis, period, region, directory, mass),
                    channels=['dblh%s' % analysis], lumi=intLumiMap[period],
                    blinded=not unblind, bgMode=mode, scalefactor=scalefactor,
                    sbcut=sbcut, srcut=srcut)

    signal = sigMap[period]['SigPP'] if do4l or analysis in ['Hpp4l'] else sigMap[period]['SigAP']
    if mode=='sideband':
        # add groups, signal scales must be list of floats with same length as cuts list in gen card
        signame = "hpp%i_PP" % mass if do4l or analysis in ['Hpp4l'] else "hpp%i_AP" % mass
        limits.add_group(signame, signal, scale=channelScales, isSignal=True)
        if doBoth:
            signame_PP = "hpp%i_PP" % mass
            signal_PP =  sigMap[period]['SigPP']
            limits.add_group(signame_PP, signal_PP, scale=channelScales, isSignal=True)
        limits.add_group("bg", "bg")
        limits.add_group("data", "data_R*", isData=True)

        # luminosity systematic, 2.6% for MC
        lumi = {signame: 1.026}
        if doBoth: lumi = {signame: 1.026, signame_PP: 1.026}
        limits.add_systematics("lumi", "lnN", **lumi)
    
        # lepton systematics, electron and muon separately for mc
        chanNames = recoChannels # only one supported for now
        scaleMap = calculateChannelLeptonSystematic(mass,chanNames,do4l=do4l,doBoth=doBoth)
        eid = {signame: "%0.3f" %scaleMap[chanNames[0]]['e']}
        if doBoth: eid = {signame: "%0.3f" %scaleMap[chanNames[0]]['e'], signame_PP: "%0.3f" %scaleMap[chanNames[0]]['e']}
        chgid = {signame: "%0.3f" %scaleMap[chanNames[0]]['chg']}
        if doBoth: chgid = {signame: "%0.3f" %scaleMap[chanNames[0]]['chg'], signame_PP: "%0.3f" %scaleMap[chanNames[0]]['chg']}
        mid = {signame: "%0.3f" %scaleMap[chanNames[0]]['m']}
        if doBoth: mid = {signame: "%0.3f" %scaleMap[chanNames[0]]['m'], signame_PP: "%0.3f" %scaleMap[chanNames[0]]['m']}
        limits.add_systematics('eid', 'lnN', **eid)
        limits.add_systematics('chgid', 'lnN', **chgid)
        limits.add_systematics('mid', 'lnN', **mid)
    
        # signal mc uncertainty
        sigmc = {signame: 1.15}
        if doBoth: sigmc = {signame: 1.15, signame_PP: 1.15}
        limits.add_systematics("sig_mc_err", "lnN", **sigmc)
    
        # uncertainty on bg estimation
        alpha_str = recoChannels[0] # only one supported for now
        alpha_pdf = {'bg': 1.1}
        limits.add_systematics("alpha_%s" %alpha_str, "lnN", **alpha_pdf)
    
        # generate the card, passing the cuts to be applied for each gen channel
        limits.gen_card("%s.txt" % name, mass=mass, cuts=channelCuts, doAlphaTest=doAlphaTest)

    elif mode=='mc':
        logging.error('MC needs to be reimplemented')
        #add_systematics_mc(limits,mass,signal,name,channelCuts,scale,period,bp,doAlphaTest,do4l)
    else:
        return 0
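
A hedged driver sketch for sideband-mode card generation; the benchmark point, channels, cut strings, and scales are illustrative placeholders:

# hypothetical: one datacard for a 500 GeV mass point in a single reco channel
limit('Hpp3l','Hpp3l',8,500,
      bp='ee100',
      name='ee100',
      genChannels=['eee'],
      recoChannels=['eee'],
      channelCuts=['(genChannel=="eee" || genChannel=="aaa")'],
      channelScales=[1.0],
      mode='sideband')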
Example #4
def wzlimit(analysis, region, period, chan, **kwargs):
    cut = kwargs.pop('cut', '1')
    name = kwargs.pop('name', 'card')
    scalefactor = kwargs.pop(
        'scalefactor',
        'event.gen_weight*event.pu_weight*event.lep_scale*event.trig_scale')
    datacardDir = kwargs.pop('datacardDir', './datacards')
    mode = kwargs.pop('mode',
                      'all')  # all, none, lumi, experimental, theory, nostat
    logging.info("Processing card name {0}".format(name))

    chanCut = '{0} && channel=="{1}"'.format(cut, chan)

    #doStat = mode in ['all','experimental','stat']
    doStat = mode not in ['nostat']
    limits = WZLimits(
        analysis,
        region,
        period,
        chanCut,
        getNtupleDirectory(analysis, region, period),
        '%s/%s_%itev_%s' % (datacardDir, analysis, period, region),
        scalefactor=scalefactor,
        doStat=doStat)

    # add systematic
    #bgnames = ['datadriven','ZZ','WW','TTV','VVV','ZG']
    bgnames = ['datadriven', 'ZZ', 'TTV', 'VVV', 'ZG']
    signames = ['WZ']
    #mcnames = ['WZ','ZZ','WW','TTV','VVV','ZG']
    mcnames = ['WZ', 'ZZ', 'TTV', 'VVV', 'ZG']

    # lumi
    # current recommendation: 4.6%
    lumi = {}
    for b in mcnames:
        lumi[b] = 1.046
    if mode in ['all', 'lumi', 'nostat']:
        limits.add_systematics("lumi", "lnN", **lumi)

    # datadriven
    # assume 40%
    fake = {'datadriven': 1.4}
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('fake_rate_unc', 'lnN', **fake)

    # ZZ cross section
    zzxsec = {'ZZ': 1.16}
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('zz_xsec', 'lnN', **zzxsec)

    # ZG cross section theory
    zgxsec = {'ZG': 1.06}
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('zg_xsec_theory', 'lnN', **zgxsec)

    # TTZ cross section theory
    ttzxsec = {'TTV': 1.15}
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('ttz_xsec_theory', 'lnN', **ttzxsec)

    # diboson (WW) cross section theory; WW is commented out of the sample lists
    # above, so this systematic currently applies to no active process
    wwxsec = {'WW': 1.06}
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('ww_xsec_theory', 'lnN', **wwxsec)

    # VVV cross section theory
    vvvxsec = {'VVV': 1.06}
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('vvv_xsec_theory', 'lnN', **vvvxsec)

    # eff uncertainty
    # take scale factors for leptons, propagate up and down based on statistical uncertainty
    lepvals = {
        'eee': 1.019,
        'eem': 1.016,
        'mme': 1.016,
        'mmm': 1.016,
    }
    lep = {}
    for m in mcnames:
        lep[m] = lepvals[chan]
    #limits.add_systematics('lep_eff_unc','lnN',**lep)
    elepvals = {
        'eee': 1.018,
        'eem': 1.012,
        'mme': 1.005,
        'mmm': 1.000,
    }
    if 'e' in chan:
        elep = {}
        for m in mcnames:
            elep[m] = elepvals[chan]
        if mode in ['all', 'experimental', 'nostat']:
            limits.add_systematics('lep_eff_unc_e', 'lnN', **elep)
    mlepvals = {
        'eee': 1.000,
        'eem': 1.004,
        'mme': 1.011,
        'mmm': 1.016,
    }
    if 'm' in chan:
        mlep = {}
        for m in mcnames:
            mlep[m] = mlepvals[chan]
        if mode in ['all', 'experimental', 'nostat']:
            limits.add_systematics('lep_eff_unc_m', 'lnN', **mlep)

    # pu
    # assume 10% uncertainty on min bias cross section, scale up and down, take largest difference in wz yield
    puvals = {
        'eee': 1.0064,
        'eem': 1.0088,
        'mme': 1.0124,
        'mmm': 1.0020,
    }
    pu = {}
    for m in mcnames:
        pu[m] = puvals[chan]
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('PU_unc', 'lnN', **pu)

    # met
    # scale each component up and down independently, take the largest shifts and add them in quadrature
    metvals = {
        #'eee' : 1.0146, # placeholder from 8 tev
        #'eem' : 1.0150,
        #'mme' : 1.0159,
        #'mmm' : 1.0117,
        'eee': 1.019,  # 13 tev mes, ees, jes
        'eem': 1.013,
        'mme': 1.033,
        'mmm': 1.017,
    }
    met = {}
    for m in mcnames:
        met[m] = metvals[chan]
    if mode in ['all', 'experimental', 'nostat']:
        limits.add_systematics('met_unc', 'lnN', **met)

    # pdf
    # propagate pdf uncertainties through the selection, scale up and down, take largest
    pdfvals = {
        'eee': 1.01407,  # for now just taking gen, figure it out later after new fsa
        'eem': 1.01394,
        'mme': 1.01399,
        'mmm': 1.01395,
    }
    pdf = {}
    for s in signames:
        pdf[s] = pdfvals[chan]
    if mode in ['all', 'theory', 'nostat']:
        limits.add_systematics('pdf_unc', 'lnN', **pdf)

    # scale
    # propagate scale uncertainties through the selection, scale up and down, take largest
    scalevals = {
        'eee': 1.04296,  # again, now just taking gen, fix fsa later
        'eem': 1.04298,
        'mme': 1.04285,
        'mmm': 1.04298,
    }
    scale = {}
    for s in signames:
        scale[s] = scalevals[chan]
    if mode in ['all', 'theory', 'nostat']:
        limits.add_systematics('scale_unc', 'lnN', **scale)

    # gen card
    limits.gen_card("{0}.txt".format(name))
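
A hedged usage sketch; the channel loop and card names are illustrative placeholders (13 TeV matches the MET comment above):

# hypothetical: generate one card per WZ channel with all systematics enabled
for chan in ['eee','eem','mme','mmm']:
    wzlimit('WZ','WZ',13,chan,name='wz_%s' % chan,mode='all')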