Code example #1
File: srTable.py  Project: llechner/TTGammaEFT
def wrapper(arg):
        r,channel,setup,estimate,cat,est = arg
        estimate.initCache(setup.defaultCacheDir())
        if estimate.name == "Data" and blind:
            res = u_float(0,0)
        else:
            res = estimate.cachedEstimate(r, channel, setup, overwrite=False, checkOnly=True)
        if args.removeNegative and res < 0: res = u_float(0,0)
        return est, str(r), cat, channel, res.tuple()
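Every snippet on this page leans on the same u_float helper from the projects listed here (TTGammaEFT, Analysis, TTZRun2EFT): a central value with a Gaussian uncertainty that supports arithmetic with error propagation, comparisons against plain numbers, .tuple(), construction via u_float(**yieldDict) from what is presumably a {'val': ..., 'sigma': ...} dictionary, and a fromString parser. The class itself is not reproduced on this page; the sketch below is only a minimal, illustrative stand-in for that interface, inferred from the calls in these examples (the fromString format and the exact propagation rules are assumptions), not the projects' actual implementation.

import math

class u_float(object):
    """Minimal, illustrative stand-in for the framework's u_float class:
    a central value with a Gaussian uncertainty (val +/- sigma)."""

    def __init__(self, val, sigma=0.):
        self.val = float(val)
        self.sigma = float(sigma)

    @classmethod
    def fromString(cls, s):
        # assumed input format: "1.234 +- 0.005" (e.g. the MadGraph x-sec string)
        val, sigma = (float(x) for x in s.split('+-'))
        return cls(val, sigma)

    def tuple(self):
        return (self.val, self.sigma)

    # uncorrelated Gaussian error propagation
    def __add__(self, other):
        o = other if isinstance(other, u_float) else u_float(other)
        return u_float(self.val + o.val, math.sqrt(self.sigma**2 + o.sigma**2))

    def __sub__(self, other):
        o = other if isinstance(other, u_float) else u_float(other)
        return u_float(self.val - o.val, math.sqrt(self.sigma**2 + o.sigma**2))

    def __mul__(self, other):
        o = other if isinstance(other, u_float) else u_float(other)
        return u_float(self.val * o.val,
                       math.sqrt((self.sigma * o.val)**2 + (o.sigma * self.val)**2))

    def __div__(self, other):                       # Python 2
        o = other if isinstance(other, u_float) else u_float(other)
        return u_float(self.val / o.val,
                       math.sqrt((self.sigma / o.val)**2 +
                                 (o.sigma * self.val / o.val**2)**2))
    __truediv__ = __div__                           # Python 3
    __radd__ = __add__                              # so sum([...]) starting from 0 works
    __rmul__ = __mul__

    def __abs__(self):
        return u_float(abs(self.val), self.sigma)

    # comparisons against plain numbers compare the central value
    def __lt__(self, other): return self.val < getattr(other, 'val', other)
    def __le__(self, other): return self.val <= getattr(other, 'val', other)
    def __gt__(self, other): return self.val > getattr(other, 'val', other)
    def __ge__(self, other): return self.val >= getattr(other, 'val', other)
    def __eq__(self, other): return self.val == getattr(other, 'val', other)

    def __repr__(self):
        return "%s +- %s" % (self.val, self.sigma)

With this stand-in, expressions that appear in the snippets, such as res < 0, abs(0.5*(up-down)/ref), sum([...]) over estimates, and res.tuple(), behave as expected.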
Code example #2
 def addUnc(c, name, binname, pName, unc, unc_yield,
            signal):
     if newPOI_input and signal:
         if name in sigUnc:
             sigUnc[name] += u_float(0, unc) * unc_yield
         else:
             sigUnc[name] = u_float(0, unc) * unc_yield
     else:
         c.specifyUncertainty(name, binname, pName,
                              1 + unc)
Code example #3
    def _kappaMC(self, region, channel, setup, overwrite=False):

        param_LsLc = {"addMisIDSF": addSF}
        param_LsHc = {"photonIso": "highChgIso", "addMisIDSF": addSF}
        param_HsLc = {"photonIso": "highSieie", "addMisIDSF": addSF}
        param_HsHc = {"photonIso": "highChgIsohighSieie", "addMisIDSF": addSF}

        params_LsLc = copy.deepcopy(setup.parameters)
        params_LsHc = copy.deepcopy(setup.parameters)
        params_HsLc = copy.deepcopy(setup.parameters)
        params_HsHc = copy.deepcopy(setup.parameters)

        params_LsLc.update(param_LsLc)
        params_LsHc.update(param_LsHc)
        params_HsLc.update(param_HsLc)
        params_HsHc.update(param_HsHc)

        setup_LsLc = setup.sysClone(parameters=params_LsLc)
        setup_LsHc = setup.sysClone(parameters=params_LsHc)
        setup_HsLc = setup.sysClone(parameters=params_HsLc)
        setup_HsHc = setup.sysClone(parameters=params_HsHc)

        #            estimate = MCBasedEstimate( name="all_mc_e_had" if channel == "e" else "all_mc_mu_had", process=setup.processes["all_mc_e_had" if channel == "e" else "all_mc_mu_had"] )
        estimate = MCBasedEstimate(name="all_noQCD_had",
                                   process=setup.processes["all_noQCD_had"])

        mcHad_LsLc = estimate.cachedEstimate(region,
                                             channel,
                                             setup_LsLc,
                                             overwrite=overwrite)
        mcHad_LsHc = estimate.cachedEstimate(region,
                                             channel,
                                             setup_LsHc,
                                             overwrite=overwrite)
        mcHad_HsLc = estimate.cachedEstimate(region,
                                             channel,
                                             setup_HsLc,
                                             overwrite=overwrite)
        mcHad_HsHc = estimate.cachedEstimate(region,
                                             channel,
                                             setup_HsHc,
                                             overwrite=overwrite)
        print "had", mcHad_LsLc, mcHad_LsHc, mcHad_HsLc, mcHad_HsHc

        # no shape correction if any component is 0
        if mcHad_LsLc <= 0: return u_float(1)
        if mcHad_LsHc <= 0: return u_float(1)
        if mcHad_HsLc <= 0: return u_float(1)
        if mcHad_HsHc <= 0: return u_float(1)

        # create the data-driven factor
        mcRatio_Ls = mcHad_LsLc / mcHad_LsHc
        mcRatio_Hs = mcHad_HsLc / mcHad_HsHc

        return mcRatio_Ls / mcRatio_Hs
Code example #4
    def _kappaData(self, region, channel, setup, overwrite=False):
        # data correction factor for low chgIso -> high chgIso

        param_HsLc = {"photonIso": "highSieie", "addMisIDSF": addSF}
        param_HsHc = {"photonIso": "highChgIsohighSieie", "addMisIDSF": addSF}

        params_HsLc = copy.deepcopy(setup.parameters)
        params_HsHc = copy.deepcopy(setup.parameters)

        params_HsLc.update(param_HsLc)
        params_HsHc.update(param_HsHc)

        setup_HsLc = setup.sysClone(parameters=params_HsLc)
        setup_HsHc = setup.sysClone(parameters=params_HsHc)

        if self.expected:
            #                estimate = MCBasedEstimate( name="all_mc_e" if channel == "e" else "all_mc_mu", process=setup.processes["all_mc_e" if channel == "e" else "all_mc_mu"] )
            estimate = MCBasedEstimate(name="all_noQCD",
                                       process=setup.processes["all_noQCD"])
        else:
            estimate = DataObservation(name="Data",
                                       process=setup.processes["Data"])

        data_HsLc = estimate.cachedEstimate(region,
                                            channel,
                                            setup_HsLc,
                                            overwrite=overwrite)
        data_HsHc = estimate.cachedEstimate(region,
                                            channel,
                                            setup_HsHc,
                                            overwrite=overwrite)

        # get the MC based Estimate for fakes in the region
        #            estimate = MCBasedEstimate( name="all_mc_e_prompt" if channel == "e" else "all_mc_mu_prompt", process=setup.processes["all_mc_e_prompt" if channel == "e" else "all_mc_mu_prompt"] )
        estimate = MCBasedEstimate(name="all_noQCD_prompt",
                                   process=setup.processes["all_noQCD_prompt"])

        mcPrompt_HsLc = estimate.cachedEstimate(region,
                                                channel,
                                                setup_HsLc,
                                                overwrite=overwrite)
        print "prompt hslc", mcPrompt_HsLc
        mcPrompt_HsHc = estimate.cachedEstimate(region,
                                                channel,
                                                setup_HsHc,
                                                overwrite=overwrite)
        print "prompt hshc", mcPrompt_HsHc

        dataHad_HsHc = data_HsHc - mcPrompt_HsHc
        if dataHad_HsHc <= 0: return u_float(0)

        dataHad_HsLc = data_HsLc - mcPrompt_HsLc
        if dataHad_HsLc <= 0: dataHad_HsLc = u_float(.1, .1)

        return dataHad_HsLc / dataHad_HsHc
Code example #5
 def JECSystematic(self, region, channel, setup, jes="Total"):
     ref  = self.cachedEstimate(region, channel, setup)
     if ref == 0: return u_float(0,0)
     up   = self.cachedEstimate(region, channel, setup.sysClone({"selectionModifier":"jes%sUp"%jes}))
     down = self.cachedEstimate(region, channel, setup.sysClone({"selectionModifier":"jes%sDown"%jes}))
     unc  = abs(0.5*(up-down)/ref)
     if unc.val == 0:
         uncUp    = abs((ref-up)/ref)
         uncDown  = abs((ref-down)/ref)
         unc      = uncUp if uncUp.val >= uncDown.val else uncDown
         if unc.val == 0: return u_float(0,0)
     return unc
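A worked example with purely illustrative numbers makes the symmetrized relative JES uncertainty explicit:

# Illustrative numbers only, using plain floats for brevity:
ref, up, down = 100., 105., 97.
unc = abs(0.5 * (up - down) / ref)   # 0.04, i.e. a 4% JES uncertainty on this yield
# If up and down coincide so that the symmetrized variation vanishes, JECSystematic
# falls back to the larger one-sided shift, |ref - up|/ref or |ref - down|/ref.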
Code example #6
    def writeToCache(self, region, channel, setup, value, signalAddon=None, save=True, overwrite=False, checkOnly=False):
        key =  self.uniqueKey(region, channel, setup, signalAddon=signalAddon)
        if (self.cache and self.cache.contains(key)) and not overwrite:
            res = self.cache.get(key)
#            if res.val != value.val: print "Warning, caches estimate not equal to input value: have %s, got %s"%(res, value)
#            logger.debug( "Loading cached %s result for %r : %r"%(self.name, key, res) )
        elif self.cache and not checkOnly:
            _res = self.cache.add( key, value, overwrite=True )
            res = value
            logger.debug( "Adding cached %s result for %r : %r" %(self.name, key, res) )
        else:
            res = u_float(-1,0)
        return res if res >= 0 or checkOnly else u_float(0,0)
Code example #7
File: combineTables.py  Project: llechner/TTGammaEFT
def convertLine(line):
    # inclusive & 4134.95 $\pm$ 77.81 & \textbf{592.30 $\pm$ 27.19} & \textbf{587.32 $\pm$ 12.50}
    # & 6494.72 $\pm$ 96.86 & \textbf{926.98 $\pm$ 34.25} & \textbf{951.09 $\pm$ 16.20}

    l = line.replace(" ", "").split("&")
    header = l[0] if l[0] else None
    norm = u_float(float(l[1].split("$")[0]), float(l[1].split("$")[2]))
    dd = l[2].split("{")[1].split("}")[0]
    ddfakes = u_float(float(dd.split("$")[0]), float(dd.split("$")[2]))
    mc = l[3].split("{")[1].split("}")[0]
    mcfakes = u_float(float(mc.split("$")[0]), float(mc.split("$")[2]))

    return header, norm, ddfakes, mcfakes
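A hypothetical call on the first four columns of the row quoted in the comment shows what the parser extracts:

# Hypothetical usage; the row follows the format quoted in the comment above.
row = r"inclusive & 4134.95 $\pm$ 77.81 & \textbf{592.30 $\pm$ 27.19} & \textbf{587.32 $\pm$ 12.50}"
header, norm, ddfakes, mcfakes = convertLine(row)
# header  -> "inclusive"
# norm    -> u_float(4134.95, 77.81)
# ddfakes -> u_float(592.30, 27.19)
# mcfakes -> u_float(587.32, 12.50)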
Code example #8
 def cachedEstimate(self, region, channel, setup, signalAddon=None, save=True, overwrite=False, checkOnly=False):
     key =  self.uniqueKey(region, channel, setup, signalAddon=signalAddon)
     if (self.cache and self.cache.contains(key)) and not overwrite:
         res = self.cache.get(key)
         logger.debug( "Loading cached %s result for %r : %r"%(self.name, key, res) )
     elif self.cache and not checkOnly:
         logger.debug( "Calculating %s result for %r"%(self.name, key) )
         res = self._estimate( region, channel, setup, signalAddon=signalAddon, overwrite=overwrite )
         _res = self.cache.add( key, res, overwrite=True )
         logger.debug( "Adding cached %s result for %r : %r" %(self.name, key, res) )
     elif not checkOnly:
         res = self._estimate( region, channel, setup, signalAddon=signalAddon, overwrite=overwrite)
     else:
         res = u_float(-1,0)
     return res if res >= 0 or checkOnly else u_float(0,0)
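Because the same cache-or-compute pattern recurs below (cachedFakeFactor, cachedObservation, cachedQCDMCTransferFactor), here is a short summary of the branches above, written as comments; it is a reading of the code, not project documentation:

# Behaviour of cachedEstimate (assuming a cache object is configured):
#   key cached and overwrite=False   -> return the cached u_float
#   key missing, checkOnly=False     -> run _estimate(), store it, return it
#   key missing, checkOnly=True      -> return the sentinel u_float(-1, 0);
#                                       the last line passes it through instead
#                                       of clamping negative results to zero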
Code example #9
 def cachedFakeFactor(self, region, channel, setup, overwrite=False, checkOnly=False):
     key =  self.uniqueKey(region, channel, setup)
     if (self.helperCache and self.helperCache.contains(key)) and not overwrite:
         res = self.helperCache.get(key)
         logger.debug( "Loading cached %s result for %r : %r"%(self.name, key, res) )
     elif self.helperCache and not checkOnly:
         logger.debug( "Calculating %s result for %r"%(self.name, key) )
         res = self._dataDrivenFakeCorrectionFactor( region, channel, setup, overwrite=overwrite )
         _res = self.helperCache.add( key, res, overwrite=True )
         logger.debug( "Adding cached transfer factor for %r : %r" %(key, res) )
     elif not checkOnly:
         res = self._dataDrivenFakeCorrectionFactor( region, channel, setup, overwrite=overwrite )
     else:
         res = u_float(-1,0)
     return res if res > 0 or checkOnly else u_float(0,0)
Code example #10
    def _fakesData(self, region, channel, setup, overwrite=False):

        param_LsHc = {"photonIso": "highChgIso", "addMisIDSF": addSF}
        params_LsHc = copy.deepcopy(setup.parameters)
        params_LsHc.update(param_LsHc)
        setup_LsHc = setup.sysClone(parameters=params_LsHc)

        if self.expected:
            #                estimate = MCBasedEstimate( name="all_mc_e" if channel == "e" else "all_mc_mu", process=setup.processes["all_mc_e" if channel == "e" else "all_mc_mu"] )
            estimate = MCBasedEstimate(name="all_noQCD",
                                       process=setup.processes["all_noQCD"])
        else:
            estimate = DataObservation(name="Data",
                                       process=setup.processes["Data"])

        data_LsHc = estimate.cachedEstimate(region,
                                            channel,
                                            setup_LsHc,
                                            overwrite=overwrite)

        #            estimate = MCBasedEstimate( name="all_mc_e_prompt" if channel == "e" else "all_mc_mu_prompt", process=setup.processes["all_mc_e_prompt" if channel == "e" else "all_mc_mu_prompt"] )
        estimate = MCBasedEstimate(name="all_noQCD_prompt",
                                   process=setup.processes["all_noQCD_prompt"])

        mcPrompt_LsHc = estimate.cachedEstimate(region,
                                                channel,
                                                setup_LsHc,
                                                overwrite=overwrite)
        print "prompt lshc", mcPrompt_LsHc

        dataHad_LsHc = data_LsHc - mcPrompt_LsHc

        return dataHad_LsHc if dataHad_LsHc > 0 else u_float(.1, .1)
Code example #11
    def observation(self, region, channel, setup, overwrite):

        if setup.nJet == "3p":
            setup4p = setup.sysClone(parameters={"nJet": (4, -1)})
            setup3 = setup.sysClone(parameters={"nJet": (3, 3)})
            return sum([
                self.cachedEstimate(region, channel, s, overwrite=overwrite)
                for s in [setup3, setup4p]
            ])

        if channel == "all":
            return sum([
                self.cachedEstimate(region, c, setup, overwrite=overwrite)
                for c in lepChannels
            ])

        elif channel == "SFtight":
            return sum([
                self.cachedEstimate(region, c, setup, overwrite=overwrite)
                for c in dilepChannels
            ])

        else:
            preSelection = setup.preselection("Data", channel=channel)
            #            cut = "&&".join([region.cutString(setup.sys['selectionModifier']), preSelection['cut']])
            cut = "&&".join([region.cutString(), preSelection['cut']])

            logger.debug("Using cut %s" % cut)

            weight = preSelection['weightStr']
            if hasattr(setup, "blinding") and setup.blinding:
                weight += "*" + setup.blinding

            return u_float(**self.process.getYieldFromDraw(
                selectionString=cut, weightString=weight))
Code example #12
File: cardFileHelpers.py  Project: llechner/Analysis
def getEstimateFromCard(cardFile, estimateName, binName, postfix=''):
    res = u_float(0)
    uncName = 'Stat_' + binName + '_' + estimateName+postfix
    with open(cardFile) as f:
      binList = False
      estimateList = False
      for line in f:
        if len(line.split())==0: continue
        if line.split()[0] == "bin":
          if not binList: binList = True
          else:           binList = line.split()[1:]
        if line.split()[0] == "process":
          if not estimateList: estimateList = line.split()[1:]
        if line.split()[0] == "rate":
            for i in range(len(binList)):
              if binList[i] == binName and estimateList[i]==estimateName:
                try: res.val = float(line.split()[1:][i])
                except: res.val = 0
                #return float(line.split()[1:][i])
        if line.split()[0] != uncName: continue
        for i in range(len(binList)):
          if binList[i] == binName and estimateList[i]==estimateName:
            try:    res.sigma = (float(line.split()[2:][i])-1)*res.val
            except: res.sigma = 0.
    return res
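A toy datacard (all bin and process names are illustrative, not from the analysis) shows what ends up in the returned u_float:

# Hypothetical usage with a minimal two-process card.
toyCard = """\
imax 1 jmax 1 kmax *
bin            Bin0
observation    100
bin            Bin0    Bin0
process        signal  bkg
process        0       1
rate           12.3    45.6
Stat_Bin0_bkg  lnN     -       1.10
"""
with open("toy_card.txt", "w") as f:
    f.write(toyCard)

res = getEstimateFromCard("toy_card.txt", "bkg", "Bin0")
# res.val   -> 45.6                      (the "rate" entry for bkg in Bin0)
# res.sigma -> (1.10 - 1) * 45.6 ~ 4.56  (from the Stat_Bin0_bkg line)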
Code example #13
File: Process.py  Project: HephyAnalysisSW/TTZRun2EFT
    def xsec( self, modified_couplings=None, overwrite=False, skip=False ):

        key = self.getKey( modified_couplings )
        # Do we have the x-sec?
        if self.xsecDB.contains(key) and not overwrite:
            logger.debug( "Found x-sec %s for key %r. Do nothing.", self.xsecDB.get(key), key )
            return self.xsecDB.get(key)
        elif skip:
            return u_float(0)
        else:
            print "Trying to get xsec"
            self.__initialize( modified_couplings ) 
            logger.info( "Calculating x-sec" )
            # rerun MG to obtain the correct x-sec (with more events)
            with open( os.path.join( self.processTmpDir, 'Cards/run_card.dat' ), 'a' ) as f:
                f.write( ".false. =  gridpack\n" )
            logger.info( "Calculate x-sec: Calling bin/generate_events" )
            output = subprocess.check_output( [ os.path.join( self.processTmpDir, 'bin/generate_events' ) , '-f' ] )
            for i in range(10):
                try:
                    output = subprocess.check_output( [ os.path.join( self.processTmpDir, 'bin/generate_events' ) , '-f' ] )
                    m = re.search( "Cross-section :\s*(.*) \pb", output )
                    logger.info( "x-sec: {} pb".format(m.group(1)) )
                    break
                except ValueError:
                    logger.info("Encountered problem during the MG run. Restarting.")

            xsec_ = u_float.fromString(m.group(1)) 
            
            self.xsecDB.add( key, xsec_, overwrite=True )

            logger.info( "Done!" )

            return xsec_
Code example #14
 def yieldFromCache(self, setup, process, c, selectionString, weightString, overwrite=False):
     s = (process, c, selectionString, weightString)
     if self.helperCache and self.helperCache.contains(s) and not overwrite:
         return self.helperCache.get(s)
     else:
         yieldFromDraw = u_float(**setup.processes[process].getYieldFromDraw(selectionString, weightString))
         if self.helperCache: self.helperCache.add(s, yieldFromDraw, overwrite=True)
         return yieldFromDraw
Code example #15
 def mult(self, l):
     if len(l) > 0:
         res = l[0]
         for i in l[1:]:
             res = res * i
     else:
         res = u_float(1)
     return res
Code example #16
    def cachedQCDMCTransferFactor(self, channel, setup, qcdUpdates=None, save=True, overwrite=False, checkOnly=False):
        key =  self.uniqueKey("regionQCDMC", channel, setup, qcdUpdates=qcdUpdates)
        if (self.tfCache and self.tfCache.contains(key)) and not overwrite:
            res = self.tfCache.get(key)
            logger.debug( "Loading cached %s result for %r : %r"%(self.name, key, res) )
        elif self.tfCache and not checkOnly:
            logger.debug( "Calculating %s result for %r"%(self.name, key) )
#            res = self._dataDrivenTransferFactor( channel, setup, qcdUpdates=qcdUpdates, overwrite=overwrite )
            res = self._transferFactor( channel, setup, qcdUpdates=qcdUpdates, overwrite=overwrite )
            _res = self.tfCache.add( key, res, overwrite=True )
            logger.debug( "Adding cached transfer factor for %r : %r" %(key, res) )
        elif not checkOnly:
#            res = self._dataDrivenTransferFactor( channel, setup, qcdUpdates=qcdUpdates, overwrite=overwrite )
            res = self._transferFactor( channel, setup, qcdUpdates=qcdUpdates, overwrite=overwrite )
        else:
            res = u_float(-1,0)
        return res if res > 0 or checkOnly else u_float(0,0)
Code example #17
    def _dataDrivenFakes(self, region, channel, setup, overwrite=False):

        dataFakes = self._fakesData(region,
                                    channel,
                                    setup,
                                    overwrite=overwrite)
        if dataFakes <= 0: return u_float(0)

        kappaData = self._kappaData(region,
                                    channel,
                                    setup,
                                    overwrite=overwrite)
        if kappaData <= 0: return u_float(0)

        # no shape correction if 0
        kappaMC = self._kappaMC(region, channel, setup, overwrite=overwrite)
        if kappaMC <= 0: kappaMC = u_float(1)

        return dataFakes * kappaData * kappaMC
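A hedged reading of the three ingredients, inferred from the photonIso parameters used in the snippets above (Ls/Hs = low/high sigma_ieta_ieta, Lc/Hc = low/high charged isolation) rather than stated explicitly in the source:

# Schematic form of the data-driven hadronic-fake estimate assembled here,
# targeting the signal-like (Ls,Lc) region:
#
#   N_fakes(Ls,Lc) ~ [N_data - N_prompt_MC](Ls,Hc)                          # _fakesData
#                    * [N_data - N_prompt_MC](Hs,Lc) / [same](Hs,Hc)        # _kappaData
#                    * [ (Ls,Lc)/(Ls,Hc) ] / [ (Hs,Lc)/(Hs,Hc) ] in had. MC # _kappaMC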
Code example #18
 def cachedObservation(self,
                       region,
                       channel,
                       setup,
                       save=True,
                       overwrite=False,
                       checkOnly=False):
     key = self.uniqueKey(region, channel, setup)
     if (self.cache and self.cache.contains(key)) and not overwrite:
         res = self.cache.get(key)
         logger.debug("Loading cached %s result for %r : %r" %
                      (self.name, key, res))
     elif self.cache and not checkOnly:
         res = self.observation(region, channel, setup, overwrite)
         _res = self.cache.add(key, res, overwrite=True)
         logger.debug("Adding cached %s result for %r" % (self.name, key))
     elif not checkOnly:
         res = self.observation(region, channel, setup, overwrite)
     else:
         res = u_float(-1, 0)
     return res if res >= 0 or checkOnly else u_float(0, 0)
Code example #19
def wrapper(arg):
    r, channel, setup, (ctZ, ctZI, ctW, ctWI) = arg
    EFTparams = ["ctZ", str(ctZ), "ctZI",
                 str(ctZI)]  #, "ctW", str(ctW), "ctWI", str(ctWI) ]
    params = {"ctZ": ctZ, "ctZI": ctZI}  #,  "ctW":ctW, "ctWI":ctWI }
    key = (args.controlRegion, str(r), channel, "_".join(EFTparams))
    keymu = (args.controlRegion.replace("All", ""), str(r), "mu", "_".join(EFTparams))
    keye = (args.controlRegion.replace("All", ""), str(r), "e", "_".join(EFTparams))
    print key
    print keymu
    print keye
    print cache.contains(key), cache.contains(keymu), cache.contains(keye)
    if cache.contains(key) and not args.overwrite:
        res = cache.get(key)
    elif not cache.contains(key) and args.checkOnly:
        res = {"val": -1, "sigma": 0}
    else:
        if channel == "all" and cache.contains(keye) and cache.contains(
                keymu) and not args.overwrite:
            res = u_float(cache.get(keye))
            print "e", res
            resmu = u_float(cache.get(keymu))
            print "mu", resmu
            res += u_float(cache.get(keymu))
        else:
            selection = setup.genSelection("MC",
                                           channel=channel,
                                           **setup.defaultParameters())["cut"]
            selection = "&&".join([selection, r.cutString()])
            weightString = "ref_weight*(" + get_weight_string(params) + ")"
            print selection
            print weightString
            res = eftSample.getYieldFromDraw(selectionString=selection,
                                             weightString=weightString)
        print key, res
        cache.add(key, res, overwrite=True)
#    print "done", res
    return (key, res)
Code example #20
File: cardFileHelpers.py  Project: llechner/Analysis
def getObservationFromCard(cardFile, binName):
    res = u_float(0)
    with open(cardFile) as f:
      binList = False
      estimateList = False
      for line in f:
        if len(line.split())==0: continue
        if line.split()[0] == "bin":
            binList = line.split()[1:]
        if line.split()[0] == "observation":
            for i in range(len(binList)):
              if binList[i] == binName:# and estimateList[i]==estimateName:
                try: res.val = float(line.split()[1:][i])
                except: res.val = 0
    return res
Code example #21
def wrapper(arg):
    # INFO: fakeFactor = fakesData / fakesMC * kappaData * kappaMC
    key, subkey, r, channel, setup = arg
    logger.info(
        "Running estimate for region %s, channel %s in setup %s" %
        (r, channel, args.controlRegion if args.controlRegion else "None"))
    #        fakeFactor = estimate.cachedFakeFactor(r, channel, setup, checkOnly=True).val
    kappaData = estimate._kappaData(r, channel, setup)
    kappaMC = estimate._kappaMC(r, channel, setup)
    fakesData = estimate._fakesData(r, channel, setup)
    fakesMC = estimate._fakesMC(r, channel, setup)
    ddfakes = fakesData * kappaMC * kappaData
    sf = ddfakes / fakesMC if fakesMC.val > 0 else u_float(0)
    return (key, subkey, channel, fakesData.tuple(), kappaData.tuple(),
            kappaMC.tuple(), ddfakes.tuple(), fakesMC.tuple(), sf.tuple())
Code example #22
    def _dataDrivenFakeCorrectionFactor(self,
                                        region,
                                        channel,
                                        setup,
                                        overwrite=False):
        # not used anymore, gives some kind of fake SF which could be compared to Nabin
        # factor to multiply with non-prompt mc yield for data-driven estimation
        mcFakes = self._fakesMC(region, channel, setup, overwrite=overwrite)
        if mcFakes <= 0: return u_float(0)

        ddFakes = self._dataDrivenFakes(region,
                                        channel,
                                        setup,
                                        overwrite=overwrite)
        return ddFakes / mcFakes
Code example #23
    def getPartialSF( self, effMap, pt, eta, effMap_unc=None ):
        if self.year == 2016:
            bin = effMap.FindBin(eta,pt)
        elif self.year == 2017:
            bin = 1 if abs(eta) < 1.479 else 4
        elif self.year == 2018:
            bin = effMap.FindBin(pt,eta)

        sf  = effMap.GetBinContent( bin )

        if self.year == 2018:
            err = effMap_unc.GetBinContent( bin )
        else:
            err = effMap.GetBinError( bin )

        return u_float(sf, err)
Code example #24
File: cardFileHelpers.py  Project: llechner/Analysis
def getTotalPostFitUncertainty(cardFile, binName):
    with open(cardFile) as f:
      binList = False
      estimateList = False
      ind = []
      uncertainties = False
      uncDict = {}
      totalUnc = {}
      for line in f:
        if len(line.split())==0: continue
        if line.split()[0] == "bin":
          if not binList: binList = True
          else:
            binList = line.split()[1:]
            for i,b in enumerate(binList):
                if b == binName:
                    ind.append(i) 
        if line.split()[0] == "process":
          if not estimateList:
            estimateList = line.split()[1:]
            estimateList = estimateList[ind[1]:ind[-1]+1]
        if line.split()[0] == "rate":
          estimates = line.split()[1:]
          estimates = [float(a) for a in estimates[ind[1]:ind[-1]+1]]
        if line.split()[0] == 'PU': uncertainties = True
        if uncertainties:
            uncDict[line.split()[0]] = [ 0 if a =='-' else float(a)-1 for a in line.split()[2:][ind[1]:ind[-1]+1] ]
    nuisanceFile = cardFile.replace('.txt','_nuisances_full.txt')
    for unc in uncDict.keys():
        totalUnc[unc] = 0
        for i in range(len(estimates)):
            #totalUnc[unc] += uncDict[unc][i] * estimates[i] * ( 1 + getPull(nuisanceFile,unc)*uncDict[unc][i] ) #* getConstrain(nuisanceFile, unc)
            totalUnc[unc] += uncDict[unc][i] * estimates[i] * math.exp( getPull(nuisanceFile,unc)*uncDict[unc][i] )
            #totalUnc[unc] += (uncDict[unc][i] * estimates[i] * math.exp( getPull(nuisanceFile,unc)*uncDict[unc][i] ))**2
        #totalUnc[unc] = math.sqrt(totalUnc[unc])
    total = 0
    for unc in totalUnc.keys():
        total += totalUnc[unc]**2
    estimatesPostFit = []
    for e in estimateList:
        res = getEstimateFromCard(cardFile, e, binName)
        res = applyAllNuisances(cardFile, e, res, binName)
        estimatesPostFit.append(res.val)
    estimatePostFit = sum(estimatesPostFit)
    return u_float(estimatePostFit,math.sqrt(total))
Code example #25
    def getSF(self, pdgId, pt, eta, sigma=0):

        if abs(pdgId) not in [11, 13]:
            raise Exception("Lepton SF for PdgId %i not known" % pdgId)

        if self.year == 2016 and abs(pdgId) == 13:
            if pt >= 120: pt = 119
            if pt <= 20: pt = 21
            if eta >= 2.4: eta = 2.39
            elif eta <= -2.4: eta = -2.39

            sf_BCDEF = self.mult([
                self.getPartialSF(effMap, pt, eta) for effMap in self.mu_BCDEF
            ]).val
            sf_GH = self.mult([
                self.getPartialSF(effMap, pt, eta) for effMap in self.mu_GH
            ]).val
            sf = (sf_BCDEF * lumiRatio2016_BCDEF) + (sf_GH * lumiRatio2016_GH)  # Scale SF by lumiRatio
            sigma = 0.03  # Recommendation for Moriond17
            sf = u_float(sf, sigma)

        else:
            if abs(pdgId) == 13:
                absEta = abs(eta)
                if pt >= 120: pt = 119
                if pt <= 20: pt = 21
                if absEta >= 2.4: absEta = 2.39

                sf = self.mult([
                    self.getPartialSF(effMap, pt, absEta, reversed=True)
                    for effMap in self.mu
                ])

            elif abs(pdgId) == 11:
                if pt >= 500: pt = 499
                if pt <= 10: pt = 11
                if eta >= 2.5: eta = 2.49
                elif eta <= -2.5: eta = -2.49

                sf = self.mult([
                    self.getPartialSF(effMap, pt, eta) for effMap in self.ele
                ])

        return (1 + sf.sigma * sigma) * sf.val
Code example #26
    def _fakesMC(self, region, channel, setup, overwrite=False):

        param_LsLc = {"addMisIDSF": addSF}
        params_LsLc = copy.deepcopy(setup.parameters)
        params_LsLc.update(param_LsLc)
        setup_LsLc = setup.sysClone(parameters=params_LsLc)

        estimate = MCBasedEstimate(
            name="all_mc_e_had" if channel == "e" else "all_mc_mu_had",
            process=setup.processes["all_mc_e_had" if channel ==
                                    "e" else "all_mc_mu_had"])
        #            estimate = MCBasedEstimate( name="all_noQCD_had", process=setup.processes["all_noQCD_had"] )

        mcHad_LsLc = estimate.cachedEstimate(region,
                                             channel,
                                             setup_LsLc,
                                             overwrite=overwrite)

        if mcHad_LsLc <= 0: return u_float(0)
        return mcHad_LsLc
Code example #27
File: getPostFit.py  Project: llechner/Analysis
def getValFrom1BinnedHistOrGraph( hist ):
    """
        if input is AsymTGraph, the average of errors is given 
    """
    if type(hist) in [ ROOT.TH1F , ROOT.TH1D ]:
        v = hist.GetBinContent(1)
        e = hist.GetBinError(1)
    if type(hist) in [ ROOT.TH2F , ROOT.TH2D ]:
        v = hist.GetBinContent(1,1)
        e = hist.GetBinError(1,1)
    if type(hist) in [ROOT.TGraphAsymmErrors]:
        v = hist.GetY()[0]
        el = hist.GetEYlow()[0]
        eh = hist.GetEYhigh()[0]
        if el and eh:
            e  = sum( [abs(el), abs(eh)] )/2.
        else:
            e  = max(abs(el),abs(eh))
        #print hist , (v,el,eh)
        #return (v, el, eh )
    return u_float(v,e)
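A hypothetical call (requires ROOT) for a one-bin histogram:

# Hypothetical usage: wrap the single bin of a TH1F into a u_float.
import ROOT
h = ROOT.TH1F("h_demo", "h_demo", 1, 0., 1.)
h.SetBinContent(1, 42.)
h.SetBinError(1, 3.5)
res = getValFrom1BinnedHistOrGraph(h)   # u_float(42.0, 3.5)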
Code example #28
File: yieldTable.py  Project: llechner/TTGammaEFT
# create dictionary structure
yields = {}
signal = {}
for estName in [e.name for e in allEstimators] + [
        "MC", "MC_gen", "MC_misID", "MC_np", "MC_PU", "MC_fake", "MC_hp"
]:
    est = estName.split("_")[0]
    yields[est] = {}
    for i_region, region in enumerate(allPhotonRegions):
        yields[est][ptDict[str(region)]] = {}
        signal[ptDict[str(region)]] = {}
        for i_cat, cat in enumerate(
            ["gen", "misID", "np", "PU", "fake", "hp", "all"]):
            yields[est][ptDict[str(region)]][cat] = {}
            for i_mode, mode in enumerate(channels + [allMode]):
                yields[est][ptDict[str(region)]][cat][mode] = u_float(0)
                signal[ptDict[str(region)]][mode] = u_float(0)

jobs = []
for estimator in allEstimators:
    cat = estimator.name.split("_")[-1] if estimator.name.split("_")[-1] in [
        "gen", "misID", "np", "hp", "fake", "PU"
    ] else "all"
    est = estimator.name.split("_")[0]
    for i_region, region in enumerate(allPhotonRegions):
        for i_mode, mode in enumerate(channels):
            jobs.append((region, mode, setup, estimator, cat, est))

if args.cores > 1:
    from multiprocessing import Pool
    pool = Pool(processes=args.cores)
Code example #29
File: LeptonSF.py  Project: llechner/Analysis
 def getPartialSF(self, effMap, pt, eta, reversed=False):
     x = eta if not reversed else pt
     y = pt if not reversed else eta
     sf = effMap.GetBinContent(effMap.FindBin(x, y))
     err = effMap.GetBinError(effMap.FindBin(x, y))
     return u_float(sf, err)
Code example #30
    def _estimate(self,
                  region,
                  channel,
                  setup,
                  signalAddon=None,
                  overwrite=False):
        ''' Concrete implementation of abstract method 'estimate' as defined in Systematic
        '''

        logger.debug("MC prediction for %s channel %s" % (self.name, channel))

        if setup.nJet == "3p":
            setup4p = setup.sysClone(parameters={"nJet": (4, -1)})
            setup3 = setup.sysClone(parameters={"nJet": (3, 3)})
            return sum([
                self.cachedEstimate(region,
                                    channel,
                                    s,
                                    signalAddon=signalAddon,
                                    overwrite=overwrite)
                for s in [setup3, setup4p]
            ])

        if channel == 'all':
            # 'all' is the total of all contributions
            return sum([
                self.cachedEstimate(region,
                                    c,
                                    setup,
                                    signalAddon=signalAddon,
                                    overwrite=overwrite) for c in lepChannels
            ])

        elif channel == 'SFtight':
            # 'SFtight' is the total of mumutight and eetight contributions
            return sum([
                self.cachedEstimate(region,
                                    c,
                                    setup,
                                    signalAddon=signalAddon,
                                    overwrite=overwrite) for c in dilepChannels
            ])

        else:
            # change the sample processed if there is a signal addon like TuneUp
            if signalAddon:
                if self.name.split("_")[-1] in [
                        "gen", "misID", "had", "hp", "fake", "PU"
                ]:
                    name = "_".join(
                        self.name.split("_")[:-1] +
                        [signalAddon, self.name.split("_")[-1]])
                else:
                    name = "_".join([self.name, signalAddon])
                setattr(self, "process" + signalAddon, setup.processes[name])


            #            preSelection = setup.preselection('MC' if not signalAddon else "MCpTincl", channel=channel, processCut=self.processCut)
            preSelection = setup.preselection('MC',
                                              channel=channel,
                                              processCut=self.processCut)
            cuts = [
                region.cutString(setup.sys['selectionModifier']),
                preSelection['cut']
            ]
            #            if setup.parameters["photonIso"] and setup.parameters["photonIso"] != "lowChgIsolowSieie":
            #                self.processCut = self.processCut.replace("photoncat", "photonhadcat")
            #            if self.processCut:
            #                cuts.append( cutInterpreter.cutString(self.processCut) )
            #                logger.info( "Adding process specific cut %s"%self.processCut )
            cut = "&&".join(cuts)
            weight = preSelection['weightStr']

            logger.debug("Using cut %s and weight %s" % (cut, weight))

            #            return setup.lumi/1000.*u_float(**getattr(self,"".join(["process",signalAddon if signalAddon else ""])).getYieldFromDraw(selectionString = cut, weightString = weight) )
            #            print cut, weight
            return u_float(**getattr(
                self, "".join(["process", signalAddon if signalAddon else ""])
            ).getYieldFromDraw(selectionString=cut, weightString=weight))