Example #1
 def cachedTemplate(self,
                    selection,
                    weight='(1)',
                    save=True,
                    overwrite=False):
     key = {
         "selection": selection,
         "weight": weight,
         "source": self.source_sample.name
     }
     if (self.cache and self.cache.contains(key)) and not overwrite:
         result = self.cache.get(key)
         logger.info("Loaded MC PU profile from %s" %
                     (self.cache.database_file))
         logger.debug("Key used: %s result: %r" % (key, result))
     elif self.cache:
         logger.info("Obtain PU profile for %s" % (key, ))
         result = self.makeTemplate(selection=selection, weight=weight)
         if result:
             result = self.cache.addData(key, result, overwrite=save)
             logger.info("Adding PU profile to cache for %s : %r" %
                         (key, result))
         else:
             logger.warning(
                 "Couldn't create PU profile to cache for %s : %r" %
                 (key, result))
     else:
         result = self.makeTemplate(selection=selection, weight=weight)
     return result
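
The method above is a cache-or-compute pattern: look the result up under a composite key, recompute and store it on a miss, and bypass the lookup only when overwrite is requested. A self-contained sketch of that pattern, with a plain dict standing in for the framework's cache database and a dummy callable replacing makeTemplate:

# Minimal cache-or-compute sketch; the dict and the dummy compute() are
# placeholders, not the framework's cache classes.
def cached(cache, key, compute, overwrite=False):
    if key in cache and not overwrite:
        return cache[key]        # cache hit: reuse the stored result
    result = compute()           # cache miss (or forced recomputation)
    if result is not None:
        cache[key] = result      # store for subsequent calls
    return result

cache = {}
template = cached(cache, ("selection", "(1)"), lambda: [1.0, 2.0, 3.0])
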
Example #2
def makeUniquePath():
    ''' Create unique path in tmp directory
    '''

    from TTGammaEFT.Tools.user import tmp_directory
    import uuid

    while True:
        uniqueDir = uuid.uuid4().hex
        uniquePath = os.path.join(tmp_directory, uniqueDir)

        if not os.path.isdir(uniquePath): break

        logger.warning("Path exists, waiting 0.1 sec.")
        time.sleep(0.1)

    return uniquePath
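
makeUniquePath only reserves a name: it loops until it finds a path under tmp_directory that does not exist yet, and the caller creates the directory later, so there is a small window between the check and the creation. For comparison, the standard library creates a uniquely named directory atomically; a sketch of that alternative, not what this repository uses:

# tempfile.mkdtemp picks a unique name and creates the directory in one step.
import shutil
import tempfile

workDir = tempfile.mkdtemp(prefix="TTGammaEFT_")   # the prefix is arbitrary
try:
    pass    # ... run the job inside workDir ...
finally:
    shutil.rmtree(workDir, ignore_errors=True)     # clean up afterwards
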
def filler(event):

    event.run, event.luminosity, event.evt = reader.evt
    event.weight = lumiweight

    if reader.position % 100 == 0:
        logger.info("At event %i/%i", reader.position, reader.nEvents)

    # EFT weights
    if options.addReweights:
        event.nrw = weightInfo.nid
        lhe_weights = reader.products["lhe"].weights()
        weights = []
        param_points = []

        #        hyperPoly.initialized = False

        for weight in lhe_weights:
            # Store nominal weight (First position!)
            if weight.id == "rwgt_1": event.rw_nominal = weight.wgt

            if not weight.id in weightInfo.id: continue

            pos = weightInfo.data[weight.id]
            event.rw_w[pos] = weight.wgt
            weights += [weight.wgt]
            interpreted_weight = interpret_weight(weight.id)

            for var in weightInfo.variables:
                getattr(event, "rw_" + var)[pos] = interpreted_weight[var]

            # weight data for interpolation
            if not hyperPoly.initialized:
                param_points += [
                    tuple(interpreted_weight[var]
                          for var in weightInfo.variables)
                ]

        # get list of values of ref point in specific order
        ref_point_coordinates = [
            weightInfo.ref_point_coordinates[var]
            for var in weightInfo.variables
        ]

        # Initialize with Reference Point
        if not hyperPoly.initialized:
            hyperPoly.initialize(param_points, ref_point_coordinates)

        coeff = hyperPoly.get_parametrization(weights)
        event.np = hyperPoly.ndof
        event.chi2_ndof = hyperPoly.chi2_ndof(coeff, weights)

        if event.chi2_ndof > 10**-6:
            logger.warning("chi2_ndof is large: %f", event.chi2_ndof)

        for n in xrange(hyperPoly.ndof):
            event.p_C[n] = coeff[n]

        # lumi weight / w0
        event.ref_weight = event.weight / coeff[0]
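        # In short: rw_w holds the raw weight at each reweight point, p_C the
        # coefficients of hyperPoly's polynomial parametrization in the EFT
        # parameters, chi2_ndof the closure of that parametrization, and
        # ref_weight rescales the lumi weight to the reference point via the
        # constant term coeff[0].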

    ##############################################
    ##############################################
    ##############################################

    # GEN Particles
    genPart = reader.products["gp"]

    # for searching
    search = GenSearch(genPart)

    # MET
    GenMET = {
        "pt": reader.products["genMET"][0].pt(),
        "phi": reader.products["genMET"][0].phi()
    }
    event.GenMET_pt = GenMET["pt"]
    event.GenMET_phi = GenMET["phi"]
    # reco naming
    event.MET_pt = GenMET["pt"]
    event.MET_phi = GenMET["phi"]

    # find heavy objects before they decay
    GenT = filter(lambda p: abs(p.pdgId()) == 6 and search.isLast(p), genPart)

    GenTopLep = []
    GenWLep = []
    GenTopHad = []
    GenWHad = []
    GenBLep = []
    GenBHad = []
    GenWs = []
    for top in GenT:
        GenW = [
            search.descend(w) for w in search.daughters(top)
            if abs(w.pdgId()) == 24
        ]
        GenB = [
            search.descend(w) for w in search.daughters(top)
            if abs(w.pdgId()) == 5
        ]
        if GenW:
            GenW = GenW[0]
            GenWs.append(GenW)
            wDecays = [abs(l.pdgId()) for l in search.daughters(GenW)]
            if 11 in wDecays or 13 in wDecays or 15 in wDecays:
                GenWLep.append(GenW)
                if GenB: GenBLep.append(GenB[0])
                GenTopLep.append(top)
            else:
                GenWHad.append(GenW)
                if GenB: GenBHad.append(GenB[0])
                GenTopHad.append(top)

    GenTops = map(lambda t: {var: getattr(t, var)()
                             for var in genTopVars}, GenT)
    GenTops.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenTop", genTopVars, GenTops)

    GenTopLep = map(lambda t: {var: getattr(t, var)()
                               for var in genTopVars}, GenTopLep)
    GenTopLep.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenTopLep", genTopVars, GenTopLep)

    GenTopHad = map(lambda t: {var: getattr(t, var)()
                               for var in genTopVars}, GenTopHad)
    GenTopHad.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenTopHad", genTopVars, GenTopHad)

    GenWs = map(lambda t: {var: getattr(t, var)() for var in genWVars}, GenWs)
    GenWs.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenW", genWVars, GenWs)

    GenWLep = map(lambda t: {var: getattr(t, var)()
                             for var in genWVars}, GenWLep)
    GenWLep.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenWLep", genWVars, GenWLep)

    GenWHad = map(lambda t: {var: getattr(t, var)()
                             for var in genWVars}, GenWHad)
    GenWHad.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenWHad", genWVars, GenWHad)

    GenBLep = map(lambda t: {var: getattr(t, var)()
                             for var in genWVars}, GenBLep)
    GenBLep.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenBLep", genWVars, GenBLep)

    GenBHad = map(lambda t: {var: getattr(t, var)()
                             for var in genWVars}, GenBHad)
    GenBHad.sort(key=lambda p: -p["pt"])
    fill_vector_collection(event, "GenBHad", genWVars, GenBHad)

    bPartonsFromTop = [
        b for b in filter(
            lambda p: abs(p.pdgId()) == 5 and p.numberOfMothers() == 1 and abs(
                p.mother(0).pdgId()) == 6, genPart)
    ]

    # genParticles for isolation
    GenParticlesIso = [
        l for l in filter(
            lambda p: abs(p.pdgId()) not in [12, 14, 16] and p.pt() > 5 and p.
            status() == 1, genPart)
    ]
    GenParticlesIso.sort(key=lambda p: -p.pt())
    GenParticlesIso = [{var: getattr(l, var)()
                        for var in genParticleVarsRead}
                       for l in GenParticlesIso]

    # genLeptons
    GenLeptonAll = [(search.ascend(l), l) for l in filter(
        lambda p: abs(p.pdgId()) in [11, 13] and search.isLast(p) and p.status(
        ) == 1, genPart)]
    GenLeptonAll.sort(key=lambda p: -p[1].pt())
    GenLepton = []

    for first, last in GenLeptonAll:

        mother = first.mother(0) if first.numberOfMothers() > 0 else None
        grandmother_pdgId = -999
        mother_pdgId = -999

        if mother:
            mother_pdgId = mother.pdgId()
            mother_ascend = search.ascend(mother)
            grandmother = mother_ascend.mother(
                0) if mother.numberOfMothers() > 0 else None
            if grandmother:
                grandmother_pdgId = grandmother.pdgId()

        genLep = {var: getattr(last, var)() for var in genLeptonVarsRead}
        genLep["motherPdgId"] = mother_pdgId
        genLep["grandmotherPdgId"] = grandmother_pdgId
        GenLepton.append(genLep)

    # Gen photons: particle-level isolated gen photons
    GenPhotonAll = [(search.ascend(l), l) for l in filter(
        lambda p: abs(p.pdgId()) == 22 and p.pt() > 1 and search.isLast(p),
        genPart)]
    #    GenPhotonAll = [ ( search.ascend(l), l ) for l in filter( lambda p: abs( p.pdgId() ) == 22 and search.isLast(p), genPart ) ]
    GenPhotonAll.sort(key=lambda p: -p[1].pt())
    GenPhoton = []

    for first, last in GenPhotonAll:

        mother = first.mother(0) if first.numberOfMothers() > 0 else None
        grandmother_pdgId = -999
        mother_pdgId = -999

        if mother:
            mother_pdgId = mother.pdgId()
            mother_ascend = search.ascend(mother)
            grandmother = mother_ascend.mother(
                0) if mother.numberOfMothers() > 0 else None
            if grandmother:
                grandmother_pdgId = grandmother.pdgId()

        GenP = {var: getattr(last, var)() for var in genPhotonVarsRead}
        GenP["motherPdgId"] = mother_pdgId
        GenP["grandmotherPdgId"] = grandmother_pdgId
        GenP["status"] = last.status()

        close_particles = filter(
            lambda p: p != last and deltaR2(
                {
                    "phi": last.phi(),
                    "eta": last.eta()
                }, {
                    "phi": p.phi(),
                    "eta": p.eta()
                }) < 0.16, search.final_state_particles_no_neutrinos)
        GenP["relIso04_all"] = sum([p.pt()
                                    for p in close_particles], 0) / last.pt()

        close_particles = filter(
            lambda p: p != last and deltaR2(
                {
                    "phi": last.phi(),
                    "eta": last.eta()
                }, {
                    "phi": p.phi(),
                    "eta": p.eta()
                }) < 0.09, search.final_state_particles_no_neutrinos)
        GenP["relIso03_all"] = sum([p.pt()
                                    for p in close_particles], 0) / last.pt()

        GenPhoton.append(GenP)

    # Jets
    GenJetAll = list(filter(genJetId, reader.products["genJets"]))
    GenJetAll.sort(key=lambda p: -p.pt())
    # Filter genJets
    GenJet = map(lambda t: {var: getattr(t, var)()
                            for var in genJetVarsRead}, GenJetAll)
    # BJets
    GenBJet = [b for b in filter(lambda p: abs(p.pdgId()) == 5, genPart)]

    for GenJ in GenJet:
        GenJ["isBJet"] = min(
            [999] +
            [deltaR2(GenJ, {
                "eta": b.eta(),
                "phi": b.phi()
            }) for b in GenBJet]) < 0.16
#        GenJ["parton"] = [ p.pdgId() for p in filter( lambda x: abs( x.pdgId() ) not in [12,14,16] and x.status() == 1, genPart ) if deltaR2( GenJ, {"eta":p.eta(), "phi":p.phi() } ) < 0.16 ]
#        GenJ["parton"] = [ p.pdgId() for p in filter( lambda x: abs( x.pdgId() ) not in [12,14,16], genPart ) if deltaR2( GenJ, {"eta":p.eta(), "phi":p.phi() } ) < 0.16 ]
#        print GenJ["parton"]

    # gen b jets
    GenBJet = list(filter(lambda j: j["isBJet"], GenJet))

    # store minimum DR to jets
    for GenP in GenPhoton:
        GenP["photonJetdR"] = min([999] + [deltaR(GenP, j) for j in GenJet])
        GenP["photonLepdR"] = min([999] + [deltaR(GenP, j) for j in GenLepton])
        GenP["photonAlldR"] = min([999] + [
            deltaR(GenP, j) for j in GenParticlesIso
            if abs(GenP["pt"] - j["pt"]) > 0.01 and j["pdgId"] != 22
        ])

    fill_vector_collection(event, "GenPhoton", genPhotonVars,
                           filter(lambda p: p["pt"] >= 5, GenPhoton))
    fill_vector_collection(event, "GenLepton", genLeptonVars, GenLepton)
    fill_vector_collection(event, "GenJet", genJetVars, GenJet)
    fill_vector_collection(event, "GenBJet", genJetVars, GenBJet)

    event.nGenElectron = len(filter(lambda l: abs(l["pdgId"]) == 11,
                                    GenLepton))
    event.nGenMuon = len(filter(lambda l: abs(l["pdgId"]) == 13, GenLepton))
    event.nGenLepton = len(GenLepton)
    event.nGenPhoton = len(filter(lambda p: p["pt"] >= 5, GenPhoton))
    event.nGenBJet = len(GenBJet)
    event.nGenJets = len(GenJet)
    event.nGenBJet = len(GenBJet)

    ##############################################
    ##############################################
    ##############################################

    # Analysis specific variables

    for il, genL in enumerate(GenLepton):
        if abs(genL["motherPdgId"]) not in [11, 13, 15, 23, 24, 25]: continue
        for genP in GenPhoton:
            if deltaR(genL, genP) < 0.1:
                dressedL = get4DVec(GenLepton[il]) + get4DVec(genP)
                GenLepton[il]["pt"] = dressedL.Pt()
                GenLepton[il]["eta"] = dressedL.Eta()
                GenLepton[il]["phi"] = dressedL.Phi()

    # no meson mother
    GenLeptonCMSUnfold = list(
        filter(
            lambda l: abs(l["motherPdgId"]) in [11, 13, 15, 23, 24, 25] and
            genLeptonSel_CMSUnfold(l), GenLepton))
    GenLeptonATLASUnfold = list(
        filter(
            lambda l: abs(l["motherPdgId"]) in [11, 13, 15, 23, 24, 25] and
            genLeptonSel_ATLASUnfold(l), GenLepton))

    GenPhotonCMSUnfold = list(
        filter(lambda p: genPhotonSel_CMSUnfold(p) and p["status"] == 1,
               GenPhoton))
    GenPhotonEECMSUnfold = list(
        filter(lambda p: genPhotonEESel_CMSUnfold(p) and p["status"] == 1,
               GenPhoton))
    GenPhotonATLASUnfold = list(
        filter(lambda p: genPhotonSel_ATLASUnfold(p) and p["status"] == 1,
               GenPhoton))

    GenJetCMSUnfold = list(filter(lambda j: genJetSel_CMSUnfold(j), GenJet))
    GenJetATLASUnfold = list(filter(lambda j: genJetSel_ATLASUnfold(j),
                                    GenJet))

    GenBJetCMSUnfold = list(filter(lambda j: genJetSel_CMSUnfold(j), GenBJet))
    GenBJetATLASUnfold = list(
        filter(lambda j: genJetSel_ATLASUnfold(j), GenBJet))

    for GenP in GenPhotonCMSUnfold:
        GenP["photonJetdR"] = min([999] +
                                  [deltaR(GenP, j) for j in GenJetCMSUnfold])
        GenP["photonLepdR"] = min(
            [999] + [deltaR(GenP, j) for j in GenLeptonCMSUnfold])
        GenP["photonAlldR"] = min([999] + [
            deltaR(GenP, j) for j in GenParticlesIso
            if abs(GenP["pt"] - j["pt"]) > 0.01 and j["pdgId"] != 22
        ])

    for GenP in GenPhotonEECMSUnfold:
        GenP["photonJetdR"] = min([999] +
                                  [deltaR(GenP, j) for j in GenJetCMSUnfold])
        GenP["photonLepdR"] = min(
            [999] + [deltaR(GenP, j) for j in GenLeptonCMSUnfold])
        GenP["photonAlldR"] = min([999] + [
            deltaR(GenP, j) for j in GenParticlesIso
            if abs(GenP["pt"] - j["pt"]) > 0.01 and j["pdgId"] != 22
        ])

    for GenP in GenPhotonATLASUnfold:
        GenP["photonJetdR"] = min([999] +
                                  [deltaR(GenP, j) for j in GenJetATLASUnfold])
        GenP["photonLepdR"] = min(
            [999] + [deltaR(GenP, j) for j in GenLeptonATLASUnfold])
        GenP["photonAlldR"] = min([999] + [
            deltaR(GenP, j) for j in GenParticlesIso
            if abs(GenP["pt"] - j["pt"]) > 0.01 and j["pdgId"] != 22
        ])

    # Isolated
    GenPhotonCMSUnfold = filter(lambda g: g["photonAlldR"] > 0.1,
                                GenPhotonCMSUnfold)
    GenPhotonCMSUnfold = filter(
        lambda g: abs(g["grandmotherPdgId"]) in range(37) + [2212] and abs(g[
            "motherPdgId"]) in range(37) + [2212], GenPhotonCMSUnfold)

    GenPhotonEECMSUnfold = filter(lambda g: g["photonAlldR"] > 0.1,
                                  GenPhotonEECMSUnfold)
    GenPhotonEECMSUnfold = filter(
        lambda g: abs(g["grandmotherPdgId"]) in range(37) + [2212] and abs(g[
            "motherPdgId"]) in range(37) + [2212], GenPhotonEECMSUnfold)

    GenPhotonATLASUnfold = filter(lambda g: g["relIso03_all"] < 0.1,
                                  GenPhotonATLASUnfold)
    GenPhotonATLASUnfold = filter(
        lambda g: abs(g["grandmotherPdgId"]) in range(37) + [2212] and abs(g[
            "motherPdgId"]) in range(37) + [2212], GenPhotonATLASUnfold)

    for GenJ in GenJetATLASUnfold:
        GenJ["jetPhotondR"] = min(
            [999] + [deltaR(GenJ, p) for p in GenPhotonATLASUnfold])
        GenJ["jetLepdR"] = min([999] +
                               [deltaR(GenJ, p) for p in GenLeptonATLASUnfold])

    for GenJ in GenBJetATLASUnfold:
        GenJ["jetPhotondR"] = min(
            [999] + [deltaR(GenJ, p) for p in GenPhotonATLASUnfold])
        GenJ["jetLepdR"] = min([999] +
                               [deltaR(GenJ, p) for p in GenLeptonATLASUnfold])

    for i_j, GenJ in enumerate(GenJetCMSUnfold):
        GenJ["jetPhotondR"] = min(
            [999] + [deltaR(GenJ, p) for p in GenPhotonCMSUnfold])
        GenJ["jetLepdR"] = min([999] +
                               [deltaR(GenJ, p) for p in GenLeptonCMSUnfold])
#        GenJ["jetJetdR"]    =  min( [999] + [ deltaR( GenJ, p ) for i_p, p in enumerate(GenJetCMSUnfold) if i_p != i_j ] )
#        print GenJ["jetJetdR"]

    for GenJ in GenBJetCMSUnfold:
        GenJ["jetPhotondR"] = min(
            [999] + [deltaR(GenJ, p) for p in GenPhotonCMSUnfold])
        GenJ["jetLepdR"] = min([999] +
                               [deltaR(GenJ, p) for p in GenLeptonCMSUnfold])

    GenJetEECMSUnfold = copy.deepcopy(GenJetCMSUnfold)
    for GenJ in GenJetEECMSUnfold:
        GenJ["jetPhotondR"] = min(
            [999] + [deltaR(GenJ, p) for p in GenPhotonEECMSUnfold])

    GenBJetEECMSUnfold = copy.deepcopy(GenBJetCMSUnfold)
    for GenJ in GenBJetEECMSUnfold:
        GenJ["jetPhotondR"] = min(
            [999] + [deltaR(GenJ, p) for p in GenPhotonEECMSUnfold])

    # CMS Unfolding cleaning
    GenPhotonCMSUnfold = filter(lambda g: g["photonLepdR"] > 0.4,
                                GenPhotonCMSUnfold)
    GenJetCMSUnfold = filter(lambda g: g["jetPhotondR"] > 0.1, GenJetCMSUnfold)
    GenBJetCMSUnfold = filter(lambda g: g["jetPhotondR"] > 0.1,
                              GenBJetCMSUnfold)
    GenJetCMSUnfold = filter(lambda g: g["jetLepdR"] > 0.4, GenJetCMSUnfold)
    GenBJetCMSUnfold = filter(lambda g: g["jetLepdR"] > 0.4, GenBJetCMSUnfold)

    GenPhotonEECMSUnfold = filter(lambda g: g["photonLepdR"] > 0.1,
                                  GenPhotonEECMSUnfold)
    GenJetEECMSUnfold = filter(lambda g: g["jetPhotondR"] > 0.1,
                               GenJetEECMSUnfold)
    GenBJetEECMSUnfold = filter(lambda g: g["jetPhotondR"] > 0.1,
                                GenBJetEECMSUnfold)
    GenJetEECMSUnfold = filter(lambda g: g["jetLepdR"] > 0.4,
                               GenJetEECMSUnfold)
    GenBJetEECMSUnfold = filter(lambda g: g["jetLepdR"] > 0.4,
                                GenBJetEECMSUnfold)

    # ATLAS Unfolding cleaning
    GenPhotonATLASUnfold = filter(lambda g: g["photonLepdR"] > 1.0,
                                  GenPhotonATLASUnfold)
    GenJetATLASUnfold = filter(lambda g: g["jetPhotondR"] > 0.4,
                               GenJetATLASUnfold)
    GenBJetATLASUnfold = filter(lambda g: g["jetPhotondR"] > 0.4,
                                GenBJetATLASUnfold)
    GenJetATLASUnfold = filter(lambda g: g["jetLepdR"] > 0.4,
                               GenJetATLASUnfold)
    GenBJetATLASUnfold = filter(lambda g: g["jetLepdR"] > 0.4,
                                GenBJetATLASUnfold)

    GenPhotonCMSUnfold.sort(key=lambda p: -p["pt"])
    genP0 = (GenPhotonCMSUnfold[:1] + [None])[0]
    if genP0: fill_vector(event, "GenPhotonCMSUnfold0", genPhotonVars, genP0)
    if genP0: fill_vector(event, "PhotonGood0", genPhotonVars, genP0)
    if genP0:
        fill_vector(event, "PhotonNoChgIsoNoSieie0", genPhotonVars, genP0)

    GenPhotonEECMSUnfold.sort(key=lambda p: -p["pt"])
    genP0 = (GenPhotonEECMSUnfold[:1] + [None])[0]
    if genP0: fill_vector(event, "PhotonEEGood0", genPhotonVars, genP0)

    GenPhotonATLASUnfold.sort(key=lambda p: -p["pt"])
    genP0 = (GenPhotonATLASUnfold[:1] + [None])[0]
    if genP0: fill_vector(event, "GenPhotonATLASUnfold0", genPhotonVars, genP0)

    GenLeptonCMSUnfold.sort(key=lambda p: -p["pt"])
    genL0 = (GenLeptonCMSUnfold[:1] + [None])[0]
    if genL0: fill_vector(event, "GenLeptonCMSUnfold0", genLeptonVars, genL0)
    if genL0: fill_vector(event, "LeptonTight0", genLeptonVars, genL0)

    GenJetCMSUnfold.sort(key=lambda p: -p["pt"])
    genJ0, genJ1, genJ2 = (GenJetCMSUnfold[:3] + [None, None, None])[:3]
    if genJ0: fill_vector(event, "GenJetsCMSUnfold0", genJetVars, genJ0)
    if genJ1: fill_vector(event, "GenJetsCMSUnfold1", genJetVars, genJ1)
    if genJ2: fill_vector(event, "GenJetsCMSUnfold2", genJetVars, genJ2)

    GenBJetCMSUnfold.sort(key=lambda p: -p["pt"])
    genB0, genB1 = (GenBJetCMSUnfold[:2] + [None, None])[:2]
    if genB0: fill_vector(event, "GenBJetCMSUnfold0", genJetVars, genB0)
    if genB1: fill_vector(event, "GenBJetCMSUnfold1", genJetVars, genB1)

    event.nGenElectronCMSUnfold = len(
        filter(lambda l: abs(l["pdgId"]) == 11, GenLeptonCMSUnfold))
    event.nGenMuonCMSUnfold = len(
        filter(lambda l: abs(l["pdgId"]) == 13, GenLeptonCMSUnfold))
    event.nGenLeptonCMSUnfold = len(GenLeptonCMSUnfold)
    event.nGenPhotonCMSUnfold = len(GenPhotonCMSUnfold)
    event.nGenBJetCMSUnfold = len(GenBJetCMSUnfold)
    event.nGenJetsCMSUnfold = len(GenJetCMSUnfold)

    event.nPhotonEEGood = len(GenPhotonEECMSUnfold)
    event.nGenBJetEECMSUnfold = len(GenBJetEECMSUnfold)
    event.nGenJetsEECMSUnfold = len(GenJetEECMSUnfold)

    # use reco naming for easier handling
    event.nLeptonTight = event.nGenLeptonCMSUnfold
    event.nElectronTight = event.nGenElectronCMSUnfold
    event.nMuonTight = event.nGenMuonCMSUnfold
    event.nLeptonVetoIsoCorr = event.nGenLeptonCMSUnfold
    event.nJetGood = event.nGenJetsCMSUnfold
    event.nBTagGood = event.nGenBJetCMSUnfold
    event.nPhotonGood = event.nGenPhotonCMSUnfold
    event.nPhotonNoChgIsoNoSieie = event.nGenPhotonCMSUnfold

    event.nGenElectronATLASUnfold = len(
        filter(lambda l: abs(l["pdgId"]) == 11, GenLeptonATLASUnfold))
    event.nGenMuonATLASUnfold = len(
        filter(lambda l: abs(l["pdgId"]) == 13, GenLeptonATLASUnfold))
    event.nGenLeptonATLASUnfold = len(GenLeptonATLASUnfold)
    event.nGenPhotonATLASUnfold = len(GenPhotonATLASUnfold)
    event.nGenBJetATLASUnfold = len(GenBJetATLASUnfold)
    event.nGenJetsATLASUnfold = len(GenJetATLASUnfold)

    ##############################################
    ##############################################
    ##############################################

    if GenPhotonCMSUnfold:
        if GenLeptonCMSUnfold:
            event.dPhiLepGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                          GenLeptonCMSUnfold[0]["phi"])
            event.dRLepGamma = deltaR(GenPhotonCMSUnfold[0],
                                      GenLeptonCMSUnfold[0])
        if GenTopHad:
            event.dPhiTopHadGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                             GenTopHad[0]["phi"])
            event.dRTopHadGamma = deltaR(GenPhotonCMSUnfold[0], GenTopHad[0])
        if GenWHad:
            event.dPhiWHadGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                           GenWHad[0]["phi"])
            event.dRWHadGamma = deltaR(GenPhotonCMSUnfold[0], GenWHad[0])
        if GenTopLep:
            event.dPhiTopLepGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                             GenTopLep[0]["phi"])
            event.dRTopLepGamma = deltaR(GenPhotonCMSUnfold[0], GenTopLep[0])
        if GenWLep:
            event.dPhiWLepGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                           GenWLep[0]["phi"])
            event.dRWLepGamma = deltaR(GenPhotonCMSUnfold[0], GenWLep[0])
        if GenBHad:
            event.dPhiBHadGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                           GenBHad[0]["phi"])
            event.dRBHadGamma = deltaR(GenPhotonCMSUnfold[0], GenBHad[0])
        if GenBLep:
            event.dPhiBLepGamma = deltaPhi(GenPhotonCMSUnfold[0]["phi"],
                                           GenBLep[0]["phi"])
            event.dRBLepGamma = deltaR(GenPhotonCMSUnfold[0], GenBLep[0])
    if GenBLep:
        if GenWLep:
            event.dPhiBLepWLep = deltaPhi(GenBLep[0]["phi"], GenWLep[0]["phi"])
            event.dRBLepWLep = deltaR(GenBLep[0], GenWLep[0])
        if GenBHad:
            event.dPhiBLepBHad = deltaPhi(GenBLep[0]["phi"], GenBHad[0]["phi"])
            event.dRBLepBHad = deltaR(GenBLep[0], GenBHad[0])
    if GenWHad:
        if GenBHad:
            event.dPhiBHadWHad = deltaPhi(GenBHad[0]["phi"], GenWHad[0]["phi"])
            event.dRBHadWHad = deltaR(GenBHad[0], GenWHad[0])
        if GenWLep:
            event.dPhiWLepWHad = deltaPhi(GenWLep[0]["phi"], GenWHad[0]["phi"])
            event.dRWLepWHad = deltaR(GenWLep[0], GenWHad[0])
    if GenTopLep and GenTopHad:
        event.dPhiTopLepTopHad = deltaPhi(GenTopLep[0]["phi"],
                                          GenTopHad[0]["phi"])
        event.dRTopLepTopHad = deltaR(GenTopLep[0], GenTopHad[0])
    if GenLeptonCMSUnfold:
        event.dPhiLepMET = deltaPhi(GenLeptonCMSUnfold[0]["phi"],
                                    GenMET["phi"])

    event.ht = -999
    event.m3 = -999
    event.mT = -999
    event.mLtight0Gamma = -999
    if GenJetCMSUnfold:
        event.ht = sum([j["pt"] for j in GenJetCMSUnfold])
        event.m3 = m3(GenJetCMSUnfold)[0]

    if GenLeptonCMSUnfold:
        event.mT = mT(GenLeptonCMSUnfold[0], GenMET)
        if GenPhotonCMSUnfold:
            event.mLtight0Gamma = (get4DVec(GenLeptonCMSUnfold[0]) +
                                   get4DVec(GenPhotonCMSUnfold[0])).M()
targetPath = os.path.join(directory, "gen" + postfix, sample.name)

# Single file post processing
if options.nJobs > 1:
    n_files_before = len(sample.files)
    sample = sample.split(options.nJobs)[options.job]
    n_files_after = len(sample.files)
    print "Running job %i/%i over %i files from a total of %i." % (
        options.job, options.nJobs, n_files_after, n_files_before)
    logger.info("Running job %i/%i over %i files from a total of %i.",
                options.job, options.nJobs, n_files_after, n_files_before)

if os.path.exists(targetPath) and options.overwrite:
    if options.nJobs > 1:
        logger.warning("NOT removing directory %s because nJobs = %i",
                       targetPath, options.nJobs)
    else:
        logger.info("Output directory %s exists. Deleting.", targetPath)
        shutil.rmtree(targetPath, ignore_errors=True)

if not os.path.exists(targetPath):
    try:
        os.makedirs(targetPath)
        logger.info("Created output directory %s.", targetPath)
    except:
        logger.info("Directory %s already exists.", targetPath)
        pass

if not os.path.exists(output_directory):
    try:
        os.makedirs(output_directory)
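
The check-then-create blocks above can race when several jobs start at once; the bare except absorbs the resulting "directory exists" error, but it also silences every other failure. A sketch that tolerates only the "already exists" case, as an alternative rather than the repository's code:

import errno
import os

def ensure_directory(path):
    try:
        os.makedirs(path)                  # create the path and any missing parents
    except OSError as exc:
        if exc.errno != errno.EEXIST:      # re-raise everything except "exists"
            raise
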
Example #5
     if f.endswith('.root'):
         full_filename = os.path.join(dirName, f)
         if not '_reHadd_' in f:
             to_skip = False
             for skip in options.skip:
                 if skip in f:
                     logger.info( "Found skip string %s in %s. Skipping.", skip, f )
                     to_skip = True
                     break
             if to_skip: continue
             isOK =  checkRootFile( full_filename, checkForObjects = [options.treeName]) \
                     if options.treeName is not None else checkRootFile( full_filename )
             if isOK:
                 rootFiles.append( f )
             else:
                 logger.warning( "File %s does not look OK. Checked for tree: %r", full_filename, options.treeName )
         else:
             logger.info( "Found '_reHadd_' in file %s in %s. Skipping.", full_filename, dirName )
 job = []
 jobsize = 0
 for fname in rootFiles:
     filename, file_extension = os.path.splitext(fname)
     n_str = filename.split('_')[-1]
     if n_str.isdigit():
         full_filename = os.path.join(dirName, fname)
         jobsize += os.path.getsize( full_filename  )
         job.append( full_filename )
         if jobsize>1024**3*options.sizeGB:
             jobs.append(job)
             job = []
             jobsize = 0
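
The loop above accumulates files into a job until the summed size exceeds options.sizeGB gibibytes; whether a final, partially filled job is flushed happens outside the lines shown. A self-contained sketch of the same batching with the flush made explicit (sizes are passed in directly so the example stays off the file system):

def batch_by_size(files_with_sizes, max_bytes):
    jobs, job, jobsize = [], [], 0
    for fname, size in files_with_sizes:
        job.append(fname)
        jobsize += size
        if jobsize > max_bytes:    # close the batch once it grows past the limit
            jobs.append(job)
            job, jobsize = [], 0
    if job:                        # flush the trailing, partially filled batch
        jobs.append(job)
    return jobs

print(batch_by_size([("a.root", 700), ("b.root", 500), ("c.root", 300)], 1000))
# [['a.root', 'b.root'], ['c.root']]
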
Example #6
                       type=str,
                       help="Which sample to plot")
args = argParser.parse_args()

# Logger
import Analysis.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger(args.logLevel, logFile=None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile=None)

logger.debug("Start run_estimate.py")

if args.checkOnly: args.overwrite = False

if not args.controlRegion:
    logger.warning("ControlRegion not known")
    sys.exit(0)

# load and define the EFT sample
from TTGammaEFT.Samples.genTuples_TTGamma_EFT_postProcessed import *
eftSample = eval(args.sample)

# settings for EFT reweighting
w = WeightInfo(eftSample.reweight_pkl)
w.set_order(args.order)
variables = w.variables


def get_weight_string(parameters):
    return w.get_weight_string(**parameters)
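
A hypothetical call of the helper above; the coupling name ctZ and its value are placeholders, and valid names are whatever w.variables provides for the chosen model and order:

params = {"ctZ": 2.0}                       # hypothetical EFT point
weight_string = get_weight_string(params)   # expands to w.get_weight_string(ctZ=2.0)
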
Example #7
    def __init__(self, model_name, MG260=False):

        self.model_name = model_name
        self.__isPreInitialized = False

        # Load model
        model_file = os.path.expandvars(
            "$CMSSW_BASE/python/TTGammaEFT/Generation/parameters.py")
        logger.info("Loading model %s from file %s", model_name, model_file)

        try:
            tmp_module = imp.load_source(model_name,
                                         os.path.expandvars(model_file))
            self.model = getattr(tmp_module, model_name)
        except:
            logger.error("Failed to import model %s from %s", model_name,
                         model_file)
            raise

        # make work directory
        self.uniquePath = makeUniquePath()
        logger.info("Using temporary directory %s", self.uniquePath)

        # MG file locations
        self.data_path = os.path.expandvars(
            '$CMSSW_BASE/src/TTGammaEFT/Generation/data')

        # MG5 directories

        if self.model_name == 'dim6top_LO' or self.model_name == 'dim6top_LO_v2' or MG260:
            logger.warning("Model dim6top_LO: Using MG 2.6.0!")
            self.MG5_tarball = '/afs/hephy.at/data/rschoefbeck02/MG/MG5_aMC_v2.6.0.tar.gz'  # From MG webpage --> WARNING: No matching PDFs for pattern: Ct10nlo.LHgrid
            self.MG5_tmpdir = os.path.join(self.uniquePath, 'MG5_aMC_v2_6_0')
        else:
            self.MG5_tarball = '/afs/hephy.at/data/dspitzbart01/MG5_aMC_v2.3.3.tar.gz'
            self.MG5_tmpdir = os.path.join(self.uniquePath, 'MG5_aMC_v2_3_3')

        logger.info("Will use MG5 from %s", self.MG5_tarball)

        # GridPack directories
        self.GP_tarball = "/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc481/13TeV/madgraph/V5_2.3.3/ttZ01j_5f_MLM/v1/ttZ01j_5f_tarball.tar.xz"
        self.GP_tmpdir = os.path.join(self.uniquePath, 'centralGridpack')
        logger.info("Will use gridpack from %s", self.GP_tarball)

        # restriction file
        self.restrictCardTemplate = os.path.join(
            self.data_path, 'template',
            'template_restrict_no_b_mass_' + model_name + '.dat')
        self.restrictCard = os.path.join(self.MG5_tmpdir, 'models',
                                         self.model_name,
                                         'restrict_no_b_mass.dat')

        # Consistency check of the model: Check that couplings are unique
        self.all_model_couplings = [c[0] for c in sum(self.model.values(), [])]
        seen = set()
        uniq = [
            x for x in self.all_model_couplings
            if x not in seen and not seen.add(x)
        ]
        if len(seen) != len(self.all_model_couplings):
            logger.error(
                "Apparently, list of couplings for model %s is not unique: %s. Check model file %s.",
                self.model_name, ",".join(self.all_model_couplings),
                model_file)
            raise RuntimeError

        # Get default values for model
        self.default_model_couplings = {}
        for param_set in self.model.values():
            for param, val in param_set:
                self.default_model_couplings[param] = val
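
The uniqueness check above relies on set.add returning None: "not seen.add(x)" is therefore always true and only has the side effect of recording x, so uniq keeps the first occurrence of every coupling while seen collects the distinct names. A stand-alone illustration with placeholder coupling names:

couplings = ["ctZ", "ctW", "ctZ"]       # hypothetical list with a duplicate
seen = set()
uniq = [c for c in couplings if c not in seen and not seen.add(c)]
assert uniq == ["ctZ", "ctW"]
assert len(seen) != len(couplings)      # duplicates present -> the error above fires
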
Example #8
                       help="Run only job i")
args = argParser.parse_args()

if args.year != "RunII": args.year = int(args.year)
if args.checkOnly: args.overwrite = False

# Logging
import Analysis.Tools.logger as logger
logger = logger.get_logger(args.logLevel, logFile=None)
import RootTools.core.logger as logger_rt
logger_rt = logger_rt.get_logger(args.logLevel, logFile=None)

logger.debug("Start run_estimate.py")

if not args.controlRegion:
    logger.warning("ControlRegion not known")
    sys.exit(0)

if not "3p" in args.controlRegion:
    print "Need the 3p region as input! Got %s" % args.controlRegion
    sys.exit(0)

print "Combining caches from %s and %s to %s" % (args.controlRegion.replace(
    "3p", "3"), args.controlRegion.replace("3p", "4p"), args.controlRegion)

parameters3p = allRegions[args.controlRegion]["parameters"]
parameters3 = allRegions[args.controlRegion.replace("3p", "3")]["parameters"]
parameters4p = allRegions[args.controlRegion.replace("3p", "4p")]["parameters"]
channels = allRegions[args.controlRegion]["channels"]
photonSelection = not allRegions[args.controlRegion]["noPhotonCR"]
if args.noInclusive:
Example #9
def filler(event):

    event.run, event.luminosity, event.evt = reader.evt
    event.weight = lumiweight

    if reader.position % 100 == 0:
        logger.info("At event %i/%i", reader.position, reader.nEvents)

    # EFT weights
    if options.addReweights:
        event.nrw = weightInfo.nid
        lhe_weights = reader.products['lhe'].weights()
        weights = []
        param_points = []

        for weight in lhe_weights:
            # Store nominal weight (First position!)
            if weight.id == 'rwgt_1': event.rw_nominal = weight.wgt

            if not weight.id in weightInfo.id: continue

            pos = weightInfo.data[weight.id]
            event.rw_w[pos] = weight.wgt
            weights += [weight.wgt]
            interpreted_weight = interpret_weight(weight.id)

            for var in weightInfo.variables:
                getattr(event, "rw_" + var)[pos] = interpreted_weight[var]

            # weight data for interpolation
            if not hyperPoly.initialized:
                param_points += [
                    tuple(interpreted_weight[var]
                          for var in weightInfo.variables)
                ]

        # get list of values of ref point in specific order
        ref_point_coordinates = [
            weightInfo.ref_point_coordinates[var]
            for var in weightInfo.variables
        ]

        # Initialize with Reference Point
        if not hyperPoly.initialized:
            hyperPoly.initialize(param_points, ref_point_coordinates)

        coeff = hyperPoly.get_parametrization(weights)
        event.np = hyperPoly.ndof
        event.chi2_ndof = hyperPoly.chi2_ndof(coeff, weights)

        if event.chi2_ndof > 10**-6:
            logger.warning("chi2_ndof is large: %f", event.chi2_ndof)

        for n in xrange(hyperPoly.ndof):
            event.p_C[n] = coeff[n]

        # lumi weight / w0
        event.ref_weight = event.weight / coeff[0]

    # GEN Particles
    genPart = reader.products['gp']

    # for searching
    search = GenSearch(genPart)

    # MET
    GenMET = {
        'pt': reader.products['genMET'][0].pt(),
        'phi': reader.products['genMET'][0].phi()
    }
    event.GenMET_pt = GenMET['pt']
    event.GenMET_phi = GenMET['phi']

    # find heavy objects before they decay
    GenTops = map(
        lambda t: {var: getattr(t, var)()
                   for var in genTopVars},
        filter(lambda p: abs(p.pdgId()) == 6 and search.isLast(p), genPart))
    GenTops.sort(key=lambda p: -p['pt'])
    fill_vector_collection(event, "GenTop", genTopVars, GenTops)

    # genLeptons: prompt gen-leptons
    #    GenLeptonsAll = [ (search.ascend(l), l) for l in filter( lambda p: abs( p.pdgId() ) in [11,13] and search.isLast(p) and p.status() == 1, genPart ) ]
    GenLeptonsAll = [(search.ascend(l), l) for l in filter(
        lambda p: abs(p.pdgId()) in [11, 13] and search.isLast(p), genPart)]
    GenPromptLeptons = []
    GenAllLeptons = []

    for first, last in GenLeptonsAll:

        mother = first.mother(0) if first.numberOfMothers() > 0 else None
        mother_pdgId = -999
        grandmother_pdgId = -999

        if mother:
            mother_pdgId = mother.pdgId()
            mother_ascend = search.ascend(mother)
            grandmother = mother_ascend.mother(
                0) if mother.numberOfMothers() > 0 else None
            grandmother_pdgId = grandmother.pdgId() if grandmother else -999

        genLep = {var: getattr(last, var)() for var in genLeptonVarsRead}
        genLep['motherPdgId'] = mother_pdgId
        genLep['grandmotherPdgId'] = grandmother_pdgId
        GenAllLeptons.append(genLep)

        if abs(mother_pdgId) in [11, 13, 15, 23, 24, 25
                                 ] and isGoodGenLepton(genLep):
            GenPromptLeptons.append(genLep)

    # Filter gen leptons
    GenAllLeptons.sort(key=lambda p: -p['pt'])
    fill_vector_collection(event, "GenAllLepton", genLeptonVars, GenAllLeptons)

    GenPromptLeptons.sort(key=lambda p: -p['pt'])

    if GenPromptLeptons:
        GenPromptLeptons[0]["clean"] = 1  #dont clean the high pT photons
        for i, GenPromptLepton in enumerate(GenPromptLeptons[::-1][:-1]):
            GenPromptLepton['clean'] = min([999] + [
                deltaR2(GenPromptLepton, p)
                for p in GenPromptLeptons[::-1][i + 1:]
            ]) > 0.16
        GenPromptLeptons = list(filter(lambda j: j["clean"], GenPromptLeptons))

    GenPromptElectrons = list(
        filter(lambda l: abs(l['pdgId']) == 11, GenPromptLeptons))
    GenPromptMuons = list(
        filter(lambda l: abs(l['pdgId']) == 13, GenPromptLeptons))
    event.nGenElectron = len(GenPromptElectrons)
    event.nGenMuon = len(GenPromptMuons)

    # Gen photons: particle-level isolated gen photons
    GenPhotonsAll = [(search.ascend(l), l) for l in filter(
        lambda p: abs(p.pdgId()) == 22 and p.pt() > 5 and search.isLast(p),
        genPart)]
    GenPhotonsAll.sort(key=lambda p: -p[1].pt())
    GenPhotons = []
    GenAllPhotons = []
    GenMGPhotons = []
    GenMGAllPhotons = []

    for first, last in GenPhotonsAll:
        mother_pdgId = first.mother(
            0).pdgId() if first.numberOfMothers() > 0 else -999
        GenPhoton = {var: getattr(last, var)() for var in genPhotonVarsRead}
        GenMGPhoton = {var: getattr(first, var)() for var in genPhotonVarsRead}

        GenPhoton['motherPdgId'] = mother_pdgId
        GenPhoton['status'] = last.status()
        GenMGPhoton['motherPdgId'] = mother_pdgId
        GenMGPhoton['status'] = first.status()

        mother_ascend = search.ascend(first.mother(0))
        grandmother = mother_ascend.mother(
            0) if first.mother(0).numberOfMothers() > 0 else None
        grandmother_pdgId = grandmother.pdgId() if grandmother else 0

        close_particles = filter(
            lambda p: p != last and deltaR2(
                {
                    'phi': last.phi(),
                    'eta': last.eta()
                }, {
                    'phi': p.phi(),
                    'eta': p.eta()
                }) < 0.16, search.final_state_particles_no_neutrinos)
        GenPhoton['relIso04_all'] = sum([p.pt() for p in close_particles],
                                        0) / last.pt()
        GenPhoton['photonJetdR'] = 999
        GenPhoton['photonLepdR'] = 999
        GenAllPhotons.append(GenPhoton)
        if isGoodGenPhoton(GenPhoton):
            GenPhotons.append(GenPhoton)

        close_particles = filter(
            lambda p: p != first and deltaR2(
                {
                    'phi': first.phi(),
                    'eta': first.eta()
                }, {
                    'phi': p.phi(),
                    'eta': p.eta()
                }) < 0.16, search.final_state_particles_no_neutrinos)
        GenMGPhoton['relIso04_all'] = sum([p.pt() for p in close_particles],
                                          0) / first.pt()
        GenMGPhoton['photonJetdR'] = 999
        GenMGPhoton['photonLepdR'] = 999
        GenMGAllPhotons.append(GenMGPhoton)
        if isGoodGenPhoton(GenMGPhoton):
            GenMGPhotons.append(GenMGPhoton)

    fill_vector_collection(event, "GenAllPhoton", genPhotonVars, GenAllPhotons)
    fill_vector_collection(event, "GenMGAllPhoton", genPhotonVars,
                           GenMGAllPhotons)

    if not options.noCleaning:
        # deltaR cleaning to photons as in run card
        GenPhotons = list(
            filter(
                lambda p: min(
                    [999] + [deltaR2(p, l) for l in GenPromptLeptons]) > 0.04,
                GenPhotons))
        GenMGPhotons = list(
            filter(
                lambda p: min(
                    [999] + [deltaR2(p, l) for l in GenPromptLeptons]) > 0.04,
                GenMGPhotons))

    # Jets
    GenJetsAll = list(filter(genJetId, reader.products['genJets']))
    GenJetsAll.sort(key=lambda p: -p.pt())
    # Filter genJets
    GenAllJets = map(
        lambda t: {var: getattr(t, var)()
                   for var in genJetVarsRead}, GenJetsAll)
    bPartons = [b for b in filter(lambda p: abs(p.pdgId()) == 5, genPart)]

    for GenJet in GenAllJets:
        GenJet['matchBParton'] = min([999] + [
            deltaR2(GenJet, {
                'eta': b.eta(),
                'phi': b.phi()
            }) for b in bPartons
        ]) < 0.04

    # store if gen-jet is DR matched to a B parton in cone of 0.2
    GenJets = list(filter(lambda j: isGoodGenJet(j), GenAllJets))

    trueAllNonBjets = list(filter(lambda j: not j['matchBParton'], GenAllJets))
    trueAllBjets = list(filter(lambda j: j['matchBParton'], GenAllJets))
    fill_vector_collection(event, "GenAllJet", genJetVars, GenAllJets)
    fill_vector_collection(event, "GenAllBJet", genJetVars, trueAllBjets)

    if not options.noCleaning:
        GenJets = list(
            filter(
                lambda j: min([999] + [deltaR2(j, p)
                                       for p in GenPhotons]) > 0.04, GenJets))

    # gen b jets
    trueBjets = list(filter(lambda j: j['matchBParton'], GenJets))
    trueNonBjets = list(filter(lambda j: not j['matchBParton'], GenJets))

    # Mimic b reconstruction (if the trailing b fails acceptance, we supplement with the leading non-b jet)
    GenBj0, GenBj1 = (trueBjets + [None, None])[:2]
    if GenBj0: fill_vector(event, "GenBj0", genJetVars, GenBj0)
    if GenBj1: fill_vector(event, "GenBj1", genJetVars, GenBj1)

    # store minimum DR to jets
    for GenPhoton in GenPhotons + GenMGPhotons:
        GenPhoton['photonJetdR'] = min([999] +
                                       [deltaR(GenPhoton, j) for j in GenJets])
        GenPhoton['photonLepdR'] = min(
            [999] + [deltaR(GenPhoton, j) for j in GenPromptLeptons])

    fill_vector_collection(event, "GenPhoton", genPhotonVars, GenPhotons)
    fill_vector_collection(event, "GenMGPhoton", genPhotonVars, GenMGPhotons)
    fill_vector_collection(event, "GenLepton", genLeptonVars, GenPromptLeptons)
    fill_vector_collection(event, "GenJet", genJetVars, GenJets)
    event.nGenBJet = len(trueBjets)

    event.m3 = m3(GenJets)[0]
    if len(GenPhotons) > 0:
        event.m3gamma = m3(GenJets, photon=GenPhotons[0])[0]

    # Observables
    if len(GenPromptLeptons) > 1:
        event.mll = (get4DVec(GenPromptLeptons[0]) +
                     get4DVec(GenPromptLeptons[1])).M()
        if len(GenPhotons) > 0:
            event.mllgamma = (get4DVec(GenPromptLeptons[0]) +
                              get4DVec(GenPromptLeptons[1]) +
                              get4DVec(GenPhotons[0])).M()

    event.minDRjj = min([
        deltaR(j1, j2) for i, j1 in enumerate(trueNonBjets[:-1])
        for j2 in trueNonBjets[i + 1:]
    ] + [999])
    event.minDRbb = min([
        deltaR(b1, b2) for i, b1 in enumerate(trueBjets[:-1])
        for b2 in trueBjets[i + 1:]
    ] + [999])
    event.minDRll = min([
        deltaR(l1, l2) for i, l1 in enumerate(GenPromptLeptons[:-1])
        for l2 in GenPromptLeptons[i + 1:]
    ] + [999])
    event.minDRaa = min([
        deltaR(g1, g2) for i, g1 in enumerate(GenPhotons[:-1])
        for g2 in GenPhotons[i + 1:]
    ] + [999])
    event.minDRbj = min(
        [deltaR(b, j) for b in trueBjets for j in trueNonBjets] + [999])
    event.minDRaj = min(
        [deltaR(a, j) for a in GenPhotons for j in trueNonBjets] + [999])
    event.minDRjl = min(
        [deltaR(l, j) for l in GenPromptLeptons for j in trueNonBjets] + [999])
    event.minDRab = min([deltaR(a, b) for a in GenPhotons
                         for b in trueBjets] + [999])
    event.minDRbl = min(
        [deltaR(l, b) for l in GenPromptLeptons for b in trueBjets] + [999])
    event.minDRal = min(
        [deltaR(l, a) for l in GenPromptLeptons for a in GenPhotons] + [999])
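
The cross-collection minDR observables above (minDRbj, minDRaj, minDRjl, minDRab, minDRbl, minDRal) all follow the same pattern: the smallest delta R between two collections, with 999 as the value when either collection is empty. A sketch of a helper that states the pattern once; the helper is hypothetical, deltaR is the same function used throughout the snippet:

def min_delta_r(coll_a, coll_b, default=999):
    return min([deltaR(a, b) for a in coll_a for b in coll_b] + [default])

# e.g. the event.minDRaj assignment above is equivalent to
# event.minDRaj = min_delta_r(GenPhotons, trueNonBjets)
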
Example #10
directory = os.path.join(options.targetDir, options.processingEra)
postfix = '_small' if options.small else ''
output_directory = os.path.join(directory, "gen" + postfix, sample.name)

# Single file post processing
if options.nJobs > 1:
    n_files_before = len(sample.files)
    sample = sample.split(options.nJobs)[options.job]
    n_files_after = len(sample.files)
    logger.info("Running job %i/%i over %i files from a total of %i.",
                options.job, options.nJobs, n_files_after, n_files_before)

if os.path.exists(output_directory) and options.overwrite:
    if options.nJobs > 1:
        logger.warning("NOT removing directory %s because nJobs = %i",
                       output_directory, options.nJobs)
    else:
        logger.info("Output directory %s exists. Deleting.", output_directory)
        shutil.rmtree(output_directory, ignore_errors=True)

if not os.path.exists(output_directory):
    try:
        os.makedirs(output_directory)
        logger.info("Created output directory %s.", output_directory)
    except:
        logger.info("Directory %s already exists.", output_directory)
        pass

# Load reweight pickle file if supposed to keep weights.
reweight_variables = []
if options.addReweights:
                           str(ppEntry["year"]), ppEntry["skim"],
                           sample + postfix)

    # find all root files in subdirectory

    allFiles = os.listdir(dirPath) if os.path.exists(dirPath) else []
    allRootFiles = filter(
        lambda f: f.endswith('.root') and not f.startswith('nanoAOD'),
        allFiles)
    nanoAODFiles = filter(
        lambda f: f.endswith('.root') and f.startswith('nanoAOD'), allFiles)
    prefix = ''

    nanoAOD_list = []
    if len(nanoAODFiles) > 0:
        logger.warning("Found nanoAODFiles. Shouldn't be here: %s",
                       ",".join(nanoAODFiles))
        try:
            nanoAOD_list = list(
                set([
                    int(f.rstrip('.root').split('_')[-2]) for f in nanoAODFiles
                ]))
        except ValueError:  #possibly 'SPLIT1'
            nanoAOD_list = [0]

    if len(allRootFiles) > 0:
        rootFiles = [
            os.path.join(dirPath, filename) for filename in allRootFiles
        ]

        if args.check != 'None':
            if args.check == 'deep':
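
One caveat in the file-index parsing above: str.rstrip('.root') removes any trailing run of the characters '.', 'r', 'o' and 't', not the literal '.root' suffix, so it can eat into a stem that happens to end in those letters. A suffix-safe sketch with a hypothetical file name, as an alternative rather than a change to the repository:

import os

fname = "nanoAOD_Tree_12_3.root"      # hypothetical file name
stem = os.path.splitext(fname)[0]     # removes exactly one ".root" extension
index = int(stem.split("_")[-2])      # -> 12, matching the split('_')[-2] above
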
Example #12
    def _dataDrivenTransferFactor(self,
                                  channel,
                                  setup,
                                  qcdUpdates=None,
                                  overwrite=False):

        print("Calculating data-driven QCD transfer factor")

        selection_MC_CR = setup.selection(
            "MC",
            channel=channel,
            **setup.defaultParameters(update=qcdUpdates["CR"]
                                      if qcdUpdates else QCDTF_updates["CR"]))
        selection_Data_CR = setup.selection(
            "Data",
            channel=channel,
            **setup.defaultParameters(update=qcdUpdates["CR"]
                                      if qcdUpdates else QCDTF_updates["CR"]))
        selection_MC_SR = setup.selection(
            "MC",
            channel=channel,
            **setup.defaultParameters(update=qcdUpdates["SR"]
                                      if qcdUpdates else QCDTF_updates["SR"]))
        selection_Data_SR = setup.selection(
            "Data",
            channel=channel,
            **setup.defaultParameters(update=qcdUpdates["SR"]
                                      if qcdUpdates else QCDTF_updates["SR"]))
        #        selection_Data_full = setup.selection("Data", channel=channel, **setup.defaultParameters( update=QCDTF_updates["SRDenom"] ))
        #        selection_Data_part = setup.selection("Data", channel=channel, **setup.defaultParameters())

        weight_MC_CR = selection_MC_CR["weightStr"]  # w/ misID SF
        weight_Data_CR = selection_Data_CR["weightStr"]
        weight_MC_SR = selection_MC_SR["weightStr"]  # w/ misID SF
        weight_Data_SR = selection_Data_SR["weightStr"]

        print("Using QCD TF CR weightstring MC %s" % (weight_MC_CR))
        print("Using QCD TF CR weightstring Data %s" % (weight_Data_CR))
        print("Using QCD TF SR weightstring MC %s" % (weight_MC_SR))
        print("Using QCD TF SR weightstring Data %s" % (weight_Data_SR))

        cut_MC_CR = selection_MC_CR["cut"]
        cut_Data_CR = selection_Data_CR["cut"]
        cut_MC_SR = selection_MC_SR["cut"]
        cut_Data_SR = selection_Data_SR["cut"]

        print("Using QCD TF CR MC total cut %s" % (cut_MC_CR))
        print("Using QCD TF CR Data total cut %s" % (cut_Data_CR))
        print("Using QCD TF SR MC total cut %s" % (cut_MC_SR))
        print("Using QCD TF SR Data total cut %s" % (cut_Data_SR))

        # Calculate yields for Data
        yield_data_SR = self.yieldFromCache(setup,
                                            "Data",
                                            channel,
                                            cut_Data_SR,
                                            weight_Data_SR,
                                            overwrite=overwrite)

        if yield_data_SR <= 0:
            logger.warning("SR data yield for QCD TF is 0!")
            return u_float(0, 0)

        yield_data_CR = self.yieldFromCache(setup,
                                            "Data",
                                            channel,
                                            cut_Data_CR,
                                            weight_Data_CR,
                                            overwrite=overwrite)

        if yield_data_CR <= 0:
            logger.warning("CR data yield for QCD TF is 0!")
            return u_float(0, 0)

        # Calculate yields for MC (normalized to data lumi)
        if addSF:
            if setup.nJet == "2":
                DYSF_val = DY2SF_val
                WGSF_val = WG2SF_val
                ZGSF_val = ZG2SF_val
            elif setup.nJet == "3":
                DYSF_val = DY3SF_val
                WGSF_val = WG3SF_val
                ZGSF_val = ZG3SF_val
            elif setup.nJet == "4":
                DYSF_val = DY4SF_val
                WGSF_val = WG4SF_val
                ZGSF_val = ZG4SF_val
            elif setup.nJet == "5":
                DYSF_val = DY5SF_val
                WGSF_val = WG5SF_val
                ZGSF_val = ZG5SF_val
            elif setup.nJet == "2p":
                DYSF_val = DY2pSF_val
                WGSF_val = WG2pSF_val
                ZGSF_val = ZG2pSF_val
            elif setup.nJet == "3p":
                DYSF_val = DY3pSF_val
                WGSF_val = WG3pSF_val
                ZGSF_val = ZG3pSF_val
            elif setup.nJet == "4p":
                DYSF_val = DY4pSF_val
                WGSF_val = WG4pSF_val
                ZGSF_val = ZG4pSF_val

        yield_other_CR = 0
        yield_other_SR = 0
        for s in default_sampleList:
            if s in ["QCD-DD", "QCD", "QCD_e", "QCD_mu", "GJets", "Data"]:
                continue
            print s
            y_CR = self.yieldFromCache(setup,
                                       s,
                                       channel,
                                       cut_MC_CR,
                                       weight_MC_CR,
                                       overwrite=overwrite)
            y_SR = self.yieldFromCache(setup,
                                       s,
                                       channel,
                                       cut_MC_SR,
                                       weight_MC_SR,
                                       overwrite=overwrite)
            print "without SF", s, "CR", y_CR, "SR", y_SR

            if addSF:
                if "DY_LO" in s:
                    y_CR *= DYSF_val[setup.year]  #add DY SF
                    y_SR *= DYSF_val[setup.year]  #add DY SF
                elif "WJets" in s:
                    y_CR *= WJetsSF_val[setup.year]  #add WJets SF
                    y_SR *= WJetsSF_val[setup.year]  #add WJets SF
                elif "TTG" in s:
                    y_CR *= SSMSF_val[setup.year]  #add TTG SF
                    y_SR *= SSMSF_val[setup.year]  #add TTG SF
                elif "ZG" in s:
                    y_CR *= ZGSF_val[setup.year]  #add ZGamma SF
                    y_SR *= ZGSF_val[setup.year]  #add ZGamma SF
                elif "WG" in s:
                    y_CR *= WGSF_val[setup.year]  #add WGamma SF
                    y_SR *= WGSF_val[setup.year]  #add WGamma SF
            print "with SF", s, "CR", y_CR, "SR", y_SR
            yield_other_CR += y_CR
            yield_other_SR += y_SR

        qcd_est_CR = yield_data_CR - yield_other_CR
        qcd_est_SR = yield_data_SR - yield_other_SR
        # TF from full QCD CR to full SR
        transRatio = qcd_est_SR / qcd_est_CR if qcd_est_CR > 0 else u_float(
            0, 0)
        # scale the TF for subregions in e.g. m(l,gamma) or m3 (region cuts)
        transferFac = transRatio  # * fractionalSR

        print("Calculating data-driven QCD TF normalization in channel " +
              channel + " using lumi " + str(setup.dataLumi) + ":")
        print("TF CR yield data:                " + str(yield_data_CR))
        print("TF CR yield other:               " + str(yield_other_CR))
        print("TF CR yield (data-other):        " + str(qcd_est_CR))
        print("TF SR yield data:                " + str(yield_data_SR))
        print("TF SR yield other:               " + str(yield_other_SR))
        print("TF SR yield (data-other):        " + str(qcd_est_SR))
        print("transfer factor:                 " + str(transferFac))

        if transferFac <= 0:
            logger.warning("TF is 0!")
            return u_float(0, 0)

        if qcd_est_CR <= 0 and yield_data_CR > 0:
            logger.warning("Negative QCD TF CR estimate yield!")
            return u_float(0, 0)

        if qcd_est_SR <= 0 and yield_data_SR > 0:
            logger.warning("Negative QCD TF SR estimate yield!")
            return u_float(0, 0)

        return transferFac if transferFac > 0 else u_float(0, 0)
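
Stripped of caching, scale factors and printouts, the transfer factor computed above is the ratio of the QCD estimates in the two regions, each obtained by subtracting the non-QCD MC yield from data. A compact restatement with placeholder yields:

def qcd_transfer_factor(data_SR, other_SR, data_CR, other_CR):
    qcd_SR = data_SR - other_SR    # QCD estimate in the signal region
    qcd_CR = data_CR - other_CR    # QCD estimate in the control region
    return qcd_SR / qcd_CR if qcd_CR > 0 else 0.0

print(qcd_transfer_factor(120.0, 80.0, 500.0, 100.0))   # -> 0.1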