def main():
    dirEmbs = ["."] + [os.path.join("..", d) for d in result.dirEmbs[1:]]
    #dirSig = "../"+result.dirSig

    datasetsEmb = result.DatasetsMany(dirEmbs, baseline + "Counters")
    #datasetsSig = dataset.getDatasetsFromMulticrabCfg(cfgfile=dirSig+"/multicrab.cfg", counters=analysisSig+"Counters")

    datasetsEmb.forEach(plots.mergeRenameReorderForDataMC)
    datasetsEmb.setLumiFromData()
    #plots.mergeRenameReorderForDataMC(datasetsSig)

    tauEmbedding.normalize = True
    tauEmbedding.era = "Run2011A"

    values = {}

    analyses = [("Baseline", baseline), ("Plus", plus), ("Minus", minus)]

    cntr = "Counters/weighted"

    mainTable = counter.CounterTable()
    tauTable = counter.CounterTable()

    for name, analysis in analyses:
        c = datasetsEmb.getCounter("Data", analysis + cntr + "/counter")
        c.setName(name)
        mainTable.appendColumn(c)
        col = mainTable.getColumn(name=name)

        value = col.getCount(col.getRowNames().index(count)).value()
        values[name] = value

    plusDiff = abs(values["Baseline"] - values["Plus"])
    minusDiff = abs(values["Baseline"] - values["Minus"])
    maxDiff = max(plusDiff, minusDiff)
    rel = maxDiff / values["Baseline"]

    print "Count %s, baseline %.3f, plus %.3f, minus %.3f" % (
        count, values["Baseline"], values["Plus"], values["Minus"])
    print "Plus diff %.3f, minus diff %.3f" % (plusDiff, minusDiff)
    print "Relative uncertainty from tau energy scale %.6f" % (rel)

    style = tdrstyle.TDRStyle()
    histograms.createLegend.moveDefaults(dx=-0.32, dh=-0.15)

    doPlot(datasetsEmb, analyses, "transverseMass", "mt_variated_btagging",
           "Without #Delta#phi(#tau jet, E_{T}^{miss}) cut")
    doPlot(datasetsEmb, analyses, "transverseMassAfterDeltaPhi160",
           "mt_variated_deltaPhi160",
           "#Delta#phi(#tau jet, E_{T}^{miss}) < 160^{o}")
    doPlot(datasetsEmb, analyses, "transverseMassAfterDeltaPhi130",
           "mt_variated_deltaPhi130",
           "#Delta#phi(#tau jet, E_{T}^{miss}) < 130^{o}")
Example #2
def doCounters(datasets, massPoints):
    eventCounter = counter.EventCounter(datasets)
    eventCounter.normalizeMCByLuminosity()

    rows1 = [
        "Trigger and HLT_MET cut",
        "taus == 1",
        "trigger scale factor",
        "electron veto",
        "muon veto",
        "njets",
        "MET"
        ]
    rows2 = [
        "btagging scale factor",
        "deltaPhiTauMET<160",
        "deltaPhiTauMET<130",
        ]

    tableAll = eventCounter.getMainCounterTable()
    tableAll.keepOnlyRows(rows1+rows2)

    tableWH = counter.CounterTable()
    tableHH = counter.CounterTable()
    for mass in massPoints:
        tableWH.appendColumn(tableAll.getColumn(name="TTToHplusBWB_M%d"%mass))
        tableHH.appendColumn(tableAll.getColumn(name="TTToHplusBHminusB_M%d"%mass))

    tableWH2 = tableWH.clone()
    tableWH.keepOnlyRows(rows1)
    tableWH2.keepOnlyRows(rows2)
    tableHH2 = tableHH.clone()
    tableHH.keepOnlyRows(rows1)
    tableHH2.keepOnlyRows(rows2)

    format1 = counter.TableFormatText(counter.CellFormatTeX(valueFormat="%.0f", valueOnly=True))
    format12 = counter.TableFormatText(counter.CellFormatTeX(valueFormat="%.1f", valueOnly=True))
    format2 = counter.TableFormatText(counter.CellFormatTeX(valueFormat="%.2f", withPrecision=1))

    print "tt -> bW bH+"
    print tableWH.format(format1)
    print tableWH2.format(format2)
    
    print
    print
    print "tt -> bH+ bH-"
    print tableHH.format(format12)
    print tableHH2.format(format2)
Example #3
def getMainCounterTable(emb, sig):
    ec = counter.EventCounter(emb)
    ec2 = counter.EventCounter(sig)

    colName = "Data"
    if not useData:
        ec.normalizeMCByLuminosity()
        ec2.normalizeMCByLuminosity()
        colName = "MC"

    table = counter.CounterTable()
    col = ec.getMainCounterTable().getColumn(name=colName)
    col.setName("Embedded")
    table.appendColumn(col)
    col = col.copy()
    col.setName("Embedded (norm)")
    col.multiply(embeddingNormalisation.value(),
                 embeddingNormalisation.uncertainty())
    table.appendColumn(col)

    col = ec2.getMainCounterTable().getColumn(name=colName)
    col.setName("Normal")
    table.appendColumn(col)

    return table
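
The "Embedded (norm)" column above is the embedded column scaled by the embedding normalisation factor, which carries its own uncertainty. As a rough standalone illustration of such a scaling with standard uncorrelated error propagation (plain floats; the framework's own multiply may handle the uncertainty differently):

import math

def scaleCount(value, uncertainty, factor, factorUncertainty):
    # Product of two uncorrelated quantities: the relative uncertainties
    # add in quadrature (assumes non-zero value and factor).
    newValue = value * factor
    rel = math.sqrt((uncertainty / value)**2 + (factorUncertainty / factor)**2)
    return newValue, newValue * rel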
Example #4
    def getCounter(self, datasetName, name):
        if datasetName not in self.residualNames:
            return self.datasetsEmb.getCounter(datasetName, name)

        # Ugly hack
        sigName = name
        if isinstance(sigName, basestring):
            sigName = sigName.replace(self.analysisEmb, self.analysisSig)
        else:
            sigName = sigName.clone(
                tree=sigName.tree.replace(self.analysisEmb, self.analysisSig))

        # Get properly normalized embedded data, embedded DY and normal DY histograms
        (embHisto, tmp) = self.datasetsEmb.getHistogram(datasetName, name)
        sigHisto = self.datasetsSig.getDataset(
            datasetName).getDatasetRootHisto(sigName)  # DatasetRootHisto
        sigHisto.normalizeToLuminosity(self.datasetsEmb.getLuminosity())
        sigHisto = sigHisto.getHistogram()  # ROOT.TH1

        table = counter.CounterTable()
        table.appendColumn(counter.HistoCounter("Embedded", embHisto))
        table.appendColumn(counter.HistoCounter("Normal", sigHisto))
        table.removeNonFullRows()

        embColumn = table.getColumn(name="Embedded")
        sigColumn = table.getColumn(name="Normal")
        residual = counter.subtractColumn(datasetName + " residual", sigColumn,
                                          embColumn)
        return residual
Example #5
def main():
    dirEmbs = ["."] + [os.path.join("..", d) for d in result.dirEmbs[1:]]

    tauEmbedding.normalize = True
    tauEmbedding.era = "Run2011A"

    table = counter.CounterTable()
    for i in xrange(len(dirEmbs)):
        tmp = dirEmbs[:]
        del tmp[i]
        row = doCounters(tmp)
        row.setName("Removed embedding %d" % i)
        table.appendRow(row)

    arows = []
    arows.append(counter.meanRow(table))
    arows.append(counter.maxRow(table))
    arows.append(counter.minRow(table))
    arows.append(counter.subtractRow("Max-mean", arows[1], arows[0]))
    arows.append(counter.subtractRow("Mean-min", arows[0], arows[2]))
    for r in arows:
        table.appendRow(r)

    cellFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.3f'))
    print "DeltaPhi < 160"
    print
    print table.format(cellFormat)
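
The loop above is a leave-one-out stability check: each embedding trial is dropped in turn, the counters are recomputed from the remaining trials, and the spread of the results is summarised with the mean/max/min rows and their differences. A plain-Python sketch of the same bookkeeping, with a simple average standing in for the full doCounters computation (names illustrative):

def leaveOneOutSpread(values):
    # Recompute the average with each trial removed, then summarise the spread
    averages = []
    for i in range(len(values)):
        rest = values[:i] + values[i+1:]
        averages.append(sum(rest) / float(len(rest)))
    mean = sum(averages) / float(len(averages))
    return {"Mean": mean,
            "Max-mean": max(averages) - mean,
            "Mean-min": mean - min(averages)}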
Example #6
    def writeRootHisto(self, directory, dstName, srcEmbName, isCounter):
        if not self.datasetsEmb.hasHistogram(self.datasetName, srcEmbName):
            return

        # Get properly normalized embedded data
        (embDataHisto,
         tmp) = self.datasetsEmb.getHistogram(self.datasetName, srcEmbName)

        histo = None
        if isCounter:
            embDataCounter = counter.HistoCounter("EmbData", embDataHisto)
            table = counter.CounterTable()
            table.appendColumn(embDataCounter)
            column = table.getColumn(name="EmbData")
            histo = dataset._counterToHisto(dstName, column.getPairList())
        else:
            histo = embDataHisto

        histo.SetName(dstName)
        histo.SetDirectory(directory)
        histo.Write()

        histo.Delete()  # could this help???

        print "%s/%s" % (directory.GetPath(), dstName)
Example #7
def doCounters(datasetsEmb):
    eventCounter = tauEmbedding.EventCounterMany(datasetsEmb, normalize=True)

    #row = "btagging scale factor"
    row = "deltaPhiTauMET<160"
    #row = "deltaPhiTauMET<130"
    table = eventCounter.getMainCounterTable()
    table.keepOnlyRows([row])

    result = counter.CounterTable()
    def addRow(name, newktt, nsignal):
        fraction = None
        if nsignal is not None:
            fraction = nsignal.clone()
            total = nsignal.clone()
            total.add(newktt)
            fraction.divide(total)
            fraction.multiply(dataset.Count(100))
        result.appendRow(counter.CounterRow(name, ["EWK+tt events", "Signal events", "Signal fraction (\%)"], [newktt, nsignal, fraction]))
    addRow("No signal", table.getCount(irow=0, colName="EWKMC"), None)
    ewkWithSignal = table.getCount(irow=0, colName="EWKScaled")
    for mass in [80, 90, 100, 120, 140, 150, 155, 160]:
        addRow("H+ M%d"%mass, ewkWithSignal, table.getCount(irow=0, colName="TTToHplus_M%d"%mass))

    #cellFormat = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat='%.3f'))
    cellFormat = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat='%.4f', withPrecision=2))
    print result.format(cellFormat)
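
The "Signal fraction" column above is just the signal contribution divided by the signal-plus-background total, in percent. A minimal numeric sketch (plain floats standing in for the dataset.Count objects; names illustrative):

def signalFractionPercent(nEwkTT, nSignal):
    # Fraction of signal events in the EWK+tt plus signal mixture, in percent
    return 100.0 * nSignal / (nSignal + nEwkTT)

# e.g. signalFractionPercent(400.0, 100.0) == 20.0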
Example #8
def doCounters(datasetsEmb, datasetsSig, datasetName):
    lumi = datasetsEmb.getLuminosity()

    # Counters
    eventCounterEmb = tauEmbedding.EventCounterMany(datasetsEmb)
    eventCounterSig = counter.EventCounter(datasetsSig)

    def isNotThis(name):
        return name != datasetName
    eventCounterEmb.removeColumns(filter(isNotThis, datasetsEmb.getAllDatasetNames()))
    eventCounterSig.removeColumns(filter(isNotThis, datasetsSig.getAllDatasetNames()))
    eventCounterSig.normalizeMCToLuminosity(lumi)

    tableEmb = eventCounterEmb.getMainCounterTable()
    tableSig = eventCounterSig.getMainCounterTable()

    table = counter.CounterTable()
    col = tableEmb.getColumn(name=datasetName)
    col.setName(datasetName+" emb")
    table.appendColumn(col)
    col = tableSig.getColumn(name=datasetName)
    col.setName(datasetName+" norm")
    table.appendColumn(col)

    table.keepOnlyRows([
            "njets",
            "MET",
            "btagging",
            "btagging scale factor",
            "deltaPhiTauMET<160",
            "deltaPhiTauMET<130",
            ])
    table.renameRows({"njets": "tau ID"})

    return table
Example #9
    def getMainCounterTable(self, dataset):
        table = counter.CounterTable()
        for an, dm in zip(self.analyses, self.datasetManagers):
            eventCounter = counter.EventCounter(dm)
            eventCounter.normalizeMCByLuminosity()

            tmpTable = eventCounter.getMainCounterTable()
            col = tmpTable.getColumn(name=dataset)
            col.setName(legends[an])
            table.appendColumn(col)
        return table
Example #10
    def writeRootHisto(self, directory, dstName, srcEmbName, srcSigName,
                       isCounter):
        if not self.datasetsEmb.hasHistogram(
                self.datasetName,
                srcEmbName) or not self.datasetSig.hasRootHisto(srcSigName):
            return

        histo = None
        # Get properly normalized embedded and normal MC
        (embMcHisto,
         tmp) = self.datasetsEmb.getHistogram(self.datasetName, srcEmbName)
        sigMcHisto = self.datasetSig.getDatasetRootHisto(
            srcSigName).getHistogram()  # ROOT.TH1

        #print embMcHisto, sigMcHisto

        if self.normalOnly:
            histo = sigMcHisto
        elif self.embeddedOnly:
            histo = embMcHisto
        else:

            if isCounter:
                embMcCounter = counter.HistoCounter("EmbMc", embMcHisto)
                sigMcCounter = counter.HistoCounter("SigMc", sigMcHisto)

                table = counter.CounterTable()
                table.appendColumn(embMcCounter)
                table.appendColumn(sigMcCounter)

                table.removeNonFullRows()
                if table.getNrows() == 0:
                    return

                embMcColumn = table.getColumn(name="EmbMc")
                sigMcColumn = table.getColumn(name="SigMc")
                residual = counter.subtractColumn("Correction", sigMcColumn,
                                                  embMcColumn)
                histo = dataset._counterToHisto(dstName,
                                                residual.getPairList())
            else:
                # Residual MC: normal-embedded
                sigMcHisto.Add(embMcHisto, -1)
                histo = sigMcHisto

        histo.SetName(dstName)
        histo.SetDirectory(directory)
        histo.Write()

        histo.Delete()  # could this help???

        print "%s/%s" % (directory.GetPath(), dstName)
Example #11
def doCounters(muonDatasets, tauDatasets, datasetName):
    ecMuon = counter.EventCounter(muonDatasets)
    ecMuonWeighted = counter.EventCounter(muonDatasets,
                                          counters="counters/weighted")
    ecTau = counter.EventCounter(tauDatasets)

    def isNotThis(name):
        return name != datasetName

    ecMuon.removeColumns(filter(isNotThis, muonDatasets.getAllDatasetNames()))
    ecMuonWeighted.removeColumns(
        filter(isNotThis, muonDatasets.getAllDatasetNames()))
    ecTau.removeColumns(filter(isNotThis, tauDatasets.getAllDatasetNames()))

    ecMuon.normalizeMCToLuminosity(mcLumi)
    ecMuonWeighted.normalizeMCToLuminosity(mcLumi)
    ecTau.normalizeMCToLuminosity(mcLumi)

    table = counter.CounterTable()
    col = ecMuon.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Muon")
    col.setCount(
        -1,
        ecMuonWeighted.getMainCounterTable().getCount(irow=-1,
                                                      colName=datasetName))
    muonEvents = col.getCount(name="= 1 gen muon").clone()
    muonEventsWeighted = col.getCount(-1).clone()
    table.appendColumn(col)
    col = ecTau.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Tau")
    tauEvents = col.getCount(-1).clone()
    table.appendColumn(col)

    print table.format()

    ratio = tauEvents.clone()
    ratio.divide(muonEvents)
    ratioWeighted = tauEvents.clone()
    ratioWeighted.divide(muonEventsWeighted)
    print "Tau/Muon     = %f +- %f" % (ratio.value(), ratio.uncertainty())
    print "Tau/Muon(ID) = %f +- %f" % (ratioWeighted.value(),
                                       ratioWeighted.uncertainty())
    print

    ratio = muonEvents.clone()
    ratio.divide(tauEvents)
    ratioWeighted = muonEventsWeighted.clone()
    ratioWeighted.divide(tauEvents)
    print "Muon/Tau     = %f +- %f" % (ratio.value(), ratio.uncertainty())
    print "Muon(ID)/Tau = %f +- %f" % (ratioWeighted.value(),
                                       ratioWeighted.uncertainty())
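
The Tau/Muon and Muon/Tau numbers above are ratios of two counts, each carrying an uncertainty. A standalone sketch of such a ratio with the usual uncorrelated error propagation (the divide method used above may treat the uncertainties differently):

import math

def ratioWithUncertainty(num, numUnc, den, denUnc):
    # Ratio of two counts; relative uncertainties added in quadrature
    # (assumes non-zero counts)
    ratio = num / den
    rel = math.sqrt((numUnc / num)**2 + (denUnc / den)**2)
    return ratio, ratio * rel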
Example #12
def doCounters(muonDatasets, tauDatasets, datasetName, ntupleCacheMuon,
               ntupleCacheTau):
    ecMuon = counter.EventCounter(muonDatasets)
    #ecMuonWeighted = counter.EventCounter(muonDatasets, counters="counters/weighted")
    ecTau = counter.EventCounter(tauDatasets)

    def isNotThis(name):
        return name != datasetName

    ecMuon.removeColumns(filter(isNotThis, muonDatasets.getAllDatasetNames()))
    #ecMuonWeighted.removeColumns(filter(isNotThis, muonDatasets.getAllDatasetNames()))
    ecTau.removeColumns(filter(isNotThis, tauDatasets.getAllDatasetNames()))

    ecMuon.normalizeMCToLuminosity(mcLumi)
    #ecMuonWeighted.normalizeMCToLuminosity(mcLumi)
    ecTau.normalizeMCToLuminosity(mcLumi)

    ecMuon.getMainCounter().appendRows(
        ntupleCacheMuon.histogram("counters/weighted/counter"))
    ecTau.getMainCounter().appendRows(
        ntupleCacheTau.histogram("counters/weighted/counter"))

    table = counter.CounterTable()
    muonCol = ecMuon.getMainCounterTable().getColumn(name=datasetName)
    muonCol.setName("Muon")
    #col.setCount(-1, ecMuonWeighted.getMainCounterTable().getCount(irow=-1, colName=datasetName))
    table.appendColumn(muonCol)
    tauCol = ecTau.getMainCounterTable().getColumn(name=datasetName)
    tauCol.setName("Tau")
    table.appendColumn(tauCol)

    print table.format()

    def printRatio(muonCount, tauCount):
        ratio1 = tauCol.getCount(name=tauCount).clone()
        ratio1.divide(muonCol.getCount(name=muonCount))

        ratio2 = muonCol.getCount(name=muonCount).clone()
        ratio2.divide(tauCol.getCount(name=tauCount))

        print "Tau/Muon = %f +- %f, Muon/Tau = %f +- %f" % (
            ratio1.value(), ratio1.uncertainty(), ratio2.value(),
            ratio2.uncertainty())

    print "Generator level"
    printRatio("= 1 gen muon", "= 1 gen tau")
    print

    print "Reco muon vs. gen tau, after muon veto"
    printRatio("muon id eff weighting", "reco muon veto")
Example #13
def main():
    dirEmbs = ["."] + [os.path.join("..", d) for d in tauEmbedding.dirEmbs[1:]]
    dirSig = "../" + tauEmbedding.dirSig
    #    dirEmbs = dirEmbs[:2]

    datasetsEmb = tauEmbedding.DatasetsMany(dirEmbs,
                                            analysisEmb + "/counters",
                                            normalizeMCByLuminosity=True)
    datasetsSig = dataset.getDatasetsFromMulticrabCfg(
        cfgfile=dirSig + "/multicrab.cfg", counters=analysisSig + "/counters")
    datasetsSig.updateNAllEventsToPUWeighted()

    datasetsEmb.forEach(plots.mergeRenameReorderForDataMC)
    datasetsEmb.setLumiFromData()
    plots.mergeRenameReorderForDataMC(datasetsSig)

    def mergeEWK(datasets):
        datasets.merge(
            "EWKMC", ["WJets", "TTJets", "DYJetsToLL", "SingleTop", "Diboson"],
            keepSources=True)

    mergeEWK(datasetsSig)
    datasetsEmb.forEach(mergeEWK)
    plots._legendLabels["EWKMC"] = "EWK"

    # Apply TDR style
    style = tdrstyle.TDRStyle()
    histograms.cmsTextMode = histograms.CMSMode.SIMULATION
    histograms.cmsText[histograms.CMSMode.SIMULATION] = "Simulation"
    histograms.createLegend.setDefaults(y1=0.93, y2=0.75, x1=0.52, x2=0.93)
    tauEmbedding.normalize = True
    tauEmbedding.era = "Run2011AB"

    table = counter.CounterTable()

    def dop(name):
        doPlots(datasetsEmb, datasetsSig, name)
        tbl = doCounters(datasetsEmb, datasetsSig, name)
        for icol in xrange(tbl.getNcolumns()):
            table.appendColumn(tbl.getColumn(icol))

    dop("TTJets")
    dop("WJets")
    dop("DYJetsToLL")
    dop("SingleTop")
    dop("Diboson")

    cellFormat = counter.TableFormatLaTeX(
        counter.CellFormatTeX(valueFormat='%.4f', withPrecision=2))
    print table.format(cellFormat)
Example #14
def doCounters(datasetsEmb, datasetsSig):
    rows = [
        "njets", "MET", "btagging scale factor", "deltaPhiTauMET<160",
        "deltaPhiTauMET<130"
    ]
    residuals = ["DYJetsToLL residual", "WW residual"]

    # Normal MC
    eventCounterNormal = counter.EventCounter(datasetsSig)
    eventCounterNormal.normalizeMCToLuminosity(datasetsEmb.getLuminosity())
    tableNormal = eventCounterNormal.getMainCounterTable()
    tableNormal.keepOnlyRows(rows)

    # Embedded data and MC, residual MC
    eventCounter = tauEmbedding.EventCounterResidual(datasetsEmb)
    table = eventCounter.getMainCounterTable()
    table.keepOnlyRows(rows)

    # Build the result
    result = counter.CounterTable()

    c = table.getColumn(name="Data")
    c.setName("Embedded data")
    result.appendColumn(c)
    #result.appendColumn(table.getColumn(name="EWKMC"))
    for name in residuals:
        result.appendColumn(table.getColumn(name=name))

    result.appendColumn(
        counter.sumColumn(
            "Emb. data + res. MC",
            [table.getColumn(name=name) for name in ["Data"] + residuals]))
    result.appendColumn(
        counter.sumColumn(
            "Emb. MC + res. MC",
            [table.getColumn(name=name) for name in ["EWKMC"] + residuals]))

    c = tableNormal.getColumn(name="EWKMC")
    c.setName("Normal MC")
    result.appendColumn(c)

    # Final formatting
    result.renameRows({
        "njets": "tau-jet identification",
        "btagging scale factor": "b tagging"
    })

    cellFormat = counter.TableFormatLaTeX(
        counter.CellFormatTeX(valueFormat='%.4f', withPrecision=2))
    print result.format(cellFormat)
Example #15
    def getCounter(self, datasetName, name):
        if datasetName not in ["Data", "EWKMC", "DYJetsToLL"]:
            (embDataHisto,
             tmp) = self.datasetsEmb.getHistogram(datasetName, name)
            return counter.HistoCounter(datasetName, embDataHisto)

        # Ugly hack
        sigName = name
        if isinstance(sigName, basestring):
            sigName = sigName.replace(self.analysisEmb, self.analysisSig)
        else:
            sigName = sigName.clone(
                tree=sigName.tree.replace(self.analysisEmb, self.analysisSig))

        # Get properly normalized embedded data, embedded DY and normal DY histograms
        (embDataHisto, tmp) = self.datasetsEmb.getHistogram(datasetName, name)
        (embDyHisto, tmp) = self.datasetsEmb.getHistogram("DYJetsToLL", name)
        sigDyHisto = self.datasetsSig.getDataset(
            "DYJetsToLL").getDatasetRootHisto(sigName)  # DatasetRootHisto
        sigDyHisto.normalizeToLuminosity(self.datasetsEmb.getLuminosity())
        sigDyHisto = sigDyHisto.getHistogram()  # ROOT.TH1

        embDataCounter = counter.HistoCounter("EmbData", embDataHisto)
        embDyCounter = counter.HistoCounter("EmbDy", embDyHisto)
        sigDyCounter = counter.HistoCounter("SigDy", sigDyHisto)

        table = counter.CounterTable()
        table.appendColumn(embDataCounter)
        table.appendColumn(embDyCounter)
        table.appendColumn(sigDyCounter)

        table.removeNonFullRows()

        column = table.getColumn(name="EmbData")
        embDyColumn = table.getColumn(name="EmbDy")
        sigDyColumn = table.getColumn(name="SigDy")
        dyCorrection = counter.subtractColumn("Correction", sigDyColumn,
                                              embDyColumn)
        column = counter.sumColumn(datasetName, [column, dyCorrection])

        return column
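
The column returned above is the embedded data with its Drell-Yan component corrected: the embedded DY expectation is subtracted and the normal (non-embedded) DY expectation is added back. Numerically the correction is simply (plain floats standing in for the counter columns):

def dyCorrectedYield(embData, embDy, sigDy):
    # Corrected yield = embedded data + (normal DY - embedded DY)
    return embData + (sigDy - embDy)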
Example #16
def doCountersOld(datasetsEmb, counterName="counter"):
    datasetNames = datasetsEmb.getAllDatasetNames()

    table = counter.CounterTable()
    for name in datasetNames:
        table.appendColumn(
            datasetsEmb.getCounter(
                name, analysisEmb + "Counters/weighted/" + counterName))

    ewkDatasets = ["WJets", "TTJets", "DYJetsToLL", "SingleTop", "Diboson"]
    table.insertColumn(
        2,
        counter.sumColumn("EWKMCsum",
                          [table.getColumn(name=name)
                           for name in ewkDatasets]))

    print "============================================================"
    if isinstance(datasetsEmb, result.DatasetsDYCorrection):
        print "DY correction applied"
    cellFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.3f'))
    print table.format(cellFormat)
Example #17
def do(onlyWjets, mcEvents, normalize, formatCounters, formatPlots):
    dirEmbs = tauEmbedding.dirEmbs[:]
    if onlyWjets:
        dirEmbs.extend(dirEmbsWjets)
    dirEmbs = ["."] + [os.path.join("..", d) for d in dirEmbs[1:]]
#    dirEmbs = dirEmbs[0:2]

    # Read luminosity
    datasets = dataset.getDatasetsFromMulticrabCfg(cfgfile=dirEmbs[0]+"/multicrab.cfg", counters=analysisEmb+"Counters", weightedCounters=False)
    datasets.loadLuminosities()
    plots.mergeRenameReorderForDataMC(datasets)
    lumi = datasets.getDataset("Data").getLuminosity()


    style = tdrstyle.TDRStyle()
    histograms.cmsTextMode = histograms.CMSMode.SIMULATION
    histograms.cmsText[histograms.cmsTextMode] = "Simulation"

    tauEmbedding.normalize=normalize
    tauEmbedding.era = "Run2011A"

    table = counter.CounterTable()
    for i, d in enumerate(dirEmbs):
        datasets = dataset.getDatasetsFromMulticrabCfg(cfgfile=d+"/multicrab.cfg", counters=analysisEmb+"Counters", weightedCounters=False)
        if onlyWjets:
            datasets.remove(filter(lambda n: n != "WJets_TuneZ2_Summer11", datasets.getAllDatasetNames()))
        else:
            if mcEvents:
                datasets.remove(filter(lambda n: n != "WJets_TuneZ2_Summer11" and n != "TTJets_TuneZ2_Summer11" and not "SingleMu" in n, datasets.getAllDatasetNames()))
            datasets.loadLuminosities()
        datasets.remove(filter(lambda name: "HplusTB" in name, datasets.getAllDatasetNames()))
        datasets.remove(filter(lambda name: "TTToHplus" in name, datasets.getAllDatasetNames()))
        tauEmbedding.updateAllEventsToWeighted(datasets)
        plots.mergeRenameReorderForDataMC(datasets)

        row = doCounters(datasets, onlyWjets, mcEvents, normalize, lumi)
        row.setName("Embedding %d" % i)
        table.appendRow(row)

    if formatPlots:
        doPlots(table, onlyWjets, mcEvents, normalize, lumi)

    if not formatCounters:
        return

    arows = []
    arows.append(counter.meanRow(table))
    arows.extend(counter.meanRowFit(table))
    arows.append(counter.maxRow(table))
    arows.append(counter.minRow(table))
    for r in arows:
        table.appendRow(r)

    print table.format()

    ftable = counter.CounterTable()
    def addRow(name):
        col = table.getColumn(name=name)

        minimum = col.getCount(name="Min")
        maximum = col.getCount(name="Max")
        mean = col.getCount(name="Mean")

        ftable.appendRow(counter.CounterRow(name,
                                            ["Mean", "Minimum", "Maximum"],
                                            [mean, minimum, maximum]))
    addRow("Data")
    addRow("EWKMCsum")
    addRow("TTJets")
    addRow("WJets")
    addRow("DYJetsToLL")
    addRow("SingleTop")
    addRow("Diboson")

    cellFormat2 = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat="%.4f", withPrecision=2))
    print ftable.format(cellFormat2)
Example #18
def main():
    dirEmbs = result.dirEmbs[:]
    if onlyWjets:
        dirEmbs.extend(dirEmbsWjets)
    dirEmbs = ["."] + [os.path.join("..", d) for d in dirEmbs[1:]]
    #    dirEmbs = dirEmbs[0:2]

    style = tdrstyle.TDRStyle()

    tauEmbedding.normalize = normalize
    tauEmbedding.era = "Run2011A"

    ts = dataset.TreeScan(analysisEmb + "/tree",
                          function=None,
                          selection=And(metCut, bTaggingCut, deltaPhi160Cut))

    def printPickEvent(f, tree):
        f.write("%d:%d:%d\n" % (tree.run, tree.lumi, tree.event))

    table = counter.CounterTable()

    for i, d in enumerate(dirEmbs):
        datasets = dataset.getDatasetsFromMulticrabCfg(
            cfgfile=d + "/multicrab.cfg", counters=analysisEmb + "Counters")
        if onlyWjets:
            datasets.remove(
                filter(lambda n: n != "WJets_TuneZ2_Summer11",
                       datasets.getAllDatasetNames()))
        else:
            if mcEvents:
                datasets.remove(
                    filter(
                        lambda n: n != "WJets_TuneZ2_Summer11" and n !=
                        "TTJets_TuneZ2_Summer11" and not "SingleMu" in n,
                        datasets.getAllDatasetNames()))
            datasets.loadLuminosities()
        datasets.remove(
            filter(lambda name: "HplusTB" in name,
                   datasets.getAllDatasetNames()))
        datasets.remove(
            filter(lambda name: "TTToHplus" in name,
                   datasets.getAllDatasetNames()))
        tauEmbedding.updateAllEventsToWeighted(datasets)
        plots.mergeRenameReorderForDataMC(datasets)

        # for ds in datasets.getAllDatasets():
        #     f = open("pickEvents_%s_%d.txt" % (ds.getName(), i), "w")
        #     ds.getDatasetRootHisto(ts.clone(function=lambda tree: printPickEvent(f, tree)))
        #     f.close()

        row = doCounters(datasets)
        row.setName("Embedding %d" % i)
        table.appendRow(row)

    doPlots(table)

    arows = []
    arows.append(counter.meanRow(table))
    arows.extend(counter.meanRowFit(table))
    arows.append(counter.maxRow(table))
    arows.append(counter.minRow(table))
    for r in arows:
        table.appendRow(r)

#    csvSplitter = counter.TableSplitter([" \pm "])
#    cellFormat = counter.TableFormatText(counter.CellFormatTeX(valueFormat='%.3f'), columnSeparator=",")
    cellFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.3f'))
    print "DeltaPhi < 160"
    print
    print table.format(cellFormat)
Example #19
def doCounters(datasets):
    # Counters
    eventCounter = counter.EventCounter(datasets, counters=counters)
    mainCounter = eventCounter.getMainCounter()

    selectionsCumulative = []
    tauSelectionsCumulative = []
    td = treeDraw.clone(weight="")

    def sel(name, selection):
        selectionsCumulative.append(selection)
        sel = selectionsCumulative[:]
        if len(tauSelectionsCumulative) > 0:
            sel += ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)]
        mainCounter.appendRow(name, td.clone(selection="&&".join(sel)))

    def tauSel(name, selection):
        tauSelectionsCumulative.append(selection)
        sel = selectionsCumulative[:]
        if len(tauSelectionsCumulative) > 0:
            sel += ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)]
        mainCounter.appendRow(name, td.clone(selection="&&".join(sel)))

    sel("Primary vertex", pvSelection)

    sel(">= 1 tau candidate", "Length$(taus_p4) >= 1")
    tauSel("Decay mode finding", decayModeFinding)
    tauSel("pT > 15", "(taus_p4.Pt() > 15)")
    tauSel("pT > 40", tauPtCut)  #
    tauSel("eta < 2.1", tauEtaCut)
    tauSel("leading track pT > 20", tauLeadPt)
    tauSel("ECAL fiducial", ecalFiducial)
    tauSel("againstElectron", electronRejection)  #
    tauSel("againstMuon", muonRejection)
    tauSel("isolation", tightIsolation)  #
    tauSel("oneProng", oneProng)  #
    tauSel("Rtau", rtau)  #

    sel("3 jets", jetEventSelection)
    sel("MET", metSelection)
    sel("btag", btagEventSelection)
    sel("deltaPhi<160", deltaPhi160Selection)

    fullSelection = "&&".join(
        selectionsCumulative +
        ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)])
    fullSelectionCaloMetNoHF = fullSelection + "&&" + caloMetNoHF
    fullSelectionCaloMet = fullSelection + "&&" + caloMet
    #print fullSelection
    f = open("pickEvents.txt", "w")

    def printPickEvent(tree):
        f.write("%d:%d:%d\n" % (tree.run, tree.lumi, tree.event))

    ts = dataset.TreeScan(td.tree,
                          function=printPickEvent,
                          selection=fullSelection)
    ts2 = dataset.TreeScan(td.tree,
                           function=printPickEvent,
                           selection=fullSelectionCaloMetNoHF)
    ts3 = dataset.TreeScan(td.tree,
                           function=printPickEvent,
                           selection=fullSelectionCaloMet)
    ts4 = dataset.TreeDrawCompound(
        ts2, {
            "SingleMu_Mu_170722-172619_Aug05": ts3,
            "SingleMu_Mu_172620-173198_Prompt": ts3,
            "SingleMu_Mu_173236-173692_Prompt": ts3,
        })
    datasets.getDataset("Data").getDatasetRootHisto(ts4)
    f.close()

    ewkDatasets = [
        "WJets",
        "TTJets",
        #        "DYJetsToLL", "SingleTop", "Diboson"
    ]

    eventCounter.normalizeMCByLuminosity()
    mainTable = eventCounter.getMainCounterTable()
    #mainTable.insertColumn(2, counter.sumColumn("EWKMCsum", [mainTable.getColumn(name=name) for name in ewkDatasets]))
    cellFormat = counter.TableFormatText(
        counter.CellFormatText(valueFormat='%.3f',
                               #valueOnly=True
                               ),
        #                                         columnSeparator = ";",
    )
    print mainTable.format(cellFormat)

    return
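    # NOTE: the early return above disables the efficiency-table code below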

    effFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.4f'))
    effTable = counter.CounterTable()
    col = table.getColumn(name="Data")
    effTable.appendColumn(col)
    effTable.appendColumn(counter.efficiencyColumn(col.getName() + " eff",
                                                   col))
    col = table.getColumn(name="EWKMCsum")
    effTable.appendColumn(col)
    effTable.appendColumn(counter.efficiencyColumn(col.getName() + " eff",
                                                   col))
    print effTable.format(effFormat)
Example #20
def doCounters(datasetsEmb, datasetsSig, datasetName):
    lumi = datasetsEmb.getLuminosity()

    # Counters
    eventCounterEmb = result.EventCounterMany(datasetsEmb,
                                              counters=analysisEmb +
                                              "Counters/weighted")
    eventCounterSig = counter.EventCounter(datasetsSig,
                                           counters=analysisSig +
                                           "Counters/weighted")

    def isNotThis(name):
        return name != datasetName

    eventCounterEmb.removeColumns(
        filter(isNotThis, datasetsEmb.getAllDatasetNames()))
    eventCounterSig.removeColumns(
        filter(isNotThis, datasetsSig.getAllDatasetNames()))
    eventCounterSig.normalizeMCToLuminosity(lumi)

    tdCount = dataset.TreeDraw("dummy", weight=weightBTagging)
    tdCountMET = tdCount.clone(weight=weight, selection=metCut)
    tdCountBTagging = tdCount.clone(selection=And(metCut, bTaggingCut))
    tdCountDeltaPhi160 = tdCount.clone(
        selection=And(metCut, bTaggingCut, deltaPhi160Cut))
    tdCountDeltaPhi130 = tdCount.clone(
        selection=And(metCut, bTaggingCut, deltaPhi130Cut))

    def addRow(name, td):
        tdEmb = td.clone(tree=analysisEmb + "/tree")
        tdSig = td.clone(tree=analysisSig + "/tree")
        eventCounterEmb.mainCounterAppendRow(name, tdEmb)
        eventCounterSig.getMainCounter().appendRow(name, tdSig)

    addRow("JetsForEffs", tdCount.clone(weight=weight))
    addRow("METForEffs", tdCountMET)
    addRow("BTagging (SF)", tdCountBTagging)
    addRow("DeltaPhi < 160", tdCountDeltaPhi160)
    addRow("BTagging (SF) again", tdCountBTagging)
    addRow("DeltaPhi < 130", tdCountDeltaPhi130)

    #effFormat = counter.TableFormatText(counter.CellFormatText(valueFormat='%.4f'))
    #effFormat = counter.TableFormatConTeXtTABLE(counter.CellFormatTeX(valueFormat='%.4f'))
    effFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.4f'))

    f = open("counters_%s.txt" % datasetName, "w")

    for function, cname in [
        (lambda c: c.getMainCounterTable(), "Main"),
        (lambda c: c.getSubCounterTable("TauIDPassedEvt::tauID_HPSTight"),
         "Tau")
    ]:
        tableEmb = function(eventCounterEmb)
        tableSig = function(eventCounterSig)

        table = counter.CounterTable()
        col = tableEmb.getColumn(name=datasetName)
        col.setName("Embedded")
        table.appendColumn(col)
        col = tableSig.getColumn(name=datasetName)
        col.setName("Normal")
        table.appendColumn(col)

        f.write("%s counters\n" % cname)
        f.write(table.format())
        f.write("\n")

        if cname == "Main":
            #map(lambda t: t.keepOnlyRows([
            table.keepOnlyRows([
                "All events",
                "Trigger and HLT_MET cut",
                "taus == 1",
                #"trigger scale factor",
                "electron veto",
                "muon veto",
                "MET",
                "njets",
                "btagging",
                "btagging scale factor",
                "JetsForEffs",
                "METForEffs",
                "BTagging (SF)",
                "DeltaPhi < 160",
                "BTagging (SF) again",
                "DeltaPhi < 130"
            ])  #, [tableEmb, tableSig])
        else:
            #map(lambda t: t.keepOnlyRows([
            table.keepOnlyRows([
                "AllTauCandidates",
                "DecayModeFinding",
                "TauJetPt",
                "TauJetEta",
                #"TauLdgTrackExists",
                "TauLdgTrackPtCut",
                "TauECALFiducialCutsCracksAndGap",
                "TauAgainstElectronCut",
                "TauAgainstMuonCut",
                #"EMFractionCut",
                "HPS",
                "TauOneProngCut",
                "TauRtauCut",
            ])  #, [tableEmb, tableSig])

        col = table.getColumn(name="Embedded")
        table.insertColumn(
            1, counter.efficiencyColumn(col.getName() + " eff", col))
        col = table.getColumn(name="Normal")
        table.appendColumn(
            counter.efficiencyColumn(col.getName() + " eff", col))

        f.write("%s counters\n" % cname)
        f.write(table.format(effFormat))
        f.write("\n\n")
    f.close()
Example #21
def main():
    dirEmbs = ["."] + [os.path.join("..", d) for d in tauEmbedding.dirEmbs[1:]]
#    dirEmbs = dirEmbs[:2]

    tauEmbedding.normalize=True
    tauEmbedding.era = "Run2011A"
 
    table = counter.CounterTable()
    for i in xrange(len(dirEmbs)):
        tmp = dirEmbs[:]
        del tmp[i]
        row = doCounters(tmp)
        row.setName("Removed embedding %d"%i)
        table.appendRow(row)

    arows = []
    arows.append(counter.meanRow(table))
    arows.append(counter.maxRow(table))
    arows.append(counter.minRow(table))
    arows.append(counter.subtractRow("Max-mean", arows[1], arows[0]))
    arows.append(counter.subtractRow("Mean-min", arows[0], arows[2]))
    for r in arows:
        table.appendRow(r)

    cellFormat = counter.TableFormatText(counter.CellFormatTeX(valueFormat='%.3f'))
    print "DeltaPhi < 160"
    print
    print table.format(cellFormat)
    print
    print

    # Format the table as in AN
    ftable = counter.CounterTable()
    def addRow(name):
        col = table.getColumn(name=name)

        minimum = col.getCount(name="Min")
        maximum = col.getCount(name="Max")

        # Maximum deviation from average
        dev1 = col.getCount(name="Max-mean")
        dev2 = col.getCount(name="Mean-min")
        if dev2.value() > dev1.value():
            dev1 = dev2

        dev1.divide(col.getCount(name="Mean"))
        dev1.multiply(dataset.Count(100))

        ftable.appendRow(counter.CounterRow(name,
                                            ["Minimum", "Maximum", "Largest deviation from average (%)"],
                                            [minimum, maximum, dev1]))

    addRow("Data")
    addRow("EWKMCsum")
    addRow("TTJets")
    addRow("WJets")
    addRow("DYJetsToLL")
    addRow("SingleTop")
    addRow("Diboson")

    cellFormat2 = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat="%.4f", withPrecision=2))
    cellFormat2.setColumnFormat(counter.CellFormatTeX(valueFormat="%.1f", valueOnly=True), index=2)
    print ftable.format(cellFormat2)
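
The "Largest deviation from average" column above takes whichever of the Max-mean and Mean-min differences is larger and expresses it as a percentage of the mean. The same quantity on a plain list of per-removal results (names illustrative):

def largestDeviationPercent(values):
    # Largest absolute deviation of the trials from their mean, in percent of the mean
    mean = sum(values) / float(len(values))
    dev = max(max(values) - mean, mean - min(values))
    return 100.0 * dev / mean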
Example #22
def doTauCounters(datasetsEmb,
                  datasetsSig,
                  datasetName,
                  ntupleCacheEmb,
                  ntupleCacheSig,
                  normalizeEmb=True):
    lumi = datasetsEmb.getLuminosity()

    # Take unweighted counters for embedded, to get a handle on the muon isolation efficiency
    eventCounterEmb = tauEmbedding.EventCounterMany(
        datasetsEmb,
        counters="/" + tauAnalysisEmb + "Counters",
        normalize=normalizeEmb)
    eventCounterSig = counter.EventCounter(datasetsSig,
                                           counters="/" + tauAnalysisEmb +
                                           "Counters")

    def isNotThis(name):
        return name != datasetName

    eventCounterEmb.removeColumns(
        filter(isNotThis, datasetsEmb.getAllDatasetNames()))
    eventCounterSig.removeColumns(
        filter(isNotThis, datasetsSig.getAllDatasetNames()))

    eventCounterEmb.mainCounterAppendRows(
        ntupleCacheEmb.histogram("counters/weighted/counter"))
    eventCounterSig.getMainCounter().appendRows(
        ntupleCacheSig.histogram("counters/weighted/counter"))

    eventCounterSig.normalizeMCToLuminosity(lumi)

    table = counter.CounterTable()
    col = eventCounterEmb.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Embedded")
    table.appendColumn(col)
    col = eventCounterSig.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Normal")
    table.appendColumn(col)

    lastCountEmb = table.getCount(colName="Embedded",
                                  irow=table.getNrows() - 1)
    lastCountNormal = table.getCount(colName="Normal",
                                     irow=table.getNrows() - 1)

    postfix = ""
    if not normalizeEmb:
        postfix = "_notEmbNormalized"

    effFormat = counter.TableFormatLaTeX(
        counter.CellFormatTeX(valueFormat="%.4f", withPrecision=2))
    countFormat = counter.TableFormatText(
        counter.CellFormatText(valueFormat="%.4f"),
        #columnSeparator="  ;"
    )

    fname = "counters_tau_" + datasetName + postfix + ".txt"
    f = open(fname, "w")
    f.write(table.format(countFormat))
    f.write("\n")

    try:
        ratio = lastCountNormal.clone()
        ratio.divide(lastCountEmb)
        f.write("Normal/embedded = %.4f +- %.4f\n\n" %
                (ratio.value(), ratio.uncertainty()))
    except ZeroDivisionError:
        pass

    f.close()
    print "Printed tau counters to", fname

    if not normalizeEmb:
        return

    tableEff = counter.CounterTable()
    tableEff.appendColumn(
        counter.efficiencyColumn("Embedded eff",
                                 table.getColumn(name="Embedded")))
    tableEff.appendColumn(
        counter.efficiencyColumn("Normal eff", table.getColumn(name="Normal")))

    embeddingMuonIsolationEff = tableEff.getCount(
        rowName="tauEmbeddingMuonsCount", colName="Embedded eff")
    embeddingTauIsolationEff = tableEff.getCount(rowName="Isolation",
                                                 colName="Embedded eff")
    embeddingTotalIsolationEff = embeddingMuonIsolationEff.clone()
    embeddingTotalIsolationEff.multiply(embeddingTauIsolationEff)

    # Remove unnecessary rows
    rowNames = [
        #        "All events",
        "Decay mode finding",
        "Eta cut",
        "Pt cut",
        "Leading track pt",
        "Against electron",
        "Against muon",
        "Isolation",
        "One prong",
        "Rtau",
    ]
    tableEff.keepOnlyRows(rowNames)
    rowIndex = tableEff.getRowNames().index("Isolation")
    tableEff.insertRow(
        rowIndex,
        counter.CounterRow("Mu isolation (emb)",
                           ["Embedded eff", "Normal eff"],
                           [embeddingMuonIsolationEff, None]))
    tableEff.insertRow(
        rowIndex + 1,
        counter.CounterRow("Tau isolation (emb)",
                           ["Embedded eff", "Normal eff"],
                           [embeddingTauIsolationEff, None]))
    tableEff.setCount2(embeddingTotalIsolationEff,
                       rowName="Isolation",
                       colName="Embedded eff")
    #tableEff.setCount2(None, rowName="pT > 15", colName="Normal eff")

    #print table.format(effFormat)
    fname = "counters_tau_" + datasetName + "_eff.txt"
    f = open(fname, "w")
    f.write(tableEff.format(effFormat))
    f.write("\n")
    f.close()
    print "Printed tau efficiencies to", fname
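
In the efficiency table above, the Isolation entry of the embedded column is set to the product of the muon isolation efficiency at embedding (the tauEmbeddingMuonsCount row) and the tau isolation efficiency. A one-line numeric sketch:

def embeddedIsolationEfficiency(muonIsoEff, tauIsoEff):
    # Total isolation efficiency for embedded events: muon isolation times tau isolation
    return muonIsoEff * tauIsoEff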
Example #23
def processDirectory(dset, srcDirName, dstDir, scaleBy, dsetSubtractFrom):
    # Get directories, recurse to them
    dirs = dset.getDirectoryContent(srcDirName,
                                    lambda o: isinstance(o, ROOT.TDirectory))
    dirs = filter(lambda n: n != "configInfo", dirs)

    for d in dirs:
        newDir = dstDir.mkdir(d)
        processDirectory(dset, os.path.join(srcDirName, d), newDir, scaleBy,
                         dsetSubtractFrom)

    # Then process histograms
    histos = dset.getDirectoryContent(srcDirName,
                                      lambda o: isinstance(o, ROOT.TH1))
    dstDir.cd()
    shouldScale = True
    if srcDirName == "counters":
        # Don't touch unweighted counters
        shouldScale = False
    isCounter = srcDirName in ["counters", "counters/weighted"]
    for hname in histos:
        #        drh = dset.getDatasetRootHisto(os.path.join(srcDirName, hname))
        #        hnew = drh.getHistogram() # TH1
        hnew = dset.getAverageHistogram(
            os.path.join(srcDirName, hname),
            normalizeMCByCrossSection=(dsetSubtractFrom is not None))
        hnew.SetName(hname)
        if shouldScale and hname != "SplittedBinInfo":  # don't scale the SplittedBinInfo helper histogram
            tauEmbedding.scaleTauBRNormalization(hnew)
            if scaleBy is not None:
                hnew.Scale(scaleBy)
        if dsetSubtractFrom is not None:
            drh = dsetSubtractFrom.getDatasetRootHisto(
                os.path.join(srcDirName, hname))
            if dsetSubtractFrom.isMC():
                drh.normalizeByCrossSection()
            hsub = drh.getHistogram()
            if not isCounter:
                # hnew = hsub-hnew
                hnew.Scale(-1)
                hnew.Add(hsub)
            else:
                cnew = counter.HistoCounter("Emb", hnew)
                csub = counter.HistoCounter("Norm", hsub)

                table = counter.CounterTable()
                table.appendColumn(cnew)
                table.appendColumn(csub)
                cres = counter.subtractColumn("Result",
                                              table.getColumn(name="Norm"),
                                              table.getColumn(name="Emb"))

                hnew2 = dataset._counterToHisto(hnew.GetName(),
                                                cres.getPairList())
                hnew2.SetTitle(hnew.GetTitle())
                hnew = hnew2
                if srcDirName == "counters" and hname == "counter" and hnew.GetBinContent(
                        1) < 0:
                    hnew.SetBinContent(1, 0)

        hnew.SetDirectory(dstDir)

        # # set the first count in main counters to 0 if it is negative,
        # # this is to circumvent certain assumptions made elsewhere in
        # # the code
        # if hname == "counter" and (srcDirName == "counters" or srcDirName == "counters/weighted") and hnew.GetBinContent(1) < 0:
        #     hnew.SetBinContent(1, 0)
        #     hnew.SetBinError(1, 0)

        hnew.Write()
        #        ROOT.gDirectory.Delete(hname)
        hnew.Delete()
Example #24
def doCounters(datasetsEmb2, datasetsSig2, datasetName):
    lumi = datasetsEmb2.getDataset("Data").getLuminosity()

    datasetsEmb = datasetsEmb2.deepCopy()
    datasetsSig = datasetsSig2.deepCopy()

    datasetsEmb.remove(
        filter(lambda name: name != datasetName,
               datasetsEmb.getAllDatasetNames()))
    datasetsSig.remove(
        filter(lambda name: name != datasetName,
               datasetsSig.getAllDatasetNames()))
    # Counters
    eventCounterEmb = counter.EventCounter(datasetsEmb,
                                           counters=analysisEmb + "Counters")
    eventCounterSig = counter.EventCounter(datasetsSig,
                                           counters=analysisSig + "Counters")
    eventCounterEmb.normalizeMCToLuminosity(lumi)
    eventCounterSig.normalizeMCToLuminosity(lumi)

    #effFormat = counter.TableFormatText(counter.CellFormatText(valueFormat='%.4f'))
    #effFormat = counter.TableFormatConTeXtTABLE(counter.CellFormatTeX(valueFormat='%.4f'))
    effFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.4f'))

    counterEmb = eventCounterEmb.getMainCounter()
    counterSig = eventCounterSig.getMainCounter()
    treeDraw = dataset.TreeDraw("dummy")
    tdEmb = treeDraw.clone(tree=analysisEmb + "/tree")
    tdSig = treeDraw.clone(tree=analysisSig + "/tree")
    selectionsCumulative = []
    tauSelectionsCumulative = []

    def sel(name, selection):
        selectionsCumulative.append(selection)
        sel = selectionsCumulative[:]
        if len(tauSelectionsCumulative) > 0:
            sel += ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)]
        sel = "&&".join(sel)
        counterEmb.appendRow(name, tdEmb.clone(selection=sel))
        counterSig.appendRow(name, tdSig.clone(selection=sel))

    def tauSel(name, selection):
        tauSelectionsCumulative.append(selection)
        sel = selectionsCumulative[:]
        if len(tauSelectionsCumulative) > 0:
            sel += ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)]
        sel = "&&".join(sel)
        counterEmb.appendRow(name, tdEmb.clone(selection=sel))
        counterSig.appendRow(name, tdSig.clone(selection=sel))


    #sel("Primary vertex", tauPlot.pvSelection)

    sel(">= 1 tau candidate", "Length$(taus_p4) >= 1")
    tauSel("Decay mode finding", tauPlot.decayModeFinding)
    tauSel("pT > 15", "(taus_p4.Pt() > 15)")
    tauSel("pT > 40", tauPlot.tauPtCut)
    tauSel("eta < 2.1", tauPlot.tauEtaCut)
    tauSel("leading track pT > 20", tauPlot.tauLeadPt)
    tauSel("ECAL fiducial", tauPlot.ecalFiducial)
    tauSel("againstElectron", tauPlot.electronRejection)
    tauSel("againstMuon", tauPlot.muonRejection)
    tauSel("isolation", tauPlot.tightIsolation)
    tauSel("oneProng", tauPlot.oneProng)
    tauSel("Rtau", tauPlot.rtau)
    sel("3 jets", tauPlot.jetEventSelection)
    sel("MET", tauPlot.metSelection)
    sel("btag", tauPlot.btagEventSelection)

    table = counter.CounterTable()
    col = counterEmb.getTable().getColumn(name=datasetName)
    col.setName("Embedded")
    table.appendColumn(col)
    col = counterSig.getTable().getColumn(name=datasetName)
    col.setName("Normal")
    table.appendColumn(col)

    col = table.getColumn(name="Embedded")
    table.insertColumn(1, counter.efficiencyColumn(col.getName() + " eff",
                                                   col))
    col = table.getColumn(name="Normal")
    table.appendColumn(counter.efficiencyColumn(col.getName() + " eff", col))

    print "%s counters" % datasetName
    print table.format(effFormat)
Example #25
def doPlots(datasetsEmb, datasetsSig, datasetName):
    lumi = datasetsEmb.getDataset("Data").getLuminosity()

    plots._legendLabels[datasetName+"_Embedded"] = "Embedded "+plots._legendLabels[datasetName]
    plots._legendLabels[datasetName+"_Normal"]   = "Normal "+plots._legendLabels[datasetName]

    def createPlot(name):
        name2Emb = name
        name2Sig = name
        if isinstance(name, basestring):
            name2Emb = analysisEmb+"/"+name
            name2Sig = analysisSig+"/"+name
        else:
            name2Emb = name.clone(tree=analysisEmb+"/tree")
            name2Sig = name.clone(tree=analysisSig+"/tree")
        emb = datasetsEmb.getDataset(datasetName).getDatasetRootHisto(name2Emb)
        emb.setName("Embedded")
        sig = datasetsSig.getDataset(datasetName).getDatasetRootHisto(name2Sig)
        sig.setName("Normal")
        p = plots.ComparisonPlot(emb, sig)
        p.histoMgr.normalizeMCToLuminosity(lumi)
        p.histoMgr.setHistoLegendLabelMany({
                "Embedded": "Embedded "+plots._legendLabels[datasetName],
                "Normal":   "Normal "+plots._legendLabels[datasetName],
                })
        p.histoMgr.forEachHisto(styles.generator())

        return p

    opts2 = {"ymin": 0, "ymax": 2}
    def drawControlPlot(path, xlabel, **kwargs):
        drawPlot(createPlot("ControlPlots/"+path), "mcembsig_"+datasetName+"_"+path, xlabel, opts2=opts2, **kwargs)

    # Control plots
    drawControlPlot("SelectedTau_pT_AfterStandardSelections", "#tau-jet p_{T} (GeV/c)", opts={"xmax": 250}, rebin=2, cutBox={"cutValue": 40, "greaterThan": 40})
    drawControlPlot("SelectedTau_eta_AfterStandardSelections", "#tau-jet #eta", opts={"xmin": -2.2, "xmax": 2.2}, ylabel="Events / %.1f", rebin=4, log=False, moveLegend={"dy":-0.6, "dx":-0.2})
    drawControlPlot("SelectedTau_phi_AfterStandardSelections", "#tau-jet #phi", rebin=10, ylabel="Events / %.2f", log=False)
    drawControlPlot("SelectedTau_LeadingTrackPt_AfterStandardSelections", "#tau-jet ldg. charged particle p_{T} (GeV/c)", opts={"xmax": 300}, rebin=2, cutBox={"cutValue": 20, "greaterThan": True})
    drawControlPlot("SelectedTau_Rtau_AfterStandardSelections", "R_{#tau} = p^{ldg. charged particle}/p^{#tau jet}", opts={"xmin": 0.65, "xmax": 1.05, "ymin": 1e-1, "ymaxfactor": 5}, rebin=5, ylabel="Events / %.2f", moveLegend={"dx":-0.3}, cutBox={"cutValue":0.7, "greaterThan":True})
    drawControlPlot("SelectedTau_p_AfterStandardSelections", "#tau-jet p (GeV/c)", rebin=2)
    drawControlPlot("SelectedTau_LeadingTrackP_AfterStandardSelections", "#tau-jet ldg. charged particle p (GeV/c)", rebin=2)
    #drawControlPlot("IdentifiedElectronPt_AfterStandardSelections", "Electron p_{T} (GeV/c)")
    #drawControlPlot("IdentifiedMuonPt_AfterStandardSelections", "Muon p_{T} (GeV/c)")
    drawControlPlot("Njets_AfterStandardSelections", "Number of jets", ylabel="Events")
    drawControlPlot("MET", "Uncorrected PF E_{T}^{miss} (GeV)", rebin=5, opts={"xmax": 400}, cutLine=50)
    drawControlPlot("NBjets", "Number of selected b jets", opts={"xmax": 6}, ylabel="Events", moveLegend={"dx":-0.3, "dy":-0.5}, cutLine=1)

    treeDraw = dataset.TreeDraw("dummy", weight="weightPileup")

    tdDeltaPhi = treeDraw.clone(varexp="acos( (tau_p4.Px()*met_p4.Px()+tau_p4.Py()*met_p4.Py())/(tau_p4.Pt()*met_p4.Et()) )*57.3 >>tmp(18, 0, 180)")
    tdMt = treeDraw.clone(varexp="sqrt(2 * tau_p4.Pt() * met_p4.Et() * (1-cos(tau_p4.Phi()-met_p4.Phi()))) >>tmp(20,0,200)")

    # DeltaPhi
    xlabel = "#Delta#phi(#tau, MET) (^{#circ})"
    def customDeltaPhi(h):
        yaxis = h.getFrame().GetYaxis()
        yaxis.SetTitleOffset(0.8*yaxis.GetTitleOffset())
    drawPlot(createPlot(tdDeltaPhi.clone()), "mcembsig_"+datasetName+"_deltaPhi_1AfterTauID", xlabel, log=False, opts2=opts2, ylabel="Events / %.0f^{#circ}", function=customDeltaPhi, moveLegend={"dx":-0.22}, cutLine=[130, 160])

    # mT
    xlabel = "m_{T} (#tau jet, E_{T}^{miss}) (GeV/c^{2})"
    drawPlot(createPlot(tdMt.clone()), "mcembsig_"+datasetName+"_transverseMass_1AfterTauID", xlabel, opts2=opts2, ylabel="Events / %.0f GeV/c^{2}")


    # After all cuts
    metCut = "(met_p4.Et() > 50)"
    bTaggingCut = "passedBTagging"
    selection = "&&".join([metCut, bTaggingCut])
    drawPlot(createPlot(treeDraw.clone(varexp="tau_p4.Pt() >>tmp(20,0,200)", selection=selection)), "mcembsig_"+datasetName+"_selectedTauPt_3AfterBTagging", "#tau-jet p_{T} (GeV/c)", opts2={"ymin": 0, "ymax": 2})
    drawPlot(createPlot(treeDraw.clone(varexp="met_p4.Pt() >>tmp(16,0,400)", selection=selection)), "mcembsig_"+datasetName+"_MET_3AfterBTagging", "E_{T}^{miss} (GeV)", ylabel="Events / %.0f GeV", opts2={"ymin": 0, "ymax": 2})
    drawPlot(createPlot(tdMt.clone(selection=selection)), "mcembsig_"+datasetName+"_transverseMass_3AfterBTagging", xlabel, opts2={"ymin": 0, "ymax": 2}, ylabel="Events / %.0f GeV/c^{2}")
                        


    eventCounterEmb = counter.EventCounter(datasetsEmb, counters=analysisEmb+"Counters")
    eventCounterSig = counter.EventCounter(datasetsSig, counters=analysisSig+"Counters")
    eventCounterEmb.normalizeMCToLuminosity(lumi)
    eventCounterSig.normalizeMCToLuminosity(lumi)

    #effFormat = counter.TableFormatText(counter.CellFormatText(valueFormat='%.4f'))
    #effFormat = counter.TableFormatConTeXtTABLE(counter.CellFormatTeX(valueFormat='%.4f'))
    effFormat = counter.TableFormatText(counter.CellFormatTeX(valueFormat='%.4f'))

    for function, cname in [
        (lambda c: c.getMainCounterTable(), "Main"),
        (lambda c: c.getSubCounterTable("TauIDPassedEvt::tauID_HPSTight"), "Tau")
        ]:
        tableEmb = function(eventCounterEmb)
        tableSig = function(eventCounterSig)

        table = counter.CounterTable()
        col = tableEmb.getColumn(name=datasetName)
        col.setName("Embedded")
        table.appendColumn(col)
        col = tableSig.getColumn(name=datasetName)
        col.setName("Normal")
        table.appendColumn(col)

        print "%s counters" % cname
        print table.format()

        if cname == "Main":
            #map(lambda t: t.keepOnlyRows([
            table.keepOnlyRows([
                        "All events",
                        "Trigger and HLT_MET cut",
                        "taus == 1",
                        #"trigger scale factor",
                        "electron veto",
                        "muon veto",
                        "MET",
                        "njets",
                        "btagging",
                        "btagging scale factor",
                        "JetsForEffs",
                        "METForEffs",
                        "BTagging",
                        "DeltaPhi < 160",
                        "DeltaPhi < 130"
                        ])#, [tableEmb, tableSig])
        else:
            #map(lambda t: t.keepOnlyRows([
            table.keepOnlyRows([
                        "AllTauCandidates",
                        "DecayModeFinding",
                        "TauJetPt",
                        "TauJetEta",
                        #"TauLdgTrackExists",
                        "TauLdgTrackPtCut",
                        "TauECALFiducialCutsCracksAndGap",
                        "TauAgainstElectronCut",
                        "TauAgainstMuonCut",
                        #"EMFractionCut",
                        "HPS",
                        "TauOneProngCut",
                        "TauRtauCut",
                        ])#, [tableEmb, tableSig])

        col = table.getColumn(name="Embedded")
        table.insertColumn(1, counter.efficiencyColumn(col.getName()+" eff", col))
        col = table.getColumn(name="Normal")
        table.appendColumn(counter.efficiencyColumn(col.getName()+" eff", col))

        print "%s counters" % cname
        print table.format(effFormat)

def printCountersOld(datasets, datasetsMC, analysisPrefix, normalizeToLumi=None):
    print "============================================================"
    print "Dataset info: "
    datasets.printInfo()

    eventCounter = makeEventCounter(datasets)
    if normalizeToLumi is None:
        eventCounter.normalizeMCByLuminosity()
    else:
        eventCounter.normalizeMCToLuminosity(normalizeToLumi)
    
    mainCounterMap = {
        "allEvents": "All events",
        "passedTrigger": "Triggered",
        "passedScrapingVeto": "Scaping veto",
        "passedHBHENoiseFilter": "HBHE noise filter",
        "passedPrimaryVertexFilter": "PV filter",
        analysisPrefix+"countAll": "All events",
        analysisPrefix+"countTrigger": "Triggered",
        analysisPrefix+"countPrimaryVertex": "Good primary vertex",
        analysisPrefix+"countGlobalTrackerMuon": "Global \& tracker muon",
        analysisPrefix+"countMuonKin": "Muon \pT, $\eta$ cuts",
        analysisPrefix+"countMuonQuality": "Muon quality cuts",
        analysisPrefix+"countMuonIP": "Muon transverse IP",
        analysisPrefix+"countMuonVertexDiff": "Muon dz",
        analysisPrefix+"countJetMultiplicityCut": "Njets",
        analysisPrefix+"countMETCut": "MET cut"
        }
    
    latexFormat = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat="%.0f"))
    latexFormat2 = counter.TableFormatLaTeX(counter.CellFormatTeX(valueFormat="%.1f"))
    #latexFormat = counter.TableFormatConTeXtTABLE(counter.CellFormatTeX(valueFormat="%.0f", valueOnly=True))
    
    print "============================================================"
    print "Main counter (%s)" % eventCounter.getNormalizationString()
    #eventCounter.getMainCounter().printCounter()
    table = eventCounter.getMainCounterTable()

#    addSumColumn(table)
#    addTtwFractionColumn(table)
#    addPurityColumn(table)
#    addDyFractionColumn(table)
#    addQcdFractionColumn(table)

#    reorderCounterTable(table)

#    print table.format()
    print table.format(latexFormat)
#    print table.format(latexFormat2)
    return
    
    #print "------------------------------------------------------------"
    #print counterEfficiency(eventCounter.getMainCounterTable()).format(FloatDecimalFormat(4))
    
    # mainTable = eventCounter.getMainCounterTable()
    # effTable = counterEfficiency(mainTable)
    # for icol in xrange(0, effTable.getNcolumns()):
    #     column = effTable.getColumn(icol)
    #     column.setName(column.getName()+" eff")
    #     mainTable.insertColumn(icol*2+1, column)
    
    # print "------------------------------------------------------------"
    # printCounter(mainTable, FloatDecimalFormat(4))
    
    
    eventCounter = makeEventCounter(datasetsMC)
    print "============================================================"
    print "Main counter (%s)" % eventCounter.getNormalizationString()
    print eventCounter.getMainCounterTable().format(counter.TableFormatText(counter.CellFormatText(valueOnly=True, valueFormat="%.0f")))
    
    
    # Make the Data column entries comparable to the MC
    table.renameRows(mainCounterMap)
    dataCol = table.getColumn(0)
    table.removeColumn(0)
    dataCol.removeRow(2) # scraping
    dataCol.removeRow(2) # HBHE
    dataCol.removeRow(2) # pv filter
    dataCol.removeRow(2) # all events
    dataCol.removeRow(2) # triggered
    table.insertColumn(0, dataCol)
    addDataMcRatioColumn(table)
    
    # LaTeX tables for note
    latexFormat.setColumnFormat(counter.CellFormatTeX(valueFormat="%.3f"), name="Data/MCsum")
    latexFormat.setColumnFormat(counter.CellFormatTeX(valueFormat="%.1f"), name="SingleTop")
    
    tableDataMc = counter.CounterTable()
    tableDataMc.appendColumn(table.getColumn(name="Data"))
    tableDataMc.appendColumn(table.getColumn(name="MCsum"))
    tableDataMc.appendColumn(table.getColumn(name="Data/MCsum"))
    print tableDataMc.format(latexFormat)
    
    tableMc = counter.CounterTable()
    #tableMc.appendColumn(table.getColumn(name="MCsum"))
    for mcName in datasets.getMCDatasetNames():
        tableMc.appendColumn(table.getColumn(name=mcName))
    print tableMc.format(latexFormat)
    
    tableRatio = counter.CounterTable()
    for cname in ["TTJets/(TTJets+WJets)", "Purity", "QCD/MCsum", "DY/MCsum"]:
        tableRatio.appendColumn(table.getColumn(name=cname))
        latexFormat.setColumnFormat(counter.CellFormatTeX(valueFormat="%.2f", valueOnly=True), name=cname)
    print tableRatio.format(latexFormat)

def doCounters(datasetsEmb, datasetsSig, datasetName):
    lumi = datasetsEmb.getLuminosity()
    treeDraw = dataset.TreeDraw("dummy", weight=weight)

    # Counters
    eventCounterEmb = tauEmbedding.EventCounterMany(datasetsEmb, counters=analysisEmb+"Counters")
    eventCounterSig = counter.EventCounter(datasetsSig, counters=analysisSig+"Counters")

    def isNotThis(name):
        return name != datasetName

    eventCounterEmb.removeColumns(filter(isNotThis, datasetsEmb.getAllDatasetNames()))
    eventCounterSig.removeColumns(filter(isNotThis, datasetsSig.getAllDatasetNames()))
    eventCounterSig.normalizeMCToLuminosity(lumi)

    #effFormat = counter.TableFormatText(counter.CellFormatText(valueFormat='%.4f'))
    #effFormat = counter.TableFormatConTeXtTABLE(counter.CellFormatTeX(valueFormat='%.4f'))
    effFormat = counter.TableFormatText(counter.CellFormatTeX(valueFormat='%.4f'))

    tdEmb = treeDraw.clone(tree=analysisEmb+"/tree")
    tdSig = treeDraw.clone(tree=analysisSig+"/tree")
    selectionsCumulative = []
    tauSelectionsCumulative = []
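    # sel() adds an event-level cut; tauSel() adds a per-tau-candidate cut.
    # All per-tau cuts accumulated so far are AND'ed together inside a
    # Sum$(...) >= 1 term, i.e. "at least one tau candidate passes all tau cuts".
    # For illustration, after the first sel() and two tauSel() calls the selection
    # is roughly: Length$(taus_p4) >= 1 && Sum$(decayModeFinding && pT>15) >= 1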
    def sel(name, selection):
        selectionsCumulative.append(selection)
        sel = selectionsCumulative[:]
        if len(tauSelectionsCumulative) > 0:
            sel += ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)]
        sel = "&&".join(sel)
        eventCounterEmb.mainCounterAppendRow(name, tdEmb.clone(selection=sel))
        eventCounterSig.getMainCounter().appendRow(name, tdSig.clone(selection=sel))
    def tauSel(name, selection):
        tauSelectionsCumulative.append(selection)
        sel = selectionsCumulative[:]
        if len(tauSelectionsCumulative) > 0:
            sel += ["Sum$(%s) >= 1" % "&&".join(tauSelectionsCumulative)]
        sel = "&&".join(sel)
        eventCounterEmb.mainCounterAppendRow(name, tdEmb.clone(selection=sel))
        eventCounterSig.getMainCounter().appendRow(name, tdSig.clone(selection=sel))

#    sel("Primary vertex", tauEmbedding.tauNtuple.pvSelection)
    sel(">= 1 tau candidate", "Length$(taus_p4) >= 1")
    tauSel("Decay mode finding", tauEmbedding.tauNtuple.decayModeFinding)
    tauSel("pT > 15", tauEmbedding.tauNtuple.tauPtPreCut)
    tauSel("pT > 40", tauEmbedding.tauNtuple.tauPtCut)
    tauSel("eta < 2.1", tauEmbedding.tauNtuple.tauEtaCut)
    tauSel("leading track pT > 20", tauEmbedding.tauNtuple.tauLeadPt)
    tauSel("ECAL fiducial", tauEmbedding.tauNtuple.ecalFiducial)
    tauSel("againstElectron", tauEmbedding.tauNtuple.electronRejection)
    tauSel("againstMuon", tauEmbedding.tauNtuple.muonRejection)
    tauSel("isolation", tauEmbedding.tauNtuple.tightIsolation)
    tauSel("oneProng", tauEmbedding.tauNtuple.oneProng)
    tauSel("Rtau", tauEmbedding.tauNtuple.rtau)
    sel("3 jets", tauEmbedding.tauNtuple.jetEventSelection)
    sel("MET", tauEmbedding.tauNtuple.metSelection)
    sel("btag", tauEmbedding.tauNtuple.btagEventSelection)

    table = counter.CounterTable()
    col = eventCounterEmb.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Embedded")
    table.appendColumn(col)
    col = eventCounterSig.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Normal")
    table.appendColumn(col)

    col = table.getColumn(name="Embedded")
    table.insertColumn(1, counter.efficiencyColumn(col.getName()+" eff", col))
    col = table.getColumn(name="Normal")
    table.appendColumn(counter.efficiencyColumn(col.getName()+" eff", col))

    print "%s counters" % datasetName
    print table.format(effFormat)

    f = open("counters_"+datasetName+".txt", "w")
    f.write(table.format(effFormat))
    f.write("\n")
    f.close()

def doCounters(datasetsEmb, datasetsSig, datasetName, normalizeEmb=True):
    lumi = datasetsEmb.getLuminosity()

    # Counters
    eventCounterEmb = tauEmbedding.EventCounterMany(
        datasetsEmb,
        normalize=normalizeEmb)  #, counters=analysisEmb+"/counters")
    eventCounterSig = counter.EventCounter(datasetsSig)

    def isNotThis(name):
        return name != datasetName

    eventCounterEmb.removeColumns(
        filter(isNotThis, datasetsEmb.getAllDatasetNames()))
    eventCounterSig.removeColumns(
        filter(isNotThis, datasetsSig.getAllDatasetNames()))
    eventCounterSig.normalizeMCToLuminosity(lumi)

    tdCount = dataset.TreeDraw("dummy",
                               weight=tauEmbedding.signalNtuple.weightBTagging)
    tdCountMET = tdCount.clone(weight=tauEmbedding.signalNtuple.weight,
                               selection=tauEmbedding.signalNtuple.metCut)
    tdCountBTagging = tdCount.clone(
        selection=And(tauEmbedding.signalNtuple.metCut,
                      tauEmbedding.signalNtuple.bTaggingCut))
    tdCountDeltaPhi160 = tdCount.clone(selection=And(
        tauEmbedding.signalNtuple.metCut,
        tauEmbedding.signalNtuple.bTaggingCut,
        tauEmbedding.signalNtuple.deltaPhi160Cut))
    tdCountDeltaPhi130 = tdCount.clone(selection=And(
        tauEmbedding.signalNtuple.metCut,
        tauEmbedding.signalNtuple.bTaggingCut,
        tauEmbedding.signalNtuple.deltaPhi130Cut))

    def addRow(name, td):
        tdEmb = td.clone(tree=analysisEmb + "/tree")
        tdSig = td.clone(tree=analysisSig + "/tree")
        eventCounterEmb.mainCounterAppendRow(name, tdEmb)
        eventCounterSig.getMainCounter().appendRow(name, tdSig)

    # addRow("JetsForEffs", tdCount.clone(weight=tauEmbedding.signalNtuple.weight))
    # addRow("METForEffs", tdCountMET)
    # addRow("BTagging (SF)", tdCountBTagging)
    # addRow("DeltaPhi < 160", tdCountDeltaPhi160)
    # addRow("BTagging (SF) again", tdCountBTagging)
    # addRow("DeltaPhi < 130", tdCountDeltaPhi130)

    table = counter.CounterTable()
    col = eventCounterEmb.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Embedded")
    table.appendColumn(col)
    col = eventCounterSig.getMainCounterTable().getColumn(name=datasetName)
    col.setName("Normal")
    table.appendColumn(col)

    tableTau = counter.CounterTable()
    tmp = "TauIDPassedEvt::TauSelection_HPS"
    col = eventCounterEmb.getSubCounterTable(tmp).getColumn(name=datasetName)
    col.setName("Embedded")
    tableTau.appendColumn(col)
    col = eventCounterSig.getSubCounterTable(tmp).getColumn(name=datasetName)
    col.setName("Normal")
    tableTau.appendColumn(col)

    postfix = ""
    if not normalizeEmb:
        postfix = "_notEmbNormalized"

    fname = "counters_selections_%s%s.txt" % (datasetName, postfix)
    f = open(fname, "w")
    f.write(table.format())
    f.write("\n")
    f.write(tableTau.format())
    f.close()
    print "Printed selection counters to", fname

    if not normalizeEmb:
        return

    # Calculate efficiencies
    table.keepOnlyRows([
        "njets", "MET", "btagging", "btagging scale factor",
        "DeltaPhi(Tau,MET) upper limit"
    ])
    # btag SF efficiency w.r.t. MET
    row = table.getRow(name="MET")
    row.setName("METForEff")
    table.insertRow(3, row)
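    # The MET count is re-inserted as "METForEff" just before the b-tagging
    # scale factor row, so the efficiency column gives the btag-SF efficiency
    # with respect to MET; the helper row is removed from tableEff below.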

    tableEff = counter.CounterTable()
    tableEff.appendColumn(
        counter.efficiencyColumn("Embedded eff",
                                 table.getColumn(name="Embedded")))
    tableEff.appendColumn(
        counter.efficiencyColumn("Normal eff", table.getColumn(name="Normal")))
    tableEff.removeRow(name="METForEff")

    effFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.4f', withPrecision=2))

    #    print table.format(effFormat)

    fname = "counters_selections_%s_eff.txt" % datasetName
    f = open(fname, "w")
    f.write(tableEff.format(effFormat))
    f.close()
    print "Printed selection efficiencies to", fname
Example #29
def doCounters(datasetsEmb):

    # All embedded events
    eventCounterAll = counter.EventCounter(
        datasetsEmb.getFirstDatasetManager(),
        counters=analysisEmbAll + counters)
    eventCounterAll.normalizeMCByLuminosity()
    tableAll = eventCounterAll.getMainCounterTable()
    tableAll.keepOnlyRows([
        "All events",
    ])
    tableAll.renameRows({"All events": "All embedded events"})

    # Mu eff + Wtau mu
    eventCounterMuEff = counter.EventCounter(
        datasetsEmb.getFirstDatasetManager(),
        counters=analysisEmbNoTauEff + counters)
    eventCounterMuEff.normalizeMCByLuminosity()
    tauEmbedding.scaleNormalization(eventCounterMuEff)
    tableMuEff = eventCounterMuEff.getMainCounterTable()
    tableMuEff.keepOnlyRows(["All events"])
    tableMuEff.renameRows({"All events": "mu eff + Wtaumu"})

    # Event counts after embedding normalization, before tau trigger eff,
    # switch to calculate uncertainties of the mean of 10 trials
    eventCounterNoTauEff = tauEmbedding.EventCounterMany(
        datasetsEmb, counters=analysisEmbNoTauEff + counters)
    tableNoTauEff = eventCounterNoTauEff.getMainCounterTable()
    tableNoTauEff.keepOnlyRows([
        "Trigger and HLT_MET cut",
        "njets",
    ])
    tableNoTauEff.renameRows({
        "Trigger and HLT_MET cut": "caloMET > 60",
        "njets": "tau ID"
    })

    # Event counts after tau trigger eff
    eventCounter = tauEmbedding.EventCounterMany(datasetsEmb,
                                                 counters=analysisEmb +
                                                 counters)
    table = eventCounter.getMainCounterTable()
    table.keepOnlyRows([
        "njets", "MET", "btagging scale factor", "deltaPhiTauMET<160",
        "deltaPhiTauMET<130"
    ])
    table.renameRows({
        "njets": "Tau trigger efficiency",
        "btagging scale factor": "b tagging"
    })

    # Combine the rows to one table
    result = counter.CounterTable()
    for tbl in [tableAll, tableMuEff, tableNoTauEff, table]:
        for iRow in xrange(tbl.getNrows()):
            result.appendRow(tbl.getRow(index=iRow))

    addMcSum(result)
    cellFormat = counter.TableFormatText(
        counter.CellFormatTeX(valueFormat='%.4f', withPrecision=2))

    print result.format(cellFormat)

def process(datasets, datasetName, postfix, countName):
    # Handle counter
    eventCounter = counter.EventCounter(datasets)
    mainTable = eventCounter.getMainCounterTable()

    neventsCount = mainTable.getCount(rowName=countName, colName=datasetName)
    nevents = neventsCount.value()
#    column = eventCounter.getSubCounterTable("Classification"+postfix).getColumn(name=datasetName)
#
#    columnFraction = column.clone()
#    columnFraction.setName("Fraction (%)")
#
#    # Consistency check, and do the division
#    tmp = 0
#    for irow in xrange(column.getNrows()):
#        tmp += column.getCount(irow).value()
#
#        frac = dataset.divideBinomial(columnFraction.getCount(irow), neventsCount)
#        frac.multiply(dataset.Count(100))
#        columnFraction.setCount(irow, frac)
#
#    if int(nevents) != int(tmp):
#        raise Exception("Consistency check failed: nevents = %d, tmp = %d" % (int(nevents), int(tmp)))
#
    table = counter.CounterTable()
#    table.appendColumn(column)
#    table.appendColumn(columnFraction)
#
    cellFormat = counter.CellFormatText(valueFormat='%.4f', withPrecision=2)
    tableFormat = counter.TableFormatText(cellFormat)

    print
    print "Dataset %s, step %s, nevents %d" % (datasetName, postfix, int(nevents))
    print table.format(tableFormat)

    # Make plots
    dset = datasets.getDataset(datasetName)
    tmp = Counts()
    oldCanvasDefW = ROOT.gStyle.GetCanvasDefW()
    ROOT.gStyle.SetCanvasDefW(int(oldCanvasDefW*1.5))

    # (tauID, leptonVeto)
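    # Classification helpers: each entry is a (tauIDStatus, leptonVetoStatus)
    # label pair; the lists feed the rejected/embedding/fake-tau/case 1-4
    # categories passed to calculatePlot() below.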
    def usualRejected(obj2):
        _tauIDLabels = tauIDLabels(obj2)
        ret = [("None", "None"), ("None", "#tau_{1}")]
        ret.extend([(x, "#tau_{1}") for x in _tauIDLabels[4:]])
        ret.extend([(x, obj2) for x in _tauIDLabels])
        ret.extend([(x, "Other") for x in _tauIDLabels])
        return ret
    usualEmbedding = [("#tau_{1}", "None"), ("#tau_{1}+other (corr. sel.)", "None")]
    def usualFakeTau(obj2):
        return [(x, "None") for x in tauIDLabels(obj2)[4:]]
    doubleFakeTau = [("Other", "None")]
    usualCase1 = [(x, "#tau_{1}") for x in tauIDLabels("")[1:4]]
    usualCase3 = [("#tau_{1}+other (wrong sel.)", "None")]
    embCase4 = [(x, "None") for x in tauIDLabels("")[1:4]]
    def doubleCase2(obj2):
        return [(obj2, "None"), (obj2+"+other", "None")]

    selectionStep = {"Before": "",
                     "AfterJetSelection": "passJetSelection",
                     "AfterMET": "passMET",
                     "AfterBTag": "passBTag",
                     "AfterAllSelections": "passDeltaPhi"}[postfix]

    treeDraw = dataset.TreeDraw("tree", varexp="LeptonVetoStatus:TauIDStatus >>htemp(%d,0,%d, %d,0,%d" % (Enum.tauSize, Enum.tauSize, Enum.leptonSize, Enum.leptonSize))

    for name, obj2, obj2Type in [
        ("tau1_electron2", "e_{2}", Enum.obj2Electron),
        ("tau1_quark2", "q_{2}", Enum.obj2Quark),
        ("tau1_muon2_nonEmb", "#mu_{2}", Enum.obj2Muon),
        ]:
        tmp += calculatePlot(dset, neventsCount, name, postfix,
                             treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%obj2Type, selectionStep), binLabelsX=tauIDLabels(obj2), binLabelsY=leptonVetoLabels(obj2)),
                             rejected=usualRejected(obj2), embedding=usualEmbedding, faketau=usualFakeTau(obj2),
                             case1=usualCase1, case3=usualCase3)

    tmp += calculatePlot(dset, neventsCount, "tau1_muon2_Emb", postfix,
                         treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%Enum.obj2MuonEmb, selectionStep), binLabelsX=tauIDLabels("#mu_{2}"), binLabelsY=leptonVetoLabels("#mu_{2}")),
                         rejected=usualRejected("#mu_{2}")+usualCase1,
                         faketau=usualFakeTau("#mu_{2}"),
                         case4=embCase4)
#    createMuon2Plot(dset, "tau1_muon2_Emb", postfix)

    for name, obj2, obj2Type in [
        ("tau1_tau2_notInAcceptance", "#tau_{2}", Enum.obj2TauNotInAcceptance),
        ("tau1_tauh2", "#tau_{h,2}", Enum.obj2Tauh),
        ("tau1_taue2", "#tau_{e,2}", Enum.obj2Taue),
        ("tau1_taumu2_nonEmb", "#tau_{#mu,2}", Enum.obj2Taumu),
        ]:
        tmp += calculatePlot(dset, neventsCount, name, postfix,
                             treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%obj2Type, selectionStep), binLabelsX=tauIDLabels(obj2), binLabelsY=leptonVetoLabels(obj2)),
                             rejected=usualRejected(obj2), embedding=usualEmbedding, faketau=doubleFakeTau,
                             case1=usualCase1, case3=usualCase3,
                             case2=doubleCase2(obj2))

    tmp += calculatePlot(dset, neventsCount, "tau1_taumu2_Emb", postfix,
                         treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%Enum.obj2TaumuEmb, selectionStep), binLabelsX=tauIDLabels("#tau_{#mu,2}"), binLabelsY=leptonVetoLabels("#tau_{#mu,2}")),
                         rejected=usualRejected("#tau_{#mu,2}")+usualCase1,
                         faketau=doubleFakeTau,
                         case4=embCase4)

    ROOT.gStyle.SetCanvasDefW(oldCanvasDefW)


    ## Ntuple stuff
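    # Tree-level selections for the correctly embedded events and for the two
    # problematic categories (case 1: tau_1 caught by the lepton veto,
    # case 2: the second tau identified as tau_h instead of tau_1), used for
    # the transverse-mass comparison plots below.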

    embeddingSelection = Or(*[And("Obj2Type == %d"%obj2, "LeptonVetoStatus == %d"%Enum.leptonNone, Or(*["TauIDStatus == %d" % x for x in [Enum.tauTau1, Enum.tauTau1OtherCorrect]]))
                              for obj2 in [Enum.obj2Electron, Enum.obj2Quark, Enum.obj2Muon, Enum.obj2TauNotInAcceptance, Enum.obj2Tauh, Enum.obj2Taue, Enum.obj2Taumu]])
    case1Selection = Or(*[And("Obj2Type == %d"%obj2, "LeptonVetoStatus == %d"%Enum.leptonTau1, Or(*["TauIDStatus == %d" % x for x in [Enum.tauTau1, Enum.tauTau1OtherCorrect, Enum.tauTau1OtherWrong]]))
                              for obj2 in [Enum.obj2Electron, Enum.obj2Quark, Enum.obj2Muon, Enum.obj2TauNotInAcceptance, Enum.obj2Tauh, Enum.obj2Taue, Enum.obj2Taumu]])
    case2Selection = Or(*[And("Obj2Type == %d"%obj2, "LeptonVetoStatus == %d"%Enum.leptonNone, Or(*["TauIDStatus == %d" % x for x in [Enum.tauObj2, Enum.tauObj2Other]]))
                              for obj2 in [Enum.obj2TauNotInAcceptance, Enum.obj2Tauh, Enum.obj2Taue, Enum.obj2Taumu]])

    embeddingSelection = And(selectionStep, embeddingSelection)
    case1Selection = And(selectionStep, case1Selection)
    case2Selection = And(selectionStep, case2Selection)
    
    createTransverseMassPlot(dset, "case1", postfix, nominalSelection=embeddingSelection, compareSelection=case1Selection,
                             nominalLegend="Embedding (correct)", compareLegend="Case 1")
    createTransverseMassPlot(dset, "case2", postfix, nominalSelection=embeddingSelection, compareSelection=case2Selection,
                             nominalLegend="Embedding (correct)", compareLegend="Case 2")

    # plotNames = [
    #             "tau1_electron2",
    #             "tau1_quark2",
    #             "tau1_muon2_nonEmb",     
    #             "tau1_muon2_Emb",
    #             "tau1_tau2_notInAcceptance",
    #             "tau1_tauh2", 
    #             "tau1_taue2",
    #             "tau1_taumu2_nonEmb",
    #             "tau1_taumu2_Emb"
    #             ]
    # for name in plotNames:
    #     tmp += calculatePlot(dset, neventsCount, name, postfix)

    if int(nevents) != int(tmp.all):
        raise Exception("Consistency check failed: nevents = %d, tmp = %d" % (int(nevents), int(tmp.all)))

    tmp.printResults()
    print
    tmp.printLegend()
    tmp.crossCheck()

    allEmbeddingIncluded = int(tmp.embedding) + int(tmp.case1) + int(tmp.case3)

    print
    print "So, the number of events included by embedding is %d" % allEmbeddingIncluded
    print "Of these,"
 
    frac = dataset.divideBinomial(dataset.Count(int(tmp.embedding)), dataset.Count(allEmbeddingIncluded))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%) are included correctly" % (int(tmp.embedding), cellFormat.format(frac))

    frac = dataset.divideBinomial(dataset.Count(int(tmp.case3)), dataset.Count(allEmbeddingIncluded))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%) are included correctly, but wrong object is chosen as tau_h" % (int(tmp.case3), cellFormat.format(frac))

    frac = dataset.divideBinomial(dataset.Count(int(tmp.case1)), dataset.Count(allEmbeddingIncluded))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%) are included incorrectly (tau_1 identified in lepton veto)" % (int(tmp.case1), cellFormat.format(frac))

    print "In addition, the following events are incorrectly rejected"
    # Note that these ratios are NOT binomial!
    # Although apparently, in practice, the result is the same
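    # (case2/case4 events are not a subset of allEmbeddingIncluded, so sqrt(N)
    # Poisson uncertainties are propagated through an ordinary division instead
    # of dataset.divideBinomial.)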
    
    #frac = dataset.divideBinomial(dataset.Count(int(tmp.case2)), dataset.Count(allEmbeddingIncluded))
    frac = dataset.Count(tmp.case2, math.sqrt(tmp.case2))
    frac.divide(dataset.Count(allEmbeddingIncluded, math.sqrt(allEmbeddingIncluded)))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%): tau_1 not identified as tau_h, but decay of tau_2 would be" % (int(tmp.case2), cellFormat.format(frac))

    #frac = dataset.divideBinomial(dataset.Count(int(tmp.case4)), dataset.Count(allEmbeddingIncluded))
    frac = dataset.Count(tmp.case4, math.sqrt(tmp.case4))
    frac.divide(dataset.Count(allEmbeddingIncluded, math.sqrt(allEmbeddingIncluded)))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%): mu_2 would be accepted for embedding, and is not identified in lepton veto" % (int(tmp.case4), cellFormat.format(frac))