def calculatePlot(dset, neventsCount, name, postfix, treeDraw=None, rejected=None, embedding=None, faketau=None, case1=None, case2=None, case3=None, case4=None):
    """Classify the 2D count histogram bins of one dataset and draw plots.

    Each (x-label, y-label) bin is attributed to the first matching category
    (rejected, embedding, faketau, case1..case4); the bin content is summed
    into the corresponding Counts field.  A count plot and a binomial
    fraction plot (in %) are drawn.  Returns the filled Counts object.
    """
    datasetName = dset.getName()
    if treeDraw is None:
        histo = dset.getDatasetRootHisto(name+"_"+postfix).getHistogram()
    else:
        histo = dset.getDatasetRootHisto(treeDraw).getHistogram()
    ROOT.gStyle.SetPaintTextFormat(".0f")
    createDrawPlot("count_"+datasetName+"_"+name+"_"+postfix, histo)

    hFrac = histo.Clone("hfrac")
    hFracErr = histo.Clone("hfracErr")
    counts = Counts()

    # Categories in priority order; the first list containing the bin's
    # label pair wins (mirrors the original if/elif chain).
    categories = [
        (rejected, "rejected"),
        (embedding, "embedding"),
        (faketau, "faketau"),
        (case1, "case1"),
        (case2, "case2"),
        (case3, "case3"),
        (case4, "case4"),
        ]

    xAxis = histo.GetXaxis()
    yAxis = histo.GetYaxis()
    for ix in xrange(1, histo.GetNbinsX()+1):
        for iy in xrange(1, histo.GetNbinsY()+1):
            content = histo.GetBinContent(ix, iy)
            counts.all += content

            labelPair = (xAxis.GetBinLabel(ix), yAxis.GetBinLabel(iy))
            for labels, attr in categories:
                if labels is not None and labelPair in labels:
                    setattr(counts, attr, getattr(counts, attr) + content)
                    break

            # Binomial fraction of total events, expressed in percent
            frac = dataset.divideBinomial(dataset.Count(content, 0), neventsCount)
            frac.multiply(dataset.Count(100))
            hFrac.SetBinContent(ix, iy, frac.value())
            hFracErr.SetBinContent(ix, iy, frac.uncertainty())

    try:
        counts.crossCheck()
    except Exception as e:
        raise Exception(str(e)+"\nBin sources:\nrejected: %s\nembedding: %s\nfaketau: %s\ncase1: %s\ncase2: %s\ncase3: %s\ncase4: %s" % (rejected, embedding, faketau, case1, case2, case3, case4))

    ROOT.gStyle.SetPaintTextFormat(".4f")
    createDrawPlot("frac_"+datasetName+"_"+name+"_"+postfix, hFrac)

    return counts
# Esempio n. 2
# 0
def normaliseEmbedding(th1):
    """Scale every bin of th1 in place by the global embeddingNormalisation.

    Bin contents and errors are multiplied via dataset.Count so the
    normalisation uncertainty is propagated.  Bins 0..GetNbinsX() are
    touched (underflow included; overflow bin is not).
    """
    lastBin = th1.GetNbinsX()
    for ibin in xrange(0, lastBin + 1):
        scaled = dataset.Count(th1.GetBinContent(ibin), th1.GetBinError(ibin))
        scaled.multiply(embeddingNormalisation)

        th1.SetBinContent(ibin, scaled.value())
        th1.SetBinError(ibin, scaled.uncertainty())
# Esempio n. 3
# 0
 def addRow(name, newktt, nsignal):
     """Append a row with EWK+tt count, signal count and signal fraction.

     fraction = 100 * nsignal / (nsignal + newktt), or None when nsignal
     is None.  Appends to the enclosing-scope `result` table.
     """
     fraction = None
     # Fix: identity comparison (was "nsignal != None"); None is a singleton.
     if nsignal is not None:
         fraction = nsignal.clone()
         total = nsignal.clone()
         total.add(newktt)
         fraction.divide(total)
         fraction.multiply(dataset.Count(100))
     result.appendRow(counter.CounterRow(name, ["EWK+tt events", "Signal events", "Signal fraction (\%)"], [newktt, nsignal, fraction]))
# Esempio n. 4
# 0
def signalAreaEvents(embedded, expected, normfactor):
    mtMin = 0

    lowBin = embedded.FindBin(mtMin)
    upBin = embedded.GetNbinsX() + 1  # include the overflow bin

    #embeddedCount = integrate(embedded, lowBin, upBin)
    #expectedCount = integrate(expected, lowBin, upBin)
    #embeddedCount = dataset.Count(embedded.Integral(lowBin, upBin), 0)
    #expectedCount = dataset.Count(expected.Integral(lowBin, upBin), 0)
    embeddedCount = dataset.Count(embedded.Integral(), 0)
    expectedCount = dataset.Count(expected.Integral(), 0)

    prediction = embeddedCount.copy()
    prediction.multiply(normfactor)

    print "Embedded events %.2f" % embeddedCount.value()
    print "Predicted events %.2f +- %.2f" % (prediction.value(),
                                             prediction.uncertainty())
    print "Expected events %.2f" % expectedCount.value()
# Esempio n. 5
# 0
    def addRow(name):
        """Append min, max and largest relative deviation (%) for column *name*.

        Reads rows "Min", "Max", "Max-mean", "Mean-min" and "Mean" of the
        enclosing-scope `table`; appends to the enclosing-scope `ftable`.
        """
        col = table.getColumn(name=name)

        lowest = col.getCount(name="Min")
        highest = col.getCount(name="Max")

        # Largest absolute deviation from the mean, in either direction
        deviation = col.getCount(name="Max-mean")
        below = col.getCount(name="Mean-min")
        if below.value() > deviation.value():
            deviation = below

        # Express as a percentage of the mean
        deviation.divide(col.getCount(name="Mean"))
        deviation.multiply(dataset.Count(100))

        ftable.appendRow(counter.CounterRow(name,
                                            ["Minimum", "Maximum", "Largest deviation from average (%)"],
                                            [lowest, highest, deviation]))
def doPlots(table, onlyWjets, mcEvents, normalize, lumi):
    """Draw per-dataset embedding-trial plots from a counter table.

    For every column (dataset) of *table* two plots are produced and saved:
    (1) the per-trial event count vs. trial number, fitted with a constant,
    with fitted value / mean / mean-uncertainty reference lines; and
    (2) the distribution of the trial counts, fitted with a Gaussian.
    onlyWjets/mcEvents/normalize only affect the distribution binning and
    x-axis labelling; lumi is forwarded to the luminosity text.
    """
    nrows = table.getNrows()
    # Constant ("[0]") fit to the trial values
    function = ROOT.TF1("fitFunction", "[0]") 
    function.SetParameter(0, 0)
    # Gaussian fit for the trial-count distribution
    f2 = ROOT.TF1("fitG", "gaus")
    f2.SetLineColor(ROOT.kRed)
    f2.SetLineWidth(2)

    # (nbins, xmin, xmax) of the distribution histogram, per dataset name
    binning = {
        "Data": (8, 60, 100),
        "Diboson": (8, 0, 2),
        "DYJetsToLL": (8, 1, 5),
        "EWKMCsum": (8, 40, 120),
        "SingleTop": (8, 3, 6),
        "TTJets": (10, 25, 35),
        "W3Jets": (10, 6, 11),
        "WJets": (14, 10, 80),
        }
    if onlyWjets:
        binning["WJets"] = (24, 10, 90)
    if not normalize:
        binning["Data"] = (10, 70, 120)
        binning["EWKMCsum"] = (6, 60, 120)
        binning["SingleTop"] = (8, 4, 6)
        binning["TTJets"] = (10, 32, 42)
        binning["W3Jets"] = (12, 6, 12)
        if onlyWjets:
            binning["WJets"] = (10, 20, 60)
    if mcEvents:
        binning["TTJets"] = (12, 320, 440)
        binning["WJets"] = (24, 30, 90)

    for icol in xrange(table.getNcolumns()):
        name = table.getColumnNames()[icol]
        label = plots._legendLabels.get(name, name)
        if name != "Data":
            label += " simulation"
        # h: count vs. trial number; h2: distribution of the counts
        h = ROOT.TH1F(name, name, nrows, 0, nrows)
        h2 = ROOT.TH1F(name+"_dist", name, *(binning.get(name, (10, 0, 100))))
        mean = dataset.Count(0, 0)
        for irow in xrange(nrows):
            count = table.getCount(irow, icol)
            h.SetBinContent(irow+1, count.value())
            h.SetBinError(irow+1, count.uncertainty())
            h2.Fill(count.value())
            mean.add(count)
        # Arithmetic mean over trials (uncertainty scaled the same way)
        mean = dataset.Count(mean.value()/nrows, mean.uncertainty()/nrows)

        h.Fit("fitFunction")

        value = function.GetParameter(0)
        error = function.GetParError(0)

        # function.SetParameters(1., 40., 1.);
        # function.SetParLimits(0, 0.0, 1.0);
        # fitResult = graph.Fit(function, "NRSE+EX0");
        # print "Fit status", fitResult.Status()
        # #fitResult.Print("V");
        # #fitResult.GetCovarianceMatrix().Print();
        # function.SetLineColor(graph.GetMarkerColor());
        # function.SetLineWidth(2);
        function.Draw("same")
        ROOT.gPad.Update()
        # Reposition and recolour the fit statistics box, if ROOT created one
        stat = h.FindObject("stats")
        if stat:
            stat.SetX1NDC(0.2)
            stat.SetX2NDC(0.44)
            stat.SetY1NDC(0.2)
            stat.SetY2NDC(0.3)
            stat.SetTextColor(ROOT.kRed)
            stat.SetLineColor(ROOT.kRed)
        # return (function, fitResult)

        styles.dataStyle.apply(h)
        p = plots.PlotBase([h])
        p.histoMgr.setHistoDrawStyle(name, "EP")
        p.createFrame("fluctuation_"+name, opts={"ymin": 0, "ymaxfactor": 1.2, "nbins": nrows})
        p.frame.GetXaxis().SetTitle("Embedding trial number")
        ylabel = "Simulation"
        if name == "Data":
            ylabel = "Data"
        ylabel += " events"
        p.frame.GetYaxis().SetTitle(ylabel)
        # Label every trial bin, or every 5th starting from 5 for W+jets-only
        step = 1
        start = 0
        if onlyWjets:
            start = 4
            step = 5
        for irow in xrange(start, nrows, step):
            p.frame.GetXaxis().SetBinLabel(irow+1, "%d"%(irow+1))

        xmin = p.frame.GetXaxis().GetXmin()
        xmax = p.frame.GetXaxis().GetXmax()

        leg = histograms.moveLegend(histograms.createLegend(), dx=-0.07, dy=-0.6, dh=-0.15)
        leg.AddEntry(h, "Trial values", "P")

        # Horizontal reference line across the full x range
        def createLine(val, st=1, col=ROOT.kRed):
            l = ROOT.TLine(xmin, val, xmax, val)
            l.SetLineWidth(2)
            l.SetLineStyle(st)
            l.SetLineColor(col)
            return l

        fv = createLine(value)
        leg.AddEntry(fv, "Fitted value", "L")
        p.appendPlotObject(fv)
        # fe = createLine(value+error, ROOT.kDashed)
        # leg.AddEntry(fe, "Fit uncertainty", "L")
        # p.appendPlotObject(fe)
        # p.appendPlotObject(createLine(value-error, ROOT.kDashed))
        v = createLine(mean.value(), col=ROOT.kBlue)
        leg.AddEntry(v, "Mean", "L")
        p.appendPlotObject(v)
        ve = createLine(mean.value()+mean.uncertainty(), st=ROOT.kDashed, col=ROOT.kBlue)
        leg.AddEntry(ve, "Mean uncertainty", "L")
        p.appendPlotObject(ve)
        p.appendPlotObject(createLine(mean.value()-mean.uncertainty(), st=ROOT.kDashed, col=ROOT.kBlue))

        p.legend = leg

        p.appendPlotObject(histograms.PlotText(0.65, 0.33, label, size=20))
        p.draw()
        if name != "Data":
            histograms.addCmsPreliminaryText(text="Simulation")
        histograms.addEnergyText()
        histograms.addLuminosityText(None, None, lumi)
        p.save()

        ############### Second plot: distribution of the trial counts

        f2.SetParameter(1, value)
        h2.Fit("fitG")
#        f2.Draw("same")
        ROOT.gPad.Update()
        stat = h2.FindObject("stats")
        if stat:
            stat.SetX1NDC(0.62)
            stat.SetX2NDC(0.9)
            stat.SetY1NDC(0.7)
            stat.SetY2NDC(0.85)
            stat.SetTextColor(ROOT.kRed)
            stat.SetLineColor(ROOT.kRed)

        styles.dataStyle.apply(h2)
        p = plots.PlotBase([h2])
        p.histoMgr.setHistoDrawStyle(name+"_dist", "HIST")
        p.createFrame("fluctuation_"+name+"_dist", opts={"ymin": 0, "ymaxfactor": 1.4, "nbins": nrows})
        p.frame.GetXaxis().SetTitle(ylabel)
        p.frame.GetYaxis().SetTitle("Occurrances")

        ymin = p.frame.GetYaxis().GetXmin()
        ymax = p.frame.GetYaxis().GetXmax()

        leg = histograms.moveLegend(histograms.createLegend(), dx=-0.07, dy=-0.25, dh=-0.15)
        leg.AddEntry(h2, "Trials", "F")
        leg.AddEntry(f2, "Gaussian fit", "L")

        # Vertical reference line across the full y range (currently unused;
        # the createLine2 call sites below are commented out)
        def createLine2(val, st=1):
            l = ROOT.TLine(val, ymin, val, ymax)
            l.SetLineWidth(1)
            l.SetLineColor(ROOT.kBlue)
            l.SetLineStyle(st)
            return l

        p.appendPlotObject(h2, "FUNC")
        p.appendPlotObject(stat)
        p.appendPlotObject(histograms.PlotText(0.65, 0.88, label, size=20))
        # fv = createLine2(value)
        # leg.AddEntry(fv, "Fit of values", "L")
        # p.appendPlotObject(fv)
        # fe = createLine2(value+error, ROOT.kDashed)
        # leg.AddEntry(fe, "Fit of values unc.", "L")
        # p.appendPlotObject(fe)
        # p.appendPlotObject(createLine2(value-error, ROOT.kDashed))
        p.legend = leg

        p.draw()

        if name != "Data":
            histograms.addCmsPreliminaryText(text="Simulation")
        histograms.addEnergyText()
        histograms.addLuminosityText(None, None, lumi)
        p.save()
# Esempio n. 7
# 0
def integrate(th, firstBin, lastBin):
    """Sum histogram bin contents over [firstBin, lastBin] (inclusive).

    Bin errors are carried along via dataset.Count addition; returns the
    accumulated Count.
    """
    total = dataset.Count(0, 0)
    for ibin in xrange(firstBin, lastBin + 1):
        total.add(dataset.Count(th.GetBinContent(ibin), th.GetBinError(ibin)))
    return total
def doScaleFactors(histoSig, histoEmb, outputDir, opts):
    """Derive signal/embedded scale factors per mT bin and write them to JSON.

    Rebins both inputs to the shapeTransverseMass binning, forms the ratio
    signal/embedded bin by bin (with systematic uncertainties from the
    uncertainty graphs and statistical ones from the histograms), performs
    consistency checks, and writes embedding_mt_weight.json to outputDir.
    """
    binning = systematics._dataDrivenCtrlPlotBinning["shapeTransverseMass"]
    histoSig.Rebin(len(binning) - 1, "newsig", array.array("d", binning))
    histoEmb.Rebin(len(binning) - 1, "newemb", array.array("d", binning))

    grSig = histoSig.getSystematicUncertaintyGraph()
    grEmb = histoEmb.getSystematicUncertaintyGraph()

    hSig = histoSig.getRootHisto()
    hEmb = histoEmb.getRootHisto()

    scaleFactors = []
    scaleFactors_stat = []

    # Trivial SF=1 entries used for the MC parameters below
    identities = []

    # Relative-tolerance float comparison (1e-4), exact for a == 0
    def equal(a, b):
        if a == 0.0:
            return b == 0.0
        return abs((a - b) / a) < 0.0001

    for i in xrange(0, grSig.GetN()):
        lowEdge = grSig.GetX()[i] - grSig.GetErrorXlow(i)

        sig_val = grSig.GetY()[i]
        sig_err_up = grSig.GetErrorYhigh(i)
        sig_err_down = grSig.GetErrorYlow(i)
        emb_val = grEmb.GetY()[i]
        emb_err_up = grEmb.GetErrorYhigh(i)
        emb_err_down = grEmb.GetErrorYlow(i)

        # Employ count
        cemb = dataset.Count(emb_val, emb_err_up, emb_err_down)
        csig = dataset.Count(sig_val, sig_err_up, sig_err_down)
        csig.divide(cemb)

        # Same ratio from histogram bin contents/errors (statistical only)
        cemb_stat = dataset.Count(hEmb.GetBinContent(i + 1),
                                  hEmb.GetBinError(i + 1))
        csig_stat = dataset.Count(hSig.GetBinContent(i + 1),
                                  hSig.GetBinError(i + 1))
        csig_stat.divide(cemb_stat)

        # Cross-check graph vs. histogram binning and central values
        if not equal(lowEdge, hEmb.GetBinLowEdge(i + 1)):
            raise Exception("Low edges not equal (%.10g vs %.10g)" %
                            (lowEdge, hEmb.GetBinLowEdge(i + 1)))
        if not equal(csig.value(), csig_stat.value()):
            raise Exception("Values not equal (%.10g vs %.10g)" %
                            (csig.value(), csig_stat.value()))

        print "bin %.1f, sf %.7f +%.7f -%.7f (stat +-%.7f)" % (
            lowEdge, csig.value(), csig.uncertainty(), csig.systUncertainty(),
            csig_stat.uncertainty())

        # NOTE(review): uncertaintyPlus/Minus mix uncertainty() and
        # systUncertainty() here — confirm this asymmetric convention.
        d = OrderedDict.OrderedDict()
        d["mt"] = lowEdge
        d["efficiency"] = csig.value()
        d["uncertaintyPlus"] = csig.uncertainty()
        d["uncertaintyMinus"] = csig.systUncertainty()
        scaleFactors.append(d)
        d = OrderedDict.OrderedDict()
        d["mt"] = lowEdge
        d["efficiency"] = csig.value()
        d["uncertaintyPlus"] = csig_stat.uncertainty()
        d["uncertaintyMinus"] = csig_stat.uncertainty()
        scaleFactors_stat.append(d)
        d = OrderedDict.OrderedDict()
        d["mt"] = lowEdge
        d["efficiency"] = 1.0
        d["uncertaintyPlus"] = 0.0
        d["uncertaintyMinus"] = 0.0
        identities.append(d)

    # Assemble the JSON payload: full and stat-only SF sets for data,
    # identity SFs for MC
    par = OrderedDict.OrderedDict()
    par2 = OrderedDict.OrderedDict()
    par2["firstRun"] = 1  # to support also dataEfficiency in MC
    par2["lastRun"] = 208686
    par2["luminosity"] = 1  # dummy value, not used for anything
    par2["bins"] = scaleFactors
    par["Run2012ABCD"] = par2
    par2 = OrderedDict.OrderedDict()
    par2["firstRun"] = 1  # to support also dataEfficiency in MC
    par2["lastRun"] = 208686
    par2["luminosity"] = 1  # dummy value, not used for anything
    par2["bins"] = scaleFactors_stat
    par["Run2012ABCD_statOnly"] = par2

    ret = OrderedDict.OrderedDict()
    ret["_multicrab_embedded"] = os.getcwd()
    ret["_multicrab_signalAnalysisGenTau"] = opts.dirSig

    ret["dataParameters"] = par

    ret["mcParameters"] = {"Run2012ABCD": {"bins": identities}}

    tauEmbedding.writeToFile(outputDir, "embedding_mt_weight.json",
                             json.dumps(ret, indent=2))
# Esempio n. 9
# 0
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.tdrstyle as tdrstyle
import HiggsAnalysis.HeavyChHiggsToTauNu.tools.styles as styles

# Configuration
#analysis = "signalAnalysis"
analysis = "signalAnalysisTauSelectionHPSTightTauBased"
counters = analysis + "Counters/weighted"
prefix = "TauEmbeddingDataSignalAnalysisData"

# Input multicrab directories (embedding+signal, and plain signal analysis)
embeddingSignalAnalysis = "."
signalAnalysis = "../../multicrab_signalAnalysis_tauIdScan_110411_165833"

# NOTE(review): useData is assigned True and immediately overridden to
# False — the first assignment is a dead manual toggle.
useData = True
useData = False

# Embedding normalisation factor: data-derived value by default,
# MC-derived value when useData is False (prefix switched accordingly)
embeddingNormalisation = dataset.Count(0.700, 0.345)
if not useData:
    embeddingNormalisation = dataset.Count(0.369, 0.042)  # MC
    prefix = prefix.replace("Data", "MC")


# main function
def main():
    """Read the embedding+signal and plain signal-analysis datasets.

    NOTE(review): the function body ends after reading the datasets and
    datasetsEmbSig/datasetsSig are never used — this snippet appears
    truncated; confirm against the original file.
    """
    # Read the datasets
    # Take only TT+W from signal analysis, and data from embedding+signal analysis
    datasetsEmbSig = dataset.getDatasetsFromMulticrabCfg(
        cfgfile=embeddingSignalAnalysis + "/multicrab.cfg", counters=counters)
    datasetsSig = dataset.getDatasetsFromMulticrabCfg(cfgfile=signalAnalysis +
                                                      "/multicrab.cfg",
                                                      counters=counters)
def process(datasets, datasetName, postfix, countName):
    """Classify events of one dataset at one selection step and print fractions.

    Reads the event count from the main counter, produces classification
    plots for all tau/lepton categories via calculatePlot, draws transverse
    mass comparison plots, checks that the classified counts sum to the
    total, and prints the breakdown of events included/rejected by embedding.
    """
    # Handle counter
    eventCounter = counter.EventCounter(datasets)
    mainTable = eventCounter.getMainCounterTable()

    neventsCount = mainTable.getCount(rowName=countName, colName=datasetName)
    nevents = neventsCount.value()
#    column = eventCounter.getSubCounterTable("Classification"+postfix).getColumn(name=datasetName)
#
#    columnFraction = column.clone()
#    columnFraction.setName("Fraction (%)")
#
#    # Consistency check, and do the division
#    tmp = 0
#    for irow in xrange(column.getNrows()):
#        tmp += column.getCount(irow).value()
#
#        frac = dataset.divideBinomial(columnFraction.getCount(irow), neventsCount)
#        frac.multiply(dataset.Count(100))
#        columnFraction.setCount(irow, frac)
#
#    if int(nevents) != int(tmp):
#        raise Exception("Consistency check failed: nevents = %d, tmp = %d" % (int(nevents), int(tmp)))
#
    table = counter.CounterTable()
#    table.appendColumn(column)
#    table.appendColumn(columnFraction)
#
    cellFormat = counter.CellFormatText(valueFormat='%.4f', withPrecision=2)
    tableFormat = counter.TableFormatText(cellFormat)

    print
    print "Dataset %s, step %s, nevents %d" % (datasetName, postfix, int(nevents))
    print table.format(tableFormat)

    # Make plots
    dset = datasets.getDataset(datasetName)
    tmp = Counts()
    # Temporarily widen the canvas for the 2D classification plots
    oldCanvasDefW = ROOT.gStyle.GetCanvasDefW()
    ROOT.gStyle.SetCanvasDefW(int(oldCanvasDefW*1.5))

    # (tauID, leptonVeto)
    def usualRejected(obj2):
        # Label pairs counted as rejected for a generic second object obj2
        _tauIDLabels = tauIDLabels(obj2)
        ret = [("None", "None"), ("None", "#tau_{1}")]
        ret.extend([(x, "#tau_{1}") for x in _tauIDLabels[4:]])
        ret.extend([(x, obj2) for x in _tauIDLabels])
        ret.extend([(x, "Other") for x in _tauIDLabels])
        return ret
    usualEmbedding = [("#tau_{1}", "None"), ("#tau_{1}+other (corr. sel.)", "None")]
    def usualFakeTau(obj2):
        # Label pairs counted as fake tau for a generic second object obj2
        return [(x, "None") for x in tauIDLabels(obj2)[4:]]
    doubleFakeTau = [("Other", "None")]
    usualCase1 = [(x, "#tau_{1}") for x in tauIDLabels("")[1:4]]
    usualCase3 = [("#tau_{1}+other (wrong sel.)", "None")]
    embCase4 = [(x, "None") for x in tauIDLabels("")[1:4]]
    def doubleCase2(obj2):
        # Label pairs for case 2 when the second object is a tau
        return [(obj2, "None"), (obj2+"+other", "None")]

    # Tree selection expression corresponding to the requested step
    selectionStep = {"Before": "",
                     "AfterJetSelection": "passJetSelection",
                     "AfterMET": "passMET",
                     "AfterBTag": "passBTag",
                     "AfterAllSelections": "passDeltaPhi"}[postfix]

    # NOTE(review): the ">>htemp(...)" spec below appears to be missing its
    # closing parenthesis — confirm against the original file.
    treeDraw = dataset.TreeDraw("tree", varexp="LeptonVetoStatus:TauIDStatus >>htemp(%d,0,%d, %d,0,%d" % (Enum.tauSize, Enum.tauSize, Enum.leptonSize, Enum.leptonSize))

    # Non-embedding second objects (electron, quark, muon)
    for name, obj2, obj2Type in [
        ("tau1_electron2", "e_{2}", Enum.obj2Electron),
        ("tau1_quark2", "q_{2}", Enum.obj2Quark),
        ("tau1_muon2_nonEmb", "#mu_{2}", Enum.obj2Muon),
        ]:
        tmp += calculatePlot(dset, neventsCount, name, postfix,
                             treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%obj2Type, selectionStep), binLabelsX=tauIDLabels(obj2), binLabelsY=leptonVetoLabels(obj2)),
                             rejected=usualRejected(obj2), embedding=usualEmbedding, faketau=usualFakeTau(obj2),
                             case1=usualCase1, case3=usualCase3)

    # Embedded muon as second object
    tmp += calculatePlot(dset, neventsCount, "tau1_muon2_Emb", postfix,
                         treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%Enum.obj2MuonEmb, selectionStep), binLabelsX=tauIDLabels("#mu_{2}"), binLabelsY=leptonVetoLabels("#mu_{2}")),
                         rejected=usualRejected("#mu_{2}")+usualCase1,
                         faketau=usualFakeTau("#mu_{2}"),
                         case4=embCase4)
#    createMuon2Plot(dset, "tau1_muon2_Emb", postfix)

    # Second tau categories (out of acceptance, hadronic, electronic, muonic)
    for name, obj2, obj2Type in [
        ("tau1_tau2_notInAcceptance", "#tau_{2}", Enum.obj2TauNotInAcceptance),
        ("tau1_tauh2", "#tau_{h,2}", Enum.obj2Tauh),
        ("tau1_taue2", "#tau_{e,2}", Enum.obj2Taue),
        ("tau1_taumu2_nonEmb", "#tau_{#mu,2}", Enum.obj2Taumu),
        ]:
        tmp += calculatePlot(dset, neventsCount, name, postfix,
                             treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%obj2Type, selectionStep), binLabelsX=tauIDLabels(obj2), binLabelsY=leptonVetoLabels(obj2)),
                             rejected=usualRejected(obj2), embedding=usualEmbedding, faketau=doubleFakeTau,
                             case1=usualCase1, case3=usualCase3,
                             case2=doubleCase2(obj2))

    # Embedded tau->mu as second object
    tmp += calculatePlot(dset, neventsCount, "tau1_taumu2_Emb", postfix,
                         treeDraw=treeDraw.clone(selection=And("Obj2Type==%d"%Enum.obj2TaumuEmb, selectionStep), binLabelsX=tauIDLabels("#tau_{#mu,2}"), binLabelsY=leptonVetoLabels("#tau_{#mu,2}")),
                         rejected=usualRejected("#tau_{#mu,2}")+usualCase1,
                         faketau=doubleFakeTau,
                         case4=embCase4)

    # Restore the default canvas width
    ROOT.gStyle.SetCanvasDefW(oldCanvasDefW)


    ## Ntuple stuff

    # Tree selections for correctly embedded events and for cases 1/2
    embeddingSelection = Or(*[And("Obj2Type == %d"%obj2, "LeptonVetoStatus == %d"%Enum.leptonNone, Or(*["TauIDStatus == %d" % x for x in [Enum.tauTau1, Enum.tauTau1OtherCorrect]]))
                              for obj2 in [Enum.obj2Electron, Enum.obj2Quark, Enum.obj2Muon, Enum.obj2TauNotInAcceptance, Enum.obj2Tauh, Enum.obj2Taue, Enum.obj2Taumu]])
    case1Selection = Or(*[And("Obj2Type == %d"%obj2, "LeptonVetoStatus == %d"%Enum.leptonTau1, Or(*["TauIDStatus == %d" % x for x in [Enum.tauTau1, Enum.tauTau1OtherCorrect, Enum.tauTau1OtherWrong]]))
                              for obj2 in [Enum.obj2Electron, Enum.obj2Quark, Enum.obj2Muon, Enum.obj2TauNotInAcceptance, Enum.obj2Tauh, Enum.obj2Taue, Enum.obj2Taumu]])
    case2Selection = Or(*[And("Obj2Type == %d"%obj2, "LeptonVetoStatus == %d"%Enum.leptonNone, Or(*["TauIDStatus == %d" % x for x in [Enum.tauObj2, Enum.tauObj2Other]]))
                              for obj2 in [Enum.obj2TauNotInAcceptance, Enum.obj2Tauh, Enum.obj2Taue, Enum.obj2Taumu]])

    embeddingSelection = And(selectionStep, embeddingSelection)
    case1Selection = And(selectionStep, case1Selection)
    case2Selection = And(selectionStep, case2Selection)

    createTransverseMassPlot(dset, "case1", postfix, nominalSelection=embeddingSelection, compareSelection=case1Selection,
                             nominalLegend="Embedding (correct)", compareLegend="Case 1")
    createTransverseMassPlot(dset, "case2", postfix, nominalSelection=embeddingSelection, compareSelection=case2Selection,
                             nominalLegend="Embedding (correct)", compareLegend="Case 2")

    # plotNames = [
    #             "tau1_electron2",
    #             "tau1_quark2",
    #             "tau1_muon2_nonEmb",     
    #             "tau1_muon2_Emb",
    #             "tau1_tau2_notInAcceptance",
    #             "tau1_tauh2", 
    #             "tau1_taue2",
    #             "tau1_taumu2_nonEmb",
    #             "tau1_taumu2_Emb"
    #             ]
    # for name in plotNames:
    #     tmp += calculatePlot(dset, neventsCount, name, postfix)

    # All classified counts must add up to the total number of events
    if int(nevents) != int(tmp.all):
        raise Exception("Consistency check failed: nevents = %d, tmp = %d" % (int(nevents), int(tmp.all)))

    tmp.printResults()
    print
    tmp.printLegend()
    tmp.crossCheck()

    # Events that the embedding procedure includes (correctly or not)
    allEmbeddingIncluded = int(tmp.embedding) + int(tmp.case1) + int(tmp.case3)

    print
    print "So, the number of events included by embedding is %d" % allEmbeddingIncluded
    print "Of these,"

    frac = dataset.divideBinomial(dataset.Count(int(tmp.embedding)), dataset.Count(allEmbeddingIncluded))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%) are included correctly" % (int(tmp.embedding), cellFormat.format(frac))

    frac = dataset.divideBinomial(dataset.Count(int(tmp.case3)), dataset.Count(allEmbeddingIncluded))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%) are included correctly, but wrong object is chosen as tau_h" % (int(tmp.case3), cellFormat.format(frac))

    frac = dataset.divideBinomial(dataset.Count(int(tmp.case1)), dataset.Count(allEmbeddingIncluded))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%) are included incorrectly (tau_1 identified in lepton veto)" % (int(tmp.case1), cellFormat.format(frac))

    print "In addition, the following events are incorrectly rejected"
    # Note that these ratios are NOT binomial!
    # Although apparently, in practice, the result is the same

    #frac = dataset.divideBinomial(dataset.Count(int(tmp.case2)), dataset.Count(allEmbeddingIncluded))
    frac = dataset.Count(tmp.case2, math.sqrt(tmp.case2))
    frac.divide(dataset.Count(allEmbeddingIncluded, math.sqrt(allEmbeddingIncluded)))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%): tau_1 not identified as tau_h, but decay of tau_2 would be" % (int(tmp.case2), cellFormat.format(frac))

    #frac = dataset.divideBinomial(dataset.Count(int(tmp.case4)), dataset.Count(allEmbeddingIncluded))
    frac = dataset.Count(tmp.case4, math.sqrt(tmp.case4))
    frac.divide(dataset.Count(allEmbeddingIncluded, math.sqrt(allEmbeddingIncluded)))
    frac.multiply(dataset.Count(100))
    print "  * %d (%s %%): mu_2 would be accepted for embedding, and is not identified in lepton veto" % (int(tmp.case4), cellFormat.format(frac))
# Esempio n. 11
# 0
def main():
    """Plot signal selection efficiencies vs. charged Higgs mass.

    Reads the multicrab datasets, prints the weighted main counter table,
    computes per-cut efficiencies (N(cut)/cross-section) for the HplusTB
    signal samples, and draws them as TGraphErrors on a log-y frame with
    legends, finally saving the canvas as .png/.C/.eps.
    """
    # Read the datasets
    #datasets = dataset.getDatasetsFromMulticrabCfg(counters=counters)
    datasets = dataset.getDatasetsFromMulticrabCfg(analysisName=analysis,
                                                   searchMode=searchMode,
                                                   dataEra=dataEra,
                                                   optimizationMode=optMode)
    datasets.updateNAllEventsToPUWeighted()
    datasets.loadLuminosities()

    plots.mergeRenameReorderForDataMC(datasets)

    # Remove signals other than M120
    ###    datasets.remove(filter(lambda name: "TTToHplus" in name and not "M120" in name, datasets.getAllDatasetNames()))
    #datasets.remove(filter(lambda name: "HplusTB" in name, datasets.getAllDatasetNames()))
    datasets.remove(
        filter(lambda name: "TTToHplusBHminusB" in name,
               datasets.getAllDatasetNames()))
    datasets.remove(
        filter(lambda name: "TTToHplus" in name,
               datasets.getAllDatasetNames()))
    # Set the signal cross sections to a given BR(t->H), BR(h->taunu)
    xsect.setHplusCrossSectionsToBR(datasets, br_tH=0.05, br_Htaunu=1)

    # Set the signal cross sections to a value from MSSM
    #    xsect.setHplusCrossSectionsToMSSM(datasets, tanbeta=20, mu=200)

    ###    plots.mergeWHandHH(datasets) # merging of WH and HH signals must be done after setting the cross section

    style = tdrstyle.TDRStyle()
    eventCounter = counter.EventCounter(datasets)
    #eventCounter = counter.EventCounter(datasets, counters=counters)
    eventCounter.normalizeMCByCrossSection()
    mainTable = eventCounter.getMainCounterTable()
    cellFormat = counter.TableFormatText(cellFormat=counter.CellFormatText(
        valueOnly=True))
    print mainTable.format(cellFormat)

    #eventCounterUnweighted = counter.EventCounter(datasets, mainCounterOnly=True, counters="counters")
    #eventCounterUnweighted.normalizeMCByCrossSection()
    #mainTableUnweighted = eventCounterUnweighted.getMainCounterTable()
    #print mainTableUnweighted.format(cellFormat)

    # Signal samples whose efficiencies are plotted (order matches xvalues)
    signalDatasets = [
        "HplusTB_M180",
        "HplusTB_M190",
        "HplusTB_M200",
        "HplusTB_M220",
        "HplusTB_M250",
        "HplusTB_M300",
        #"HplusTB_M400",
        #"HplusTB_M500",
        #"HplusTB_M600",
        #"TTToHplusBWB_M80",
        #"TTToHplusBWB_M90",
        #"TTToHplusBWB_M100",
        #"TTToHplusBWB_M120",
        #"TTToHplusBWB_M140",
        #"TTToHplusBWB_M150",
        #"TTToHplusBWB_M155",
        #"TTToHplusBWB_M160",
    ]
    allName = "Trigger and HLT_MET cut"

    # Counter rows (selection steps) to extract efficiencies for
    cuts = [
        #"Offline selection begins",
        "Trigger and HLT_MET cut",
        "primary vertex",
        "taus == 1",
        "tau trigger scale factor",
        "electron veto",
        "muon veto",
        "njets",
        "QCD tail killer collinear",
        "MET",
        "btagging",
        "btagging scale factor",
        "QCD tail killer back-to-back"
    ]

    # H+ mass points corresponding to signalDatasets above
    xvalues = [180, 190, 200, 220, 250, 300]
    #xvalues = [80, 90, 100, 120, 140, 150, 155, 160]
    xerrs = [0] * len(xvalues)
    yvalues = {}
    yerrs = {}
    for cut in cuts:
        yvalues[cut] = []
        yerrs[cut] = []
    for name in signalDatasets:
        column = mainTable.getColumn(name=name)
        #columnUnweighted = mainTableUnweighted.getColumn(name=name)

        # Get the counts (returned objects are of type dataset.Count,
        # and have both value and uncertainty
        #allCount = column.getCount(column.getRowNames().index("Trigger and HLT_MET cut"))
        # Somewhat weird way to get total cross section via unweighted counters
        #rowNames = column.getRowNames()
        #if "allEvents" in rowNames:
        #    allCount = columnUnweighted.getCount(rowNames.index("allEvents"))
        #else:
        #    # Hack needed because non-triggered signal pattuples do not have allEvents counter!
        #    allCount = columnUnweighted.getCount(rowNames.index("primaryVertexAllEvents"))
        #dset = datasets.getDataset(name)
        #allCount.multiply(dataset.Count(dset.getNAllEvents()/dset.getNAllEventsUnweighted()))
        allCount = dataset.Count(
            datasets.getDataset(name).getCrossSection(), 0)

        for cut in cuts:
            cutCount = column.getCount(column.getRowNames().index(cut))
            eff = cutCount.clone()
            eff.divide(allCount)  # N(cut) / N(all)
            if column.getRowNames().index(cut) == 9:  ## btagging
                print cut, eff.value()
            yvalues[cut].append(eff.value())
            yerrs[cut].append(eff.uncertainty())

    # Build a TGraphErrors of efficiency vs. mass for the given cut
    def createErrors(cutname):
        gr = ROOT.TGraphErrors(len(xvalues), array.array("d", xvalues),
                               array.array("d", yvalues[cutname]),
                               array.array("d", xerrs),
                               array.array("d", yerrs[cutname]))
        gr.SetMarkerStyle(24)
        gr.SetMarkerColor(2)
        gr.SetMarkerSize(0.9)
        gr.SetLineStyle(1)
        gr.SetLineWidth(2)
        return gr

    # One graph per plotted cut, with distinct colours/markers/line styles
    #gtrig = createErrors("primary vertex")
    gtrig = createErrors("Trigger and HLT_MET cut")
    print gtrig
    gtrig.SetLineColor(38)
    gtrig.SetMarkerColor(38)
    gtrig.SetMarkerStyle(20)
    gtrig.SetLineStyle(2)
    gtrig.SetMarkerSize(2)

    gtau = createErrors("taus == 1")
    gtau.SetLineColor(2)
    gtau.SetMarkerColor(2)
    gtau.SetMarkerStyle(20)
    gtau.SetMarkerSize(2)
    gtau.SetLineStyle(3)
    #gtau = createErrors("trigger scale factor")

    gveto = createErrors("muon veto")
    gveto.SetLineColor(1)
    gveto.SetMarkerColor(1)
    gveto.SetMarkerStyle(21)
    gveto.SetMarkerSize(2)
    gveto.SetLineStyle(4)
    gjets = createErrors("njets")
    gjets.SetLineColor(4)
    gjets.SetMarkerColor(4)
    gjets.SetMarkerStyle(22)
    gjets.SetMarkerSize(2)
    gjets.SetLineStyle(1)
    gcoll = createErrors("QCD tail killer collinear")
    gcoll.SetLineColor(6)
    gcoll.SetMarkerColor(6)
    gcoll.SetMarkerStyle(26)
    gcoll.SetMarkerSize(2)
    gcoll.SetLineStyle(2)
    gmet = createErrors("MET")
    gmet.SetLineColor(1)
    gmet.SetMarkerColor(1)
    gmet.SetMarkerStyle(24)
    gmet.SetMarkerSize(2)
    gmet.SetLineStyle(5)
    gbtag = createErrors("btagging")
    gbtag.SetLineColor(2)
    gbtag.SetMarkerColor(2)
    gbtag.SetMarkerStyle(25)
    gbtag.SetMarkerSize(2)
    gbtag.SetLineStyle(6)
    print gbtag
    gback = createErrors("QCD tail killer back-to-back")
    gback.SetLineColor(7)
    gback.SetMarkerColor(7)
    gback.SetMarkerStyle(23)
    gback.SetMarkerSize(2)
    gback.SetLineStyle(1)
    #gtau = createErrors("trigger scale factor")

    glist = [gtrig, gtau, gveto, gjets, gcoll, gmet, gbtag, gback]

    # Common frame for all graphs; log-y scale
    opts = {"xmin": 175, "xmax": 310, "ymin": 0.001}
    canvasFrame = histograms.CanvasFrame(
        [histograms.HistoGraph(g, "", "") for g in glist], "SignalEfficiency",
        **opts)
    canvasFrame.frame.GetYaxis().SetTitle("Selection efficiency")
    canvasFrame.frame.GetXaxis().SetTitle("m_{H^{#pm}} (GeV/c^{2})")
    canvasFrame.canvas.SetLogy(True)
    canvasFrame.frame.Draw()

    for gr in glist:
        gr.Draw("PC same")

    histograms.addStandardTexts()

    # Legends are split: early selections top-right, late selections bottom
    legend2 = histograms.createLegend(x1=0.5, y1=0.7, x2=0.9, y2=0.85)

    legend2.AddEntry(gtrig, "Trigger", "lp")
    legend2.AddEntry(gtau, "Loose #tau identification", "lp")
    legend2.AddEntry(gveto, "lepton vetoes", "lp")
    legend2.AddEntry(gjets, "3 jets", "lp")
    legend2.Draw()

    legend = histograms.createLegend(x1=0.35, y1=0.15, x2=0.7, y2=0.3)
    legend.AddEntry(gcoll, "QCD tail killer collinear", "lp")
    legend.AddEntry(gmet, "MET > 60 GeV", "lp")
    legend.AddEntry(gbtag, "b tagging ", "lp")
    legend.AddEntry(gback, "QCD tail killer back-to-back: Tight", "lp")
    legend.Draw()
    canvasFrame.canvas.SaveAs(".png")
    canvasFrame.canvas.SaveAs(".C")
    canvasFrame.canvas.SaveAs(".eps")