def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-i", "--inputfile", dest="inputfile")
    parser.add_option("-l", "--logfile", dest="logfile")
    parser.add_option("-N", "--multiplicity", dest="N", type="int")
    parser.add_option("--ST", dest="ST", type="int")
    (options, args) = parser.parse_args()

    from ROOT import gROOT, TFile, kFALSE, TGraph, TVectorD
    gROOT.ProcessLine(".L roostats_cl95.C+")
    from ROOT import roostats_cl95, roostats_cla, roostats_limit

    import configurations as config
    ilum = config.integrated_luminosity
    slum = ilum * config.relative_luminosity_uncertainty
    seff = config.nominal_signal_uncertainty

    infile = TFile(options.inputfile, "READ")
    from HistoStore import HistoStore
    store = HistoStore()

    hIntBkg = infile.Get("IntegralBackground_N%dup" % options.N)
    hIntData = infile.Get("IntegralData_N%dup" % options.N)
    ibin = hIntData.FindBin(options.ST)
    nBkg = hIntBkg.GetBinContent(ibin)
    sBkg = hIntBkg.GetBinError(ibin)
    nData = int(hIntData.GetBinContent(ibin))
    print "%10d %10.2f +/- %10.2f" % (nData, nBkg, sBkg)
    print ilum, slum, 1.0, seff, nBkg, sBkg, nData

    # CLs method
    rl = roostats_limit(ilum, slum, 1.0, seff, nBkg, sBkg, nData,
                        kFALSE, 1, "cls", "my.png", 23576)
    cl95 = rl.GetObservedLimit()
    cla = rl.GetExpectedLimit()
    exp_up = rl.GetOneSigmaHighRange()
    exp_down = rl.GetOneSigmaLowRange()
    exp_2up = rl.GetTwoSigmaHighRange()
    exp_2down = rl.GetTwoSigmaLowRange()

    # Was Bayesian, a la:
    #cl95 = roostats_cl95(ilum, slum, 1.0, seff, nBkg, sBkg, nData, kFALSE, 1, "bayesian", "")
    #cla = roostats_cla(ilum, slum, 1.0, seff, nBkg, sBkg, 1)

    logfile = open(options.logfile, "w")
    logfile.write("%-10d %-10.5f %-10.5f %-10.5f %-10.5f %-10.5f %-10.5f\n" %
                  (options.ST, cl95, cla, exp_up, exp_down, exp_2up, exp_2down))
    logfile.close()
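
# Entry point: not shown in the excerpt above; assumed to follow the usual
# guard so the script can be both imported and run directly.
# Example invocation (file names are illustrative only):
#   python <this_script>.py -i backgrounds.root -l limit_N2_ST2000.log -N 2 --ST 2000
if __name__ == "__main__":
    main()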
from ModelParser import XsecParser, ModelGroup
import csv
from ROOT import TGraphErrors, TVectorD
from HistoStore import HistoStore

store = HistoStore()

models = []
with open("xsec_tmp2.txt", "rb") as f:
    reader = csv.reader(f, delimiter=" ", skipinitialspace=True)
    for row in reader:
        model = XsecParser(row)
        models.append(model)

group = ModelGroup(models)
for generator, group_generator in group.items():
    for n, group_n in group_generator.items():
        for icolor, (MD, models) in enumerate(group_n.items()):
            vsize = len(models)
            vx = TVectorD(vsize)
            vxsec = TVectorD(vsize)
            vex = TVectorD(vsize)
            vey = TVectorD(vsize)
            for i, m in enumerate(models):
                vx[i] = m.M
                vex[i] = 0.0
                vxsec[i] = m.xsec
                vey[i] = m.xsecErr
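            # --- Assumed continuation (illustrative sketch, not from the original file) ---
            # The filled vectors presumably become one cross-section graph per
            # (generator, n, MD) group, using the TVectorD constructor of
            # TGraphErrors; the graph name below is illustrative only.
            gxsec = TGraphErrors(vx, vxsec, vex, vey)
            gxsec.SetName("xsec_%s_n%s_MD%s" % (generator, n, MD))
            store.book(gxsec)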
def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-i", "--inputfile", dest="inputfile")
    parser.add_option("-o", "--outputfile", dest="outputfile")
    parser.add_option("-b", "--batch", action="store_true",
                      dest="isBatch", default=False)
    parser.add_option("--normalization", nargs=2,
                      type="float", dest="norm_range")
    parser.add_option("--fit", nargs=2,
                      type="float", dest="fit_range")
    (options, args) = parser.parse_args()

    isSaveOutput = options.outputfile is not None

    if not options.inputfile:
        parser.error("Please specify an input file.")

    import configurations as config
    integrated_luminosity = config.integrated_luminosity
    if options.fit_range:
        fit_range = options.fit_range
        norm_range = (fit_range[1] - 200., fit_range[1])
    else:
        fit_range = config.fit_range
        norm_range = config.norm_range
    # Override normalization range from input
    if options.norm_range:
        norm_range = options.norm_range

    from Styles import formatST, formatTemplate, formatUncertainty
    from ROOT import TFile, TF1, TH1D, TMath, TCanvas, TLegend,\
        TGraphAsymmErrors, TVectorD, TMultiGraph, TPaveText, gStyle
    gStyle.SetPadTopMargin(0.05)
    gStyle.SetPadRightMargin(0.05)
    #gStyle.SetOptFit(2222222)
    #gStyle.SetStatX(0.95)
    #gStyle.SetStatY(0.95)
    gStyle.SetOptStat(0000000)
    gStyle.SetOptFit(0000000)

    # Input file
    infile = TFile(options.inputfile, "READ")
    from HistoStore import HistoStore
    store = HistoStore()
    canvas = HistoStore()

    print "Fit range: %d - %d GeV" % fit_range
    print "Normalization range: %d - %d GeV" % norm_range
    print "Integrated luminosity: %d inv. pb" % integrated_luminosity

    # Fit the exclusive-multiplicity ST spectra with the background templates
    for N in config.exclusive_multiplicities:
        hST = infile.Get("plots%dJets/ST" % N)
        hST.GetXaxis().SetNdivisions(510)
        if not options.isBatch:
            c = TCanvas("TemplateN%d" % N, "TemplateN%d" % N, 500, 500)
            canvas.book(c)
            formatST(hST)
            hST.SetTitle("")
            hST.Draw("e")
            hST.GetXaxis().SetRangeUser(fit_range[0], config.maxST)
            hST.GetYaxis().SetRangeUser(1e-2, 2e4)
            hST.GetYaxis().SetTitleOffset(1.25)
            hST.GetYaxis().SetTitleSize(0.04)
            hST.GetYaxis().SetLabelSize(0.04)
            hST.GetXaxis().SetTitleSize(0.04)
            hST.GetXaxis().SetLabelSize(0.04)
            c.SetLogy(1)

        for i, formula in enumerate(config.templates):
            print "formula %d" % i
            if N == 2:
                f = TF1("templateN%d_%d" % (N, i), formula, 0, 10000)
            elif N == 3:
                f = store.get("templateN2_%d" % i).Clone("templateN%d_%d" % (N, i))

            if i < 3:
                # Repeat the quiet fit in the 1800-2800 GeV window so the
                # minimiser converges, then do one final verbose fit.
                for _ in range(24):
                    hST.Fit(f, "QN0", "", 1800, 2800)
                hST.Fit(f, "N0", "", 1800, 2800)
            elif i == 0:
                # Note: unreachable as written, since i == 0 already falls
                # into the i < 3 branch above.
                hST.Fit(f, "Q0", "", fit_range[0], fit_range[1])
            elif i > 2:
                # Repeat the quiet fit over the configured fit range.
                for _ in range(25):
                    hST.Fit(f, "QN0", "", fit_range[0], fit_range[1])

            formatTemplate(f, N, i)
            store.book(f)

            if not options.isBatch:
                f.Draw("same")

            hTemplate = hST.Clone("histoTemplateN%d_%d" % (N, i))
            hTemplate.Reset()
            hTemplate.Eval(f)
            formatTemplate(hTemplate, N, i)
            store.book(hTemplate)

            if i == 0:
                hRef = hTemplate.Clone("ReferenceTemplateN%d_0" % N)
                store.book(hRef)

            # Print chi-square / Ndof
            print "N = %d, Chi^2/Ndof = %0.2f/%d" % \
                (N, f.GetChisquare(), f.GetNDF())

        if not options.isBatch:
            c.Update()
            c.Print("TemplateN%d.pdf" % N)
            c.Print("TemplateN%d.png" % N)

    # Calculate scale factor and its error between multiplicities
    from OptimizationTools import OptimizeScale
    for histoN, templateN in [[2, 3]]:
        hST = store.get("ReferenceTemplateN%d_0" % histoN)
        hTemplate = store.get("ReferenceTemplateN%d_0" % templateN)
        hlnL, scale, err = OptimizeScale(hST, hTemplate, norm_range)
        hlnL.SetName("LogLikelihood_%dto%d" % (templateN, histoN))
        store.book(hlnL)
        for i in range(len(config.templates)):
            hTemplate = store.get("histoTemplateN%d_%d" % (templateN, i))
            hTemplate_ = hTemplate.Clone("histoTemplateN%d_%d__RescaledToN%d" % (templateN, i, histoN))
            hTemplate_.Scale(scale)
            store.book(hTemplate_)

    # Shape uncertainty
    hBkgTemplate = store.get("histoTemplateN2_0")
    hBkgTemplate.Rebin(config.rebin)
    nbins = hBkgTemplate.GetNbinsX()
    vST = TVectorD(nbins)
    vBkg = TVectorD(nbins)
    vexl = TVectorD(nbins)
    vexh = TVectorD(nbins)
    shape_el = TVectorD(nbins)
    shape_eh = TVectorD(nbins)
    rel_shape_el = TVectorD(nbins)
    rel_shape_eh = TVectorD(nbins)
    for i in range(nbins):
        vST[i] = hBkgTemplate.GetBinCenter(i + 1)
        if vST[i] < config.com:
            vBkg[i] = hBkgTemplate.GetBinContent(i + 1)
        else:
            vBkg[i] = 0.0
        vexl[i] = 0.0
        vexh[i] = 0.0
        shape_el[i] = 0.0
        shape_eh[i] = 0.0
        rel_shape_el[i] = 0.0
        rel_shape_eh[i] = 0.0

    # Envelope of the alternative parameterizations around the nominal template
    for i in range(len(config.templates)):
        for label in ["histoTemplateN2_%d", "histoTemplateN3_%d__RescaledToN2"]:
            if label % i == "histoTemplateN2_0":
                continue
            h = store.get(label % i)
            h.Rebin(config.rebin)
            for ibin in range(nbins):
                diff = h.GetBinContent(ibin + 1) - vBkg[ibin]
                if diff > 0 and diff > shape_eh[ibin]:
                    shape_eh[ibin] = diff
                elif diff < 0 and abs(diff) > shape_el[ibin]:
                    shape_el[ibin] = abs(diff)

    # Relative shape uncertainty (symmetrized to the larger deviation)
    for i in range(nbins):
        if vBkg[i] > 0:
            #rel_shape_el[i] = rel_shape_el[i] / vBkg[i]
            #rel_shape_eh[i] = rel_shape_eh[i] / vBkg[i]
            max_err = max(shape_el[i], shape_eh[i])
            shape_el[i] = max_err
            shape_eh[i] = max_err
            rel_shape_el[i] = max_err / vBkg[i]
            rel_shape_eh[i] = max_err / vBkg[i]
        else:
            rel_shape_el[i] = 0.0
            rel_shape_eh[i] = 0.0
        #print vST[i], vBkg[i], rel_shape_el[i], rel_shape_eh[i]

    gShapeUncertainty = TGraphAsymmErrors(vST, vBkg, vexl, vexh,
                                          shape_el, shape_eh)
    gShapeUncertainty.SetName("Shape_Uncertainty")
    formatUncertainty(gShapeUncertainty)
    store.book(gShapeUncertainty)

    gRelShapeUncertainty = TGraphAsymmErrors(vST, vexl, vexl, vexh,
                                             rel_shape_el, rel_shape_eh)
    gRelShapeUncertainty.SetName("Relative_Shape_Uncertainty")
    formatUncertainty(gRelShapeUncertainty)
    store.book(gRelShapeUncertainty)

    # Generate backgrounds for the inclusive-multiplicity samples
    for N in config.label_for_data:
        hST = infile.Get("plotsN%s/ST" % N)
        rel_scale_err2 = 0.0
        scale_factor = 1.0
        for Nref in config.label_for_ref:
            if N == Nref:
                continue
            template = store.get("ReferenceTemplateN%s_0" % Nref)
            hlnL, scale, err = OptimizeScale(hST, template, norm_range)
            hlnL.SetName("LogLikelihood_%sto%s" % (Nref, N))
            store.book(hlnL)
            if Nref == "2":
                scale_factor = scale
            rel_scale_err2 += err / scale * err / scale
            print "%s/%s %.3f $\pm$ %.3f" % (N, Nref, scale, err)

        vy = TVectorD(nbins)
        veyh = TVectorD(nbins)
        veyl = TVectorD(nbins)
        for i in range(nbins):
            vy[i] = vBkg[i] * scale_factor
            veyh[i] = vy[i] * TMath.Sqrt(rel_scale_err2 + rel_shape_eh[i] * rel_shape_eh[i])
            veyl[i] = vy[i] * TMath.Sqrt(rel_scale_err2 + rel_shape_el[i] * rel_shape_el[i])
        print "Scaling uncertainty (%s): %.2f" % \
            (N, TMath.Sqrt(rel_scale_err2) * 100.0)

        gBkg = TGraphAsymmErrors(vST, vy, vexl, vexh, veyl, veyh)
        gBkg.SetName("BackgroundGraph_N%s" % N)
        formatUncertainty(gBkg)
        store.book(gBkg)

        hST.Rebin(config.rebin)
        hST.SetName("Data_N%s" % N)
        formatST(hST)
        store.book(hST)

        hBkg = hST.Clone("Background_N%s" % N)
        hBkg.Reset()
        store.book(hBkg)
        for i in range(nbins):
            ibin = hBkg.FindBin(vST[i])
            hBkg.SetBinContent(ibin, vy[i])
            # vexl is identically zero here, so this effectively stores veyh
            hBkg.SetBinError(ibin, max(veyh[i], vexl[i]))

        from OptimizationTools import Integral
        hIntBkg = hBkg.Clone("IntegralBackground_N%s" % N)
        Integral(hIntBkg)
        store.book(hIntBkg)

        hIntData = hST.Clone("IntegralData_N%s" % N)
        Integral(hIntData)
        store.book(hIntData)

    # Plot shape uncertainty
    if not options.isBatch:
        legend_shape = TLegend(0.4244355, 0.4241525, 0.9395968, 0.8652542)
        legend_shape.SetTextFont(42)
        legend_shape.SetFillColor(0)
        legend_shape.SetLineColor(0)
        legend_shape.SetTextSize(0.036)
        c = TCanvas("ShapeUncertaintyN2", "ShapeUncertaintyN2", 500, 500)
        canvas.book(c)

        gShapeUncertainty.Draw("AC3")
        gShapeUncertainty.GetXaxis().SetNdivisions(510)
        gShapeUncertainty.GetXaxis().SetRangeUser(fit_range[0], config.maxST)
        gShapeUncertainty.GetYaxis().SetRangeUser(5e-2, 5e6)
        legend_shape.AddEntry(store.get("Data_N2"), "Data (N = 2)", "p")
        legend_shape.AddEntry(gShapeUncertainty, "Shape Uncertainty", "f")

        for i in range(len(config.templates)):
            for label in ["histoTemplateN2_%d", "histoTemplateN3_%d__RescaledToN2"]:
                h = store.get(label % i)
                h.GetXaxis().SetRangeUser(fit_range[0], config.maxST)
                h.Draw("histcsame")
                h.GetXaxis().SetNdivisions(510)
                if label == "histoTemplateN2_%d":
                    N = 2
                else:
                    N = 3
                legend_shape.AddEntry(h, "Parameterization %d (N = %d)" % (i, N), "l")

        store.get("Data_N2").Draw("esame")

        cmslabel = TPaveText(0.45, 0.90, 0.60, 0.93, "brNDC")
        cmslabel.AddText(config.cmsTitle)
        #cmslabel.AddText(config.cmsSubtitle)
        cmslabel.SetFillColor(0)
        cmslabel.SetTextSize(0.041)
        cmslabel.Draw("plain")

        c.SetLogy(1)
        legend_shape.Draw("plain")
        c.Update()
        c.Print("ShapeUncertaintyN2.pdf")
        c.Print("ShapeUncertaintyN2.png")

    if isSaveOutput:
        store.saveAs(options.outputfile)

    if not options.isBatch:
        raw_input("Press Enter to continue...")
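
# Entry point: assumed, since the excerpt defines main() but does not show the call.
if __name__ == "__main__":
    main()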
def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-i", "--inputfile", dest="inputfile")
    parser.add_option("-o", "--outputfile", dest="outputfile")
    parser.add_option("-b", "--batch", action="store_true",
                      dest="isBatch", default=False)
    parser.add_option("--normalization", nargs=2,
                      type="float", dest="norm_range")
    parser.add_option("--fit", nargs=2,
                      type="float", dest="fit_range")
    (options, args) = parser.parse_args()

    isSaveOutput = options.outputfile is not None

    if not options.inputfile:
        parser.error("Please specify an input file.")

    import configurations as config
    if options.fit_range:
        fit_range = options.fit_range
        norm_range = (fit_range[1] - 200., fit_range[1])
    else:
        fit_range = config.fit_range
        norm_range = config.norm_range
    # Override normalization range from input
    if options.norm_range:
        norm_range = options.norm_range

    from Styles import formatST, formatTemplate, formatUncertainty
    from ROOT import TFile, TF1, TH1D, TMath, TCanvas, TLegend,\
        TGraphAsymmErrors, TVectorD, gStyle
    gStyle.SetPadTopMargin(0.05)
    gStyle.SetPadRightMargin(0.05)
    #gStyle.SetOptFit(1111111)

    # Input file
    infile = TFile(options.inputfile, "READ")
    from HistoStore import HistoStore
    store = HistoStore()
    canvas = HistoStore()

    print "Fit range: %d - %d GeV" % fit_range
    print "Normalization range: %d - %d GeV" % norm_range

    # Fit the exclusive-multiplicity ST spectra with the background templates
    for N in config.exclusive_multiplicities:
        hST = infile.Get("plots%dJets/ST" % N)
        if not options.isBatch:
            c = TCanvas("TemplateN%d" % N, "TemplateN%d" % N, 500, 500)
            canvas.book(c)
            formatST(hST)
            hST.Draw("e")
            hST.GetXaxis().SetRangeUser(fit_range[0], config.maxST)
            hST.GetYaxis().SetRangeUser(1e-2, 2e4)
            hST.GetYaxis().SetTitleOffset(1.2)
            c.SetLogy(1)

        for i, formula in enumerate(config.templates):
            if N == 2:
                f = TF1("templateN%d_%d" % (N, i), formula, 0, 10000)
            elif N == 3:
                f = store.get("templateN2_%d" % i).Clone("templateN%d_%d" % (N, i))

            # Repeat the quiet fit over the fit range so the minimiser converges,
            # then redo the nominal template fit once without the N option so the
            # function is attached to the histogram.
            for _ in range(25):
                hST.Fit(f, "QN0", "", fit_range[0], fit_range[1])
            if i == 0:
                hST.Fit(f, "Q0", "", fit_range[0], fit_range[1])

            formatTemplate(f, N, i)
            store.book(f)

            if not options.isBatch:
                f.Draw("same")

            hTemplate = hST.Clone("histoTemplateN%d_%d" % (N, i))
            hTemplate.Reset()
            hTemplate.Eval(f)
            formatTemplate(hTemplate, N, i)
            store.book(hTemplate)

            if i == 0:
                hRef = hTemplate.Clone("ReferenceTemplateN%d_0" % N)
                store.book(hRef)

            # Print chi-square / Ndof
            print "N = %d, Chi^2/Ndof = %0.2f/%d" % \
                (N, f.GetChisquare(), f.GetNDF())

        if not options.isBatch:
            c.Update()

    # Calculate scale factor and its error between multiplicities
    from OptimizationTools import OptimizeScale
    for histoN, templateN in [[2, 3]]:
        hST = store.get("ReferenceTemplateN%d_0" % histoN)
        hTemplate = store.get("ReferenceTemplateN%d_0" % templateN)
        hlnL, scale, err = OptimizeScale(hST, hTemplate, norm_range)
        hlnL.SetName("LogLikelihood_%dto%d" % (templateN, histoN))
        store.book(hlnL)
        for i in range(len(config.templates)):
            hTemplate = store.get("histoTemplateN%d_%d" % (templateN, i))
            hTemplate_ = hTemplate.Clone("histoTemplateN%d_%d__RescaledToN%d" % (templateN, i, histoN))
            hTemplate_.Scale(scale)
            store.book(hTemplate_)

    # Shape uncertainty
    hBkgTemplate = store.get("histoTemplateN2_0")
    hBkgTemplate.Rebin(config.rebin)
    nbins = hBkgTemplate.GetNbinsX()
    vST = TVectorD(nbins)
    vBkg = TVectorD(nbins)
    vexl = TVectorD(nbins)
    vexh = TVectorD(nbins)
    shape_el = TVectorD(nbins)
    shape_eh = TVectorD(nbins)
    rel_shape_el = TVectorD(nbins)
    rel_shape_eh = TVectorD(nbins)
    for i in range(nbins):
        vST[i] = hBkgTemplate.GetBinCenter(i + 1)
        if vST[i] < config.com:
            vBkg[i] = hBkgTemplate.GetBinContent(i + 1)
        else:
            vBkg[i] = 0.0
        vexl[i] = 0.0
        vexh[i] = 0.0
        shape_el[i] = 0.0
        shape_eh[i] = 0.0
        rel_shape_el[i] = 0.0
        rel_shape_eh[i] = 0.0

    # Envelope of the alternative parameterizations around the nominal template
    for i in range(len(config.templates)):
        for label in ["histoTemplateN2_%d", "histoTemplateN3_%d__RescaledToN2"]:
            if label % i == "histoTemplateN2_0":
                continue
            h = store.get(label % i)
            h.Rebin(config.rebin)
            for ibin in range(nbins):
                diff = h.GetBinContent(ibin + 1) - vBkg[ibin]
                if diff > 0 and diff > shape_eh[ibin]:
                    shape_eh[ibin] = diff
                elif diff < 0 and abs(diff) > shape_el[ibin]:
                    shape_el[ibin] = abs(diff)

    # Relative shape uncertainty (symmetrized to the larger deviation)
    for i in range(nbins):
        if vBkg[i] > 0:
            #rel_shape_el[i] = rel_shape_el[i] / vBkg[i]
            #rel_shape_eh[i] = rel_shape_eh[i] / vBkg[i]
            max_err = max(shape_el[i], shape_eh[i])
            shape_el[i] = max_err
            shape_eh[i] = max_err
            rel_shape_el[i] = max_err / vBkg[i]
            rel_shape_eh[i] = max_err / vBkg[i]
        else:
            rel_shape_el[i] = 0.0
            rel_shape_eh[i] = 0.0
        #print vST[i], vBkg[i], rel_shape_el[i], rel_shape_eh[i]

    gShapeUncertainty = TGraphAsymmErrors(vST, vBkg, vexl, vexh,
                                          shape_el, shape_eh)
    gShapeUncertainty.SetName("Shape_Uncertainty")
    formatUncertainty(gShapeUncertainty)
    store.book(gShapeUncertainty)

    gRelShapeUncertainty = TGraphAsymmErrors(vST, vexl, vexl, vexh,
                                             rel_shape_el, rel_shape_eh)
    gRelShapeUncertainty.SetName("Relative_Shape_Uncertainty")
    formatUncertainty(gRelShapeUncertainty)
    store.book(gRelShapeUncertainty)

    # Generate backgrounds for the inclusive-multiplicity samples
    for N in config.label_for_data:
        hST = infile.Get("plotsN%s/ST" % N)
        rel_scale_err2 = 0.0
        scale_factor = 1.0
        for Nref in config.label_for_ref:
            if N == Nref:
                continue
            template = store.get("ReferenceTemplateN%s_0" % Nref)
            hlnL, scale, err = OptimizeScale(hST, template, norm_range)
            hlnL.SetName("LogLikelihood_%sto%s" % (Nref, N))
            store.book(hlnL)
            if Nref == "2":
                scale_factor = scale
            rel_scale_err2 += err / scale * err / scale
            print "%s/%s %.3f +/- %.3f" % (N, Nref, scale, err)

        vy = TVectorD(nbins)
        veyh = TVectorD(nbins)
        veyl = TVectorD(nbins)
        for i in range(nbins):
            vy[i] = vBkg[i] * scale_factor
            veyh[i] = vy[i] * TMath.Sqrt(rel_scale_err2 + rel_shape_eh[i] * rel_shape_eh[i])
            veyl[i] = vy[i] * TMath.Sqrt(rel_scale_err2 + rel_shape_el[i] * rel_shape_el[i])
        print "Scaling uncertainty (%s): %.2f" % \
            (N, TMath.Sqrt(rel_scale_err2) * 100.0)

        gBkg = TGraphAsymmErrors(vST, vy, vexl, vexh, veyl, veyh)
        gBkg.SetName("BackgroundGraph_N%s" % N)
        formatUncertainty(gBkg)
        store.book(gBkg)

        hST.Rebin(config.rebin)
        hST.SetName("Data_N%s" % N)
        formatST(hST)
        store.book(hST)

        hBkg = hST.Clone("Background_N%s" % N)
        hBkg.Reset()
        store.book(hBkg)
        for i in range(nbins):
            ibin = hBkg.FindBin(vST[i])
            hBkg.SetBinContent(ibin, vy[i])
            # vexl is identically zero here, so this effectively stores veyh
            hBkg.SetBinError(ibin, max(veyh[i], vexl[i]))

        from OptimizationTools import Integral
        hIntBkg = hBkg.Clone("IntegralBackground_N%s" % N)
        Integral(hIntBkg)
        store.book(hIntBkg)

        hIntData = hST.Clone("IntegralData_N%s" % N)
        Integral(hIntData)
        store.book(hIntData)

    # Plot shape uncertainty
    if not options.isBatch:
        legend_shape = TLegend(0.5544355, 0.5741525, 0.9395968, 0.9152542)
        legend_shape.SetTextFont(42)
        legend_shape.SetFillColor(0)
        legend_shape.SetLineColor(0)
        c = TCanvas("ShapeUncertaintyN2", "ShapeUncertaintyN2", 500, 500)
        canvas.book(c)

        gShapeUncertainty.Draw("AC3")
        gShapeUncertainty.GetXaxis().SetRangeUser(fit_range[0], config.maxST)
        gShapeUncertainty.GetYaxis().SetRangeUser(5e-2, 1.2e6)
        legend_shape.AddEntry(store.get("Data_N2"), "Data (N = 2)", "p")
        legend_shape.AddEntry(gShapeUncertainty, "Shape Uncertainty", "f")

        for i in range(len(config.templates)):
            for label in ["histoTemplateN2_%d", "histoTemplateN3_%d__RescaledToN2"]:
                h = store.get(label % i)
                h.GetXaxis().SetRangeUser(fit_range[0], config.maxST)
                h.Draw("histcsame")
                if label == "histoTemplateN2_%d":
                    N = 2
                else:
                    N = 3
                legend_shape.AddEntry(h, "Parametrization %d (N = %d)" % (i, N), "l")

        store.get("Data_N2").Draw("esame")
        c.SetLogy(1)
        legend_shape.Draw("plain")
        c.Update()

    if isSaveOutput:
        store.saveAs(options.outputfile)

    if not options.isBatch:
        raw_input("Press Enter to continue...")
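
# Entry point: assumed, since the excerpt defines main() but does not show the call.
if __name__ == "__main__":
    main()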
import csv
from ModelParser import ModelParser, ModelGroup

models = []
with open("work/ModelXsecLimits.txt", "rb") as f:
    reader = csv.reader(f, delimiter=" ", skipinitialspace=True)
    for row in reader:
        model = ModelParser(row)
        models.append(model)

from ROOT import TFile, TVectorD, TGraph
from Styles import formatXsecCL
from HistoStore import HistoStore
store = HistoStore()

group = ModelGroup(models)
for generator, group_generator in group.items():
    texfile = open("table_content-%s.tex" % generator, "w")
    for n, group_n in group_generator.items():
        for icolor, (MD, models) in enumerate(group_n.items()):
            vsize = len(models)
            vx = TVectorD(vsize)
            vxsec = TVectorD(vsize)
            vcl95 = TVectorD(vsize)
            vcla = TVectorD(vsize)
            for i, m in enumerate(models):
                vx[i] = m.M
                vxsec[i] = m.xsec
                vcl95[i] = m.cl95
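            # --- Assumed continuation (illustrative sketch, not from the original file) ---
            # Presumably the expected-limit vector is filled as well (the attribute
            # name vcla[i] = m.cla is a guess by analogy with m.cl95) and the
            # vectors become per-(generator, n, MD) curves of theory cross section
            # and observed 95% CL limit versus mass:
            gxsec = TGraph(vx, vxsec)    # theory cross section vs. M
            gcl95 = TGraph(vx, vcl95)    # observed 95% CL upper limit vs. M
            gxsec.SetName("xsec_%s_n%s_MD%s" % (generator, n, MD))
            gcl95.SetName("cl95_%s_n%s_MD%s" % (generator, n, MD))
            store.book(gxsec)
            store.book(gcl95)
            # The imported formatXsecCL is presumably applied to these graphs, and
            # one row per model written to texfile; both are omitted here because
            # their exact form is not visible in this excerpt.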