def rewtOneHist(dataset, hwts):
    fileName = condor_dir + "/" + dataset + ".root"
    if not os.path.exists(fileName):
        print "WARNING: didn't find ", fileName
        return
    print "About to reweight histogram in " + fileName
    inFile = TFile(fileName, "UPDATE")
    if inFile.IsZombie() or not inFile.GetNkeys():
        return
    inFile.cd()
    h = inFile.Get(str(arguments.histToBeReWeighted))
    if not h:
        print "  Could not find hist named " + arguments.histToBeReWeighted + " in " + inFile.GetName()
        return
    h = h.Clone()
    h.SetDirectory(0)
    newName = h.GetName() + str(arguments.suffixRename)
    h.SetName(newName)
    histDir = arguments.histToBeReWeighted
    histDir = histDir[:histDir.rfind("/")]
    print "Will write hist to directory " + histDir
    inFile.cd(histDir)
    tdir = inFile.GetDirectory(histDir)
    tdir.Delete(newName + ";*")
    # scale each bin (content and error) by the weight taken from hwts at the bin center
    for i in range(1, h.GetNbinsX() + 1):
        val = h.GetBinContent(i)
        err = h.GetBinError(i)
        binCtr = h.GetBinCenter(i)
        wt = hwts.GetBinContent(hwts.FindBin(binCtr))
        h.SetBinContent(i, val * wt)
        h.SetBinError(i, err * wt)
    h.Write()
    inFile.Close()
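# Minimal usage sketch for rewtOneHist (illustrative only, not part of the original
# script): it assumes 'condor_dir', the parsed 'arguments', and a list of 'datasets'
# are already set up by the surrounding script, and that 'weightHist' is a hypothetical
# histogram of per-bin weights read from some reference file.
#
# for dataset in datasets:
#     rewtOneHist(dataset, weightHist)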
from ROOT import TFile, gROOT, gDirectory, TH1, TH2, TH3, TIter, TKey

gROOT.SetBatch()

outputFile = TFile(condor_dir + "/" + outputFileName, "RECREATE")

channels = []
processed_datasets = []

#### check which input datasets have valid output files
for sample in datasets:
    fileName = qcd_dir + "/" + sample + ".root"
    if not os.path.exists(fileName):
        print fileName, "does not exist"
        continue
    testFile = TFile(fileName)
    if testFile.IsZombie() or not testFile.GetNkeys():
        continue
    processed_datasets.append(sample)

if len(processed_datasets) == 0:
    sys.exit("No datasets have been processed")

#### open the first input file and re-make its directory structure in the output file
testFile = TFile(qcd_dir + "/" + processed_datasets[0] + ".root")
testFile.cd()
for key in testFile.GetListOfKeys():
    if key.GetClassName() != "TDirectoryFile":
        continue
    outputFile.cd()
    outputFile.mkdir(key.GetName())
    rootDirectory = key.GetName()
def GetFittedQCDYieldAndError(pathToDir, distribution):
    BackgroundHistograms = []
    fileName = condor_dir + "/" + data_dataset + ".root"
    if not os.path.exists(fileName):
        return 0
    inputFile = TFile(fileName)
    if inputFile.IsZombie() or not inputFile.GetNkeys():
        return 0

    TargetHistogram = inputFile.Get("OSUAnalysis/" + region_names['A'] + "/" + distribution['name']).Clone()
    TargetHistogram.SetDirectory(0)
    QCDHistogram = inputFile.Get("OSUAnalysis/" + region_names['C'] + "/" + distribution['name']).Clone()
    QCDHistogram.SetDirectory(0)
    BackgroundHistograms.append(QCDHistogram)
    CutFlowHistogram = inputFile.Get("OSUAnalysis/" + region_names['C'] + "CutFlow")
    QCDInputYield = CutFlowHistogram.GetBinContent(CutFlowHistogram.GetNbinsX())
    inputFile.Close()

    # loop over the different samples that get held constant in the fit
    for sample in fitting_backgrounds:
        dataset_file = "%s/%s.root" % (condor_dir, sample)
        inputFile = TFile(dataset_file)
        HistogramObj = inputFile.Get(pathToDir + "/" + region_names['A'] + "/" + distribution['name'])
        if not HistogramObj:
            print "WARNING: Could not find histogram " + pathToDir + "/" + distribution['name'] + " in file " + dataset_file + ". Will skip it and continue."
            continue
        Histogram = HistogramObj.Clone()
        Histogram.SetDirectory(0)
        inputFile.Close()
        BackgroundHistograms.append(Histogram)

    nBackgrounds = len(BackgroundHistograms)

    # create the fit function to be used, with one parameter for each yield and one
    # parameter for each error (to be set to -1, 0, 1 for varying by +-1 sigma)
    def fitf(x, par):
        xBin = BackgroundHistograms[0].FindBin(x[0])
        value = 0.0
        for i in range(0, len(BackgroundHistograms)):
            value += par[i] * BackgroundHistograms[i].GetBinContent(xBin) + \
                     par[i + len(BackgroundHistograms)] * BackgroundHistograms[i].GetBinError(xBin)
        return value

    if 'lowerLimit' in distribution:
        lowerLimit = distribution['lowerLimit']
    else:
        lowerLimit = TargetHistogram.GetBinLowEdge(1)
    if 'upperLimit' in distribution:
        upperLimit = distribution['upperLimit']
    else:
        upperLimit = TargetHistogram.GetBinLowEdge(TargetHistogram.GetNbinsX()) + TargetHistogram.GetBinWidth(TargetHistogram.GetNbinsX())

    func = TF1("fit", fitf, lowerLimit, upperLimit, 2 * nBackgrounds)

    # initialize the QCD scale factor parameter
    func.SetParameter(0, 1.0)
    func.SetParName(0, 'QCD_ScaleFactor')
    # initialize the other backgrounds, which are held constant
    for i in range(1, nBackgrounds):
        func.FixParameter(i, 1.0)
        nameString = "background_" + str(i)
        func.SetParName(i, nameString)

    # shift each constant background component up and down by 1 sigma, refit, and save the new yields
    parErrorRanges = []
    for i in range(1, len(BackgroundHistograms)):
        for j in [-1, 1]:
            for k in range(len(BackgroundHistograms), 2 * len(BackgroundHistograms)):
                func.FixParameter(k, 0)
            func.FixParameter(i + len(BackgroundHistograms), j)
            for k in range(0, 9):
                if j == -1:
                    print "Scale down " + func.GetParName(i) + " iteration " + str(k + 1) + "..."
                if j == 1:
                    print "Scale up " + func.GetParName(i) + " iteration " + str(k + 1) + "..."
                TargetHistogram.Fit("fit", "QEMR0")
            TargetHistogram.Fit("fit", "QEMR0")
            # add the new QCD yield to the list used for the error estimate
            parErrorRanges.append(func.GetParameter(0))

    # get the QCD yield for the central values of the background histograms
    for i in range(nBackgrounds, 2 * nBackgrounds):
        func.FixParameter(i, 0)
    for i in range(0, 9):
        print "Iteration " + str(i + 1) + "..."
        TargetHistogram.Fit("fit", "QEMR0")
    TargetHistogram.Fit("fit", "QEMR0")

    # take the average of the deviations from the central value
    scaleDown = parErrorRanges[0]
    scaleUp = parErrorRanges[1]
    parError = abs(scaleUp - scaleDown) / 2

    yieldAndError = []
    yieldAndError.append(func.GetParameter(0) * QCDInputYield)
    yieldAndError.append(parError)

    return yieldAndError
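# Hedged usage sketch: GetFittedQCDYieldAndError returns [fitted QCD yield, error]
# (or 0 if the data file is missing). The call below assumes a 'distributions' list
# like the ones used elsewhere in these scripts (each entry a dict with at least a
# 'name' key); the variable names are illustrative, not from the original source.
#
# for distribution in distributions:
#     result = GetFittedQCDYieldAndError("OSUAnalysis", distribution)
#     if result:
#         qcdYield, qcdYieldError = result
#         print "QCD yield: " + str(qcdYield) + " +- " + str(qcdYieldError)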
def MakeOneDHist(pathToDir, distribution):
    numFittingSamples = 0

    HeaderLabel = TPaveLabel(header_x_left, header_y_bottom, header_x_right, header_y_top, HeaderText, "NDC")
    HeaderLabel.SetTextAlign(32)
    HeaderLabel.SetBorderSize(0)
    HeaderLabel.SetFillColor(0)
    HeaderLabel.SetFillStyle(0)

    LumiLabel = TPaveLabel(topLeft_x_left, topLeft_y_bottom, topLeft_x_right, topLeft_y_top, LumiText, "NDC")
    LumiLabel.SetBorderSize(0)
    LumiLabel.SetFillColor(0)
    LumiLabel.SetFillStyle(0)

    NormLabel = TPaveLabel()
    NormLabel.SetDrawOption("NDC")
    NormLabel.SetX1NDC(topLeft_x_left)
    NormLabel.SetX2NDC(topLeft_x_right)
    NormLabel.SetBorderSize(0)
    NormLabel.SetFillColor(0)
    NormLabel.SetFillStyle(0)

    NormText = ""
    if arguments.normalizeToUnitArea:
        NormText = "Scaled to unit area"
    elif arguments.normalizeToData:
        NormText = "MC scaled to data"
    NormLabel.SetLabel(NormText)

    YieldsLabel = TPaveText(0.39, 0.7, 0.59, 0.9, "NDC")
    YieldsLabel.SetBorderSize(0)
    YieldsLabel.SetFillColor(0)
    YieldsLabel.SetFillStyle(0)
    YieldsLabel.SetTextAlign(12)

    RatiosLabel = TPaveText()
    RatiosLabel.SetDrawOption("NDC")
    RatiosLabel.SetBorderSize(0)
    RatiosLabel.SetFillColor(0)
    RatiosLabel.SetFillStyle(0)
    RatiosLabel.SetTextAlign(32)

    Legend = TLegend()
    Legend.SetBorderSize(0)
    Legend.SetFillColor(0)
    Legend.SetFillStyle(0)

    fittingIntegral = 0
    scaleFactor = 1

    HistogramsToFit = []
    TargetDataset = distribution['target_dataset']
    FittingLegendEntries = []
    DataLegendEntries = []
    FittingHistogramDatasets = []

    Stack_list = []
    Stack_list.append(THStack("stack_before", distribution['name']))
    Stack_list.append(THStack("stack_after", distribution['name']))

    fileName = condor_dir + "/" + distribution['target_dataset'] + ".root"
    if not os.path.exists(fileName):
        return
    inputFile = TFile(fileName)
    if inputFile.IsZombie() or not inputFile.GetNkeys():
        return

    Target = inputFile.Get("OSUAnalysis/" + distribution['channel'] + "/" + distribution['name']).Clone()
    Target.SetDirectory(0)
    inputFile.Close()

    Target.SetMarkerStyle(20)
    Target.SetMarkerSize(0.8)
    Target.SetFillStyle(0)
    Target.SetLineColor(colors[TargetDataset])
    Target.SetLineStyle(1)
    Target.SetLineWidth(2)
    targetIntegral = Target.Integral()
    if arguments.normalizeToUnitArea and Target.Integral() > 0:
        Target.Scale(1. / Target.Integral())

    if arguments.rebinFactor:
        RebinFactor = int(arguments.rebinFactor)
        # don't rebin histograms which would end up with fewer than 5 bins, or any gen-matching histograms
        if Target.GetNbinsX() >= RebinFactor * 5 and Target.GetName().find("GenMatch") == -1:
            Target.Rebin(RebinFactor)

    ### formatting the target histogram and adding it to the legend
    legendIndex = 0
    Legend.AddEntry(Target, labels[TargetDataset], "LEP")
    legendIndex = legendIndex + 1

    if not outputFile.Get("OSUAnalysis"):
        outputFile.mkdir("OSUAnalysis")
    if not outputFile.Get("OSUAnalysis/" + distribution['channel']):
        outputFile.Get("OSUAnalysis").mkdir(distribution['channel'])

    # loop over the different samples requested to be fit
    for sample in distribution['datasets']:
        dataset_file = "%s/%s.root" % (condor_dir, sample)
        inputFile = TFile(dataset_file)
        HistogramObj = inputFile.Get(pathToDir + "/" + distribution['channel'] + "/" + distribution['name'])
        if not HistogramObj:
            print "WARNING: Could not find histogram " + pathToDir + "/" + distribution['channel'] + "/" + distribution['name'] + " in file " + dataset_file + ". Will skip it and continue."
            continue
        Histogram = HistogramObj.Clone()
        Histogram.SetDirectory(0)
        inputFile.Close()

        if arguments.rebinFactor:
            RebinFactor = int(arguments.rebinFactor)
            # don't rebin histograms which would end up with fewer than 5 bins, or any gen-matching histograms
            if Histogram.GetNbinsX() >= RebinFactor * 5 and Histogram.GetName().find("GenMatch") == -1:
                Histogram.Rebin(RebinFactor)

        xAxisLabel = Histogram.GetXaxis().GetTitle()
        unitBeginIndex = xAxisLabel.find("[")
        unitEndIndex = xAxisLabel.find("]")
        if unitBeginIndex != -1 and unitEndIndex != -1:  # x axis has a unit
            yAxisLabel = "Entries / " + str(Histogram.GetXaxis().GetBinWidth(1)) + " " + xAxisLabel[unitBeginIndex + 1:unitEndIndex]
        else:
            yAxisLabel = "Entries per bin (" + str(Histogram.GetXaxis().GetBinWidth(1)) + " width)"

        if not arguments.makeFancy:
            histoTitle = Histogram.GetTitle()
        else:
            histoTitle = ""

        legLabel = labels[sample]
        if arguments.printYields:
            yieldHist = Histogram.Integral()
            legLabel = legLabel + " (%.1f)" % yieldHist
        FittingLegendEntries.append(legLabel)

        if types[sample] == "bgMC":
            numFittingSamples += 1
            fittingIntegral += Histogram.Integral()

            Histogram.SetLineStyle(1)
            if arguments.noStack:
                Histogram.SetFillStyle(0)
                Histogram.SetLineColor(colors[sample])
                Histogram.SetLineWidth(2)
            else:
                Histogram.SetFillStyle(1001)
                Histogram.SetFillColor(colors[sample])
                Histogram.SetLineColor(1)
                Histogram.SetLineWidth(1)
        elif types[sample] == "signalMC":
            numFittingSamples += 1

            Histogram.SetFillStyle(0)
            Histogram.SetLineColor(colors[sample])
            Histogram.SetLineStyle(1)
            Histogram.SetLineWidth(2)

        if arguments.normalizeToUnitArea and Histogram.Integral() > 0:
            Histogram.Scale(1. / Histogram.Integral())

        HistogramsToFit.append(Histogram)
        FittingHistogramDatasets.append(sample)

    # scaling histograms as per the user's specifications
    if targetIntegral > 0 and fittingIntegral > 0:
        scaleFactor = targetIntegral / fittingIntegral
    for fittingHist in HistogramsToFit:
        if arguments.normalizeToData:
            fittingHist.Scale(scaleFactor)

        if arguments.normalizeToUnitArea and not arguments.noStack and fittingIntegral > 0:
            fittingHist.Scale(1. / fittingIntegral)
        elif arguments.normalizeToUnitArea and arguments.noStack and fittingHist.Integral() > 0:
            fittingHist.Scale(1. / fittingHist.Integral())
    def fitf(x, par):
        xBin = HistogramsToFit[0].FindBin(x[0])
        value = 0.0
        for i in range(0, len(HistogramsToFit)):
            value += par[i] * HistogramsToFit[i].GetBinContent(xBin) + \
                     par[i + len(HistogramsToFit)] * HistogramsToFit[i].GetBinError(xBin)
        return value

    lowerLimit = Target.GetBinLowEdge(1)
    upperLimit = Target.GetBinLowEdge(Target.GetNbinsX()) + Target.GetBinWidth(Target.GetNbinsX())
    if 'lowerLimit' in distribution:
        lowerLimit = distribution['lowerLimit']
    if 'upperLimit' in distribution:
        upperLimit = distribution['upperLimit']

    func = TF1("fit", fitf, lowerLimit, upperLimit, 2 * len(HistogramsToFit))
    for i in range(0, len(HistogramsToFit)):
        if 'fixed_datasets' in distribution and distribution['datasets'][i] in distribution['fixed_datasets']:
            func.FixParameter(i, 1.0)
        else:
            func.SetParameter(i, 1.0)
        # func.SetParLimits (i, 0.0, 1.0e2)  # commented out so we don't have to pre-normalize the QCD input sample
        func.SetParName(i, labels[FittingHistogramDatasets[i]])

    shiftedScaleFactors = []
    if arguments.parametricErrors:
        # loop over all input histograms and shift them +- 1 sigma
        for i in range(0, len(HistogramsToFit)):
            sfs = []
            # -1 => -1 sigma, +1 => +1 sigma
            for j in [-1, 1]:
                # loop over the parameters holding the errors for each dataset, fixing all to 0
                for k in range(len(HistogramsToFit), 2 * len(HistogramsToFit)):
                    func.FixParameter(k, 0)
                # fix the error of the dataset of interest to +-1
                func.FixParameter(i + len(HistogramsToFit), j)
                # perform the new fit
                for k in range(0, distribution['iterations'] - 1):
                    if j == -1:
                        print "Scale down " + labels[FittingHistogramDatasets[i]] + " iteration " + str(k + 1) + "..."
                    if j == 1:
                        print "Scale up " + labels[FittingHistogramDatasets[i]] + " iteration " + str(k + 1) + "..."
                    Target.Fit("fit", "QEMR0")
                Target.Fit("fit", "VEMR0")
                # save the new scale factors for each dataset
                for k in range(0, len(HistogramsToFit)):
                    sfs.append(func.GetParameter(k))
            shiftedScaleFactors.append(sfs)

    # reset the parameters holding the errors of each dataset to 0
    for i in range(len(HistogramsToFit), 2 * len(HistogramsToFit)):
        func.FixParameter(i, 0)

    # do the fit to get the central values
    for i in range(0, distribution['iterations'] - 1):
        print "Iteration " + str(i + 1) + "..."
        Target.Fit("fit", "QEMR0")
    Target.Fit("fit", "VEMR0")

    if arguments.parametricErrors:
        # make a list of the largest error on each contribution from shifting any other contribution
        parErrors = []
        # loop over all the datasets
        for i in range(0, len(HistogramsToFit)):
            centralValue = func.GetParameter(i)
            maxError = 0
            # find the maximum deviation from the central value and save that
            for shiftedScaleFactor in shiftedScaleFactors[i]:
                currentError = abs(shiftedScaleFactor - centralValue)
                if currentError > maxError:
                    maxError = currentError
            parErrors.append(maxError)

    finalMax = 0
    if not arguments.noStack:
        for fittingHist in HistogramsToFit:
            finalMax += fittingHist.GetMaximum()
    else:
        for fittingHist in HistogramsToFit:
            if fittingHist.GetMaximum() > finalMax:
                finalMax = fittingHist.GetMaximum()
    if Target.GetMaximum() > finalMax:
        finalMax = Target.GetMaximum()

    Target.SetMaximum(1.1 * finalMax)
    Target.SetMinimum(0.0001)

    Canvas = TCanvas(distribution['name'] + "_FitFunction")
    Canvas.cd(1)
    Target.Draw()
    func.Draw("same")

    outputFile.cd("OSUAnalysis/" + distribution['channel'])
    Canvas.Write()
    if arguments.savePDFs:
        if histogram == input_histograms[0]:
            Canvas.Print(pdfFileName + "(", "pdf")
        else:
            Canvas.Print(pdfFileName, "pdf")

    Target.SetStats(0)

    ### formatting bgMC histograms and adding them to the legend
    legendIndex = numFittingSamples - 1
    for Histogram in reversed(HistogramsToFit):
        if arguments.noStack:
            Legend.AddEntry(Histogram, FittingLegendEntries[legendIndex], "L")
        else:
            Legend.AddEntry(Histogram, FittingLegendEntries[legendIndex], "F")
        legendIndex = legendIndex - 1

    ### Drawing histograms to canvas
    makeRatioPlots = arguments.makeRatioPlots
    makeDiffPlots = arguments.makeDiffPlots

    yAxisMin = 0.0001
    if arguments.setYMin:
        yAxisMin = float(arguments.setYMin)

    ### Draw everything to the canvases !!!!
    for i in range(0, 2):  # 0 => before, 1 => after
        integrals = []
        ratios = []
        errors = []

        if i == 1:
            # loop over each dataset, saving its yield and the errors on it
            for j in range(0, len(HistogramsToFit)):
                integrals.append(HistogramsToFit[j].Integral())
                HistogramsToFit[j].Scale(func.GetParameter(j))
                ratios.append(func.GetParameter(j))
                errors.append(func.GetParError(j))

        for fittingHist in HistogramsToFit:
            if not arguments.noStack:
                Stack_list[i].Add(fittingHist)

        # creating the histogram to represent the statistical errors on the stack
        if not arguments.noStack:
            ErrorHisto = HistogramsToFit[0].Clone("errors")
            ErrorHisto.SetFillStyle(3001)
            ErrorHisto.SetFillColor(13)
            ErrorHisto.SetLineWidth(0)
            if i == 1:
                Legend.AddEntry(ErrorHisto, "Stat. Errors", "F")
            for Histogram in HistogramsToFit:
                if Histogram is not HistogramsToFit[0]:
                    ErrorHisto.Add(Histogram)

        if i == 0:
            Canvas = TCanvas(distribution['name'] + "_Before")
        if i == 1:
            Canvas = TCanvas(distribution['name'] + "_After")

        if makeRatioPlots or makeDiffPlots:
            Canvas.SetFillStyle(0)
            Canvas.Divide(1, 2)
            Canvas.cd(1)
            gPad.SetPad(0, 0.25, 1, 1)
            gPad.SetMargin(0.15, 0.05, 0.01, 0.07)
            gPad.SetFillStyle(0)
            gPad.Update()
            gPad.Draw()
            if arguments.setLogY:
                gPad.SetLogy()
            Canvas.cd(2)
            gPad.SetPad(0, 0, 1, 0.25)
            # format: gPad.SetMargin(l,r,b,t)
            gPad.SetMargin(0.15, 0.05, 0.4, 0.01)
            gPad.SetFillStyle(0)
            gPad.SetGridy(1)
            gPad.Update()
            gPad.Draw()
            Canvas.cd(1)

        ### finding the maximum value of anything going on the canvas, so we know how to set the y-axis
        finalMax = 0
        if numFittingSamples != 0 and not arguments.noStack:
            finalMax = ErrorHisto.GetMaximum() + ErrorHisto.GetBinError(ErrorHisto.GetMaximumBin())
        else:
            for bgMCHist in HistogramsToFit:
                if bgMCHist.GetMaximum() > finalMax:
                    finalMax = bgMCHist.GetMaximum()
        if Target.GetMaximum() > finalMax:
            finalMax = Target.GetMaximum() + Target.GetBinError(Target.GetMaximumBin())
        finalMax = 1.15 * finalMax
        if arguments.setYMax:
            finalMax = float(arguments.setYMax)

        if not arguments.noStack:  # draw stacked background samples
            Stack_list[i].SetTitle(histoTitle)
            Stack_list[i].Draw("HIST")
            Stack_list[i].GetXaxis().SetTitle(xAxisLabel)
            Stack_list[i].GetYaxis().SetTitle(yAxisLabel)
            Stack_list[i].SetMaximum(finalMax)
            Stack_list[i].SetMinimum(yAxisMin)
            if makeRatioPlots or makeDiffPlots:
                Stack_list[i].GetHistogram().GetXaxis().SetLabelSize(0)
            # draw shaded error bands
            ErrorHisto.Draw("A E2 SAME")
        else:  # draw the unstacked backgrounds
            HistogramsToFit[0].SetTitle(histoTitle)
            HistogramsToFit[0].Draw("HIST")
            HistogramsToFit[0].GetXaxis().SetTitle(xAxisLabel)
            HistogramsToFit[0].GetYaxis().SetTitle(yAxisLabel)
            HistogramsToFit[0].SetMaximum(finalMax)
            HistogramsToFit[0].SetMinimum(yAxisMin)
            for bgMCHist in HistogramsToFit:
                bgMCHist.Draw("A HIST SAME")

        Target.Draw("A E X0 SAME")

        # legend coordinates, empirically determined :-)
        x_left = 0.6761745
        x_right = 0.9328859
        x_width = x_right - x_left
        y_max = 0.9
        entry_height = 0.05

        if numFittingSamples != 0:  # then draw the data & bgMC legend
            numExtraEntries = 2  # count the target and (lack of) title
            Legend.SetX1NDC(x_left)
            numExtraEntries = numExtraEntries + 1  # count the stat. errors entry
            Legend.SetY1NDC(y_max - entry_height * (numExtraEntries + numFittingSamples))
            Legend.SetX2NDC(x_right)
            Legend.SetY2NDC(y_max)
            Legend.Draw()

            RatiosLabel.SetX1NDC(x_left - 0.1)
            RatiosLabel.SetX2NDC(x_right)
            RatiosLabel.SetY2NDC(Legend.GetY1NDC() - 0.1)
            RatiosLabel.SetY1NDC(RatiosLabel.GetY2NDC() - entry_height * numFittingSamples)

        # Deciding which text labels to draw and drawing them
        drawLumiLabel = False
        drawNormLabel = False
        offsetNormLabel = False
        drawHeaderLabel = False

        if not arguments.normalizeToUnitArea:  # don't draw the lumi label if there's no data and it's scaled to unit area
            drawLumiLabel = True
            # move the normalization label down before drawing if we drew the lumi label
            offsetNormLabel = True
        if arguments.normalizeToUnitArea or arguments.normalizeToData:
            drawNormLabel = True
        if arguments.makeFancy:
            drawHeaderLabel = True
            drawLumiLabel = False

        # now that the flags are set, draw the appropriate labels
        if drawLumiLabel:
            LumiLabel.Draw()
        if drawNormLabel:
            if offsetNormLabel:
                NormLabel.SetY1NDC(topLeft_y_bottom - topLeft_y_offset)
                NormLabel.SetY2NDC(topLeft_y_top - topLeft_y_offset)
            else:
                NormLabel.SetY1NDC(topLeft_y_bottom)
                NormLabel.SetY2NDC(topLeft_y_top)
            NormLabel.Draw()
        if drawHeaderLabel:
            HeaderLabel.Draw()

        YieldsLabel.Clear()
        mcYield = Stack_list[i].GetStack().Last().Integral()
        dataYield = Target.Integral()
        if i == 0:
            YieldsLabel.AddText("Before Fit to Data")
        if i == 1:
            YieldsLabel.AddText("After Fit to Data")
        YieldsLabel.AddText("data yield: " + '%.1f' % dataYield)
        YieldsLabel.AddText("bkgd yield: " + '%.1f' % mcYield)
        YieldsLabel.AddText("data/bkgd: " + '%.2f' % (dataYield / mcYield))
        if i == 1:
            for j in range(0, len(FittingLegendEntries)):
                if abs(ratios[j] - 1) < 0.001 and abs(errors[j]) < 0.001:  # then it probably was held fixed
                    continue
                if arguments.showFittedYields:
                    yield_ = ratios[j] * integrals[j]
                    yielderror_ = errors[j] * yield_
                    text = FittingLegendEntries[j] + " yield: " + '%.0f' % yield_ + ' #pm %.0f' % yielderror_
                else:
                    text = FittingLegendEntries[j] + " ratio: " + '%.2f' % ratios[j] + ' #pm %.2f' % errors[j]
                text = text + " (fit)"
                if arguments.parametricErrors:
                    yield_ = ratios[j] * integrals[j]
                    yieldParError_ = parErrors[j] * yield_
                    if arguments.showFittedYields:
                        text += ' #pm %.2f' % yieldParError_
                    else:
                        text += ' #pm %.2f' % parErrors[j]
                    text = text + " (sys)"
                RatiosLabel.AddText(text)
        YieldsLabel.Draw()
        RatiosLabel.Draw()

        # drawing the ratio or difference plot if requested
        if makeRatioPlots or makeDiffPlots:
            Canvas.cd(2)
            BgSum = Stack_list[i].GetStack().Last()
            if makeRatioPlots:
                if arguments.ratioRelErrMax:
                    Comparison = ratioHistogram(Target, BgSum, arguments.ratioRelErrMax)
                else:
                    Comparison = ratioHistogram(Target, BgSum)
            elif makeDiffPlots:
                Comparison = Target.Clone("diff")
                Comparison.Add(BgSum, -1)
                Comparison.SetTitle("")
                Comparison.GetYaxis().SetTitle("Data-Bkgd")
            Comparison.GetXaxis().SetTitle(xAxisLabel)
            Comparison.GetYaxis().CenterTitle()
            Comparison.GetYaxis().SetTitleSize(0.1)
            Comparison.GetYaxis().SetTitleOffset(0.5)
            Comparison.GetXaxis().SetTitleSize(0.15)
            Comparison.GetYaxis().SetLabelSize(0.1)
            Comparison.GetXaxis().SetLabelSize(0.15)
            if makeRatioPlots:
                RatioYRange = 1.15
                if arguments.ratioYRange:
                    RatioYRange = float(arguments.ratioYRange)
                Comparison.GetYaxis().SetRangeUser(-1 * RatioYRange, RatioYRange)
            elif makeDiffPlots:
                YMax = Comparison.GetMaximum()
                YMin = Comparison.GetMinimum()
                if YMax <= 0 and YMin <= 0:
                    Comparison.GetYaxis().SetRangeUser(-1.2 * YMin, 0)
                elif YMax >= 0 and YMin >= 0:
                    Comparison.GetYaxis().SetRangeUser(0, 1.2 * YMax)
                else:  # axis crosses y=0
                    if abs(YMax) > abs(YMin):
                        Comparison.GetYaxis().SetRangeUser(-1.2 * YMax, 1.2 * YMax)
                    else:
                        Comparison.GetYaxis().SetRangeUser(-1.2 * YMin, 1.2 * YMin)
            Comparison.GetYaxis().SetNdivisions(205)
            Comparison.Draw("E0")

        if i == 0:
            Canvas.Write(distribution['name'] + "_Before")
            if arguments.savePDFs:
                pathToDirString = plainTextString(pathToDir)
                Canvas.SaveAs(condor_dir + "/fitting_histogram_pdfs/" + pathToDirString + "/" + distribution['name'] + "_Before.pdf")
        if i == 1:
            Canvas.Write(distribution['name'] + "_After")
            if arguments.savePDFs:
                pathToDirString = plainTextString(pathToDir)
                Canvas.SaveAs(condor_dir + "/fitting_histogram_pdfs/" + pathToDirString + "/" + distribution['name'] + "_After.pdf")
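# Hedged usage sketch for MakeOneDHist: in the original framework this function is
# presumably driven by a loop over configured distributions; 'input_distributions'
# below is an assumed name for illustration only. Note that the savePDFs branch
# also relies on the globals 'histogram', 'input_histograms', and 'pdfFileName'
# being set by the surrounding script.
#
# for distribution in input_distributions:
#     MakeOneDHist("OSUAnalysis", distribution)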
from ROOT import TFile, TGraph, TCanvas, TLegend, TPad
from array import array
import sys

# plot all graphs on one canvas
graphs = {}
print(len(sys.argv[1:]))
for filename in sys.argv[1:]:
    f = TFile(filename + '.root', 'READ')
    f.GetList().ls()
    for i in range(0, f.GetNkeys()):
        g = f.Get('igprof_cumulative' + str(i))
        graphs.setdefault(g.GetTitle(), [[], []])[0].append(g)
        graphs[g.GetTitle()][1].append(filename)

for title, graphsList in graphs.items():
    # check Y axis range
    minval = []
    maxval = []
    for g in graphsList[0]:
        minval.append(g.GetYaxis().GetXmin())
        maxval.append(g.GetYaxis().GetXmax())
    canv = TCanvas("igprof", "igprof runtime", 0, 0, 1600, 1000)
    canv.Divide(2, 1)
    pad = canv.cd(1)
    leg = TLegend(0.1, 0.1, 0.9, 0.9)
    for i, g in enumerate(graphsList[0]):
        g.SetMarkerColor(i + 1)
        g.SetLineColor(i + 1)
        g.SetMarkerStyle(20 + i)
Legend = TLegend(0.70, 0.65, 0.9, 0.9)
Legend.SetBorderSize(0)
Legend.SetFillColor(0)
Legend.SetFillStyle(0)

finalMax = 0
Histograms = []
for histogram in input_histograms:
    fileName = "condor/" + histogram['condor_dir'] + "/" + histogram['dataset'] + ".root"
    if not os.path.exists(fileName):
        continue
    inputFile = TFile(fileName)
    if inputFile.IsZombie() or not inputFile.GetNkeys():
        continue
    Numerator = inputFile.Get("OSUAnalysis/" + histogram['channel_numerator'] + "/" + histogram['name']).Clone()
    Denominator = inputFile.Get("OSUAnalysis/" + histogram['channel_denominator'] + "/" + histogram['name']).Clone()
    Histogram = ROOT.TGraphAsymmErrors(Numerator, Denominator)
    inputFile.Close()
    fullTitle = Histogram.GetTitle()
    splitTitle = fullTitle.split(":")
    Histogram.SetTitle(splitTitle[1].lstrip(" "))
                          [[], []])[0].append(float(parameter))
        result[method_name][1].append(float(total.replace("'", "")))
        # TODO: add total column as well?

#print(result)

paramname = "energy (GeV)"
accuracy = 1.e-1
file = TFile(sys.argv[1] + '.root', 'RECREATE')
i = 0
for name, x in result.items():  # iteritems():
    if len(x[0]) != len(x[1]):
        print("Lengths of x0 and x1 differ! Terminating.")
        exit()
    # check if the series is constant; skip it if so
    mean = sum(x[1]) / len(x[1])
    if not all(abs(item - mean) < accuracy for item in x[1]):
        x_arr = array('f', x[0])
        y_arr = array('f', x[1])
        graph = TGraph(len(x[0]), x_arr, y_arr)
        graph.SetName("igprof_cumulative" + str(i))
        graph.SetTitle(name)
        i += 1
        graph.GetXaxis().SetTitle(paramname)
        graph.GetYaxis().SetTitle("%")
        graph.SetMarkerColor(1)
        graph.SetMarkerStyle(20)
        graph.SetMarkerSize(1.5)
        graph.Write()

print("Created " + str(file.GetNkeys()) + " graphs")
file.Close()
def GetFittedQCDYieldAndError(pathToDir):
    fileName = condor_dir + "/" + data_dataset + ".root"
    if not os.path.exists(fileName):
        return 0
    inputFile = TFile(fileName)
    if inputFile.IsZombie() or not inputFile.GetNkeys():
        return 0

    DataHistogram = inputFile.Get("OSUAnalysis/" + region_names['A'] + "CutFlow").Clone()
    DataHistogram.SetDirectory(0)
    nBins = DataHistogram.GetNbinsX()
    content = DataHistogram.GetBinContent(nBins)
    error = DataHistogram.GetBinError(nBins)
    relError = error / content if content > 0 else 0
    print 'Data : ' + str(content) + ' +- ' + str(error) + ' ( +-' + str(relError * 100.0) + '% )'
    inputFile.Close()

    # loop over the different samples that get subtracted from the total, using their cutflow tables
    for sample in impurities:
        dataset_file = "%s/%s.root" % (condor_dir, sample)
        inputFile = TFile(dataset_file)
        HistogramObj = inputFile.Get(pathToDir + "/" + region_names['A'] + "CutFlow")
        if not HistogramObj:
            print "WARNING: Could not find histogram " + pathToDir + "CutFlow" + " in file " + dataset_file + ". Will skip it and continue."
            continue
        MCHistogram = HistogramObj.Clone()
        MCHistogram.SetDirectory(0)
        inputFile.Close()

        nBins = MCHistogram.GetNbinsX()
        content = MCHistogram.GetBinContent(nBins)
        error = MCHistogram.GetBinError(nBins)
        statRelError = error / content if content > 0 else 0
        sysRelError = getSystematicError(sample)
        # add the stat. and sys. errors in quadrature and apply them to the last bin of the cutflow table
        totalRelError = sqrt(statRelError * statRelError + sysRelError * sysRelError)
        MCHistogram.SetBinError(nBins, totalRelError * content)
        # subtract the MC histogram from the data
        DataHistogram.Add(MCHistogram, -1)
        print str(sample) + ' : ' + str(content) + ' +- ' + str(MCHistogram.GetBinError(nBins)) + ' ( +-' + str(totalRelError * 100.0) + '% )'

    nBins = DataHistogram.GetNbinsX()
    content = DataHistogram.GetBinContent(nBins)
    error = DataHistogram.GetBinError(nBins)

    yieldAndError = []
    yieldAndError.append(content)
    yieldAndError.append(error)

    return yieldAndError
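# Hedged usage sketch: this version of GetFittedQCDYieldAndError estimates the QCD
# yield by subtracting the MC 'impurities' from the data cutflow in region A. The
# call below is illustrative only; "OSUAnalysis" is the directory name used
# throughout these scripts, and the function returns 0 if the data file is missing.
#
# result = GetFittedQCDYieldAndError("OSUAnalysis")
# if result:
#     qcdYield, qcdYieldError = result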