lgd.SetBorderSize(0) lgd.SetFillColor(0) # Z measured rawFF.SetBins(10, 0, 1.0) DrawFF(rawFF, 'hRaw_' + tag) lgd.AddEntry(rawFF, 'Raw') for nIter in range(4, 5): bayes.SetIterations(nIter) hist = DrawFF(bayes.Hreco(0), 'hBayes' + repr(nIter) + '_' + tag) for ix in range(1, hist.GetNbinsX()): hist.SetBinError(ix, rawFF.GetBinError(ix)) lgd.AddEntry(hist, 'Bayes (N=%d)' % nIter) lgd.Draw('same') padFF.Print(printFile, 'Title:' + tag) padFF.Write('c' + tag) detMix = mc.hResponseFF_Prompt.Clone('hDetMix') detMix.Add(mc.hResponseFF_Bdecay) detMix.SetTitle('Detector response matrix - z (Combined)') UnfoldFF(raw.hZPromptAfter, mc.hResponseFF_Prompt, 'Prompt') UnfoldFF(raw.hZBdecayAfter, mc.hResponseFF_Bdecay, 'Bdecay') UnfoldFF(raw.hZPromptAfter, detMix, 'PromptMix') UnfoldFF(raw.hZBdecayAfter, detMix, 'BdecayMix') padFF.Clear() PrintCover(padFF, printFile, '', isBack=True) out.Write() out.Close()
def main(argv=None):
    # Command-line driver for DQM trend plots (Python 2).
    # Reads .ini configs describing the plots, queries the DQM GUI for runs
    # matching the dataset mask, fills each plot (using a text cache to avoid
    # re-downloading), then writes the plots in the configured formats and an
    # optional multi-page PostScript summary.
    #
    # argv: argument list to parse; defaults to sys.argv[1:].
    import sys
    import os
    from optparse import OptionParser
    from ROOT import TCanvas, TFile
    from src.dqmjson import dqm_get_json, dqm_getTFile, dqm_getTFile_Version
    if argv == None:
        argv = sys.argv[1:]
    parser = OptionParser()
    parser.add_option("-C", "--config", dest="config", default=[], action="append",
                      help="configuration defining the plots to make")
    parser.add_option("-o", "--output", dest="outPath", default=None,
                      help="path to output plots. If it does not exsist it is created")
    parser.add_option("-r", "--runs", dest="runs", default="all",
                      help="mask for the run (full boolean and math capabilities e.g. run > 10 and run *2 < -1)")
    parser.add_option("-D", "--dataset", dest="dset", default="Jet",
                      help="mask for the primary dataset (default is Jet), e.g. Cosmics, MinimumBias")
    parser.add_option("-E", "--epoch", dest="epoch", default="Run2012",
                      help="mask for the data-taking epoch (default is Run2012), e.g. Run2011B, Run2011A, etc.")
    parser.add_option("-R", "--reco", dest="reco", default="Prompt",
                      help="mask for the reconstruction type (default is Prompt), e.g. 08Nov2011, etc.")
    parser.add_option("-t", "--tag", dest="tag", default="v*",
                      help="mask for the reco dataset tag (default is v*), e.g. v5")
    parser.add_option("-d", "--datatier", dest="datatier", default="DQMIO",
                      help="mask for the datatier name (default is DQMIO)")
    parser.add_option("-s", "--state", dest="state", default="ALL",
                      help="mask for strip state, options are ALL, PEAK, DECO, or MIXED -- only applicable if dataset is 'Cosmics'")
    parser.add_option("-L", "--list", dest="list", type="string", default=[], action="store")
    parser.add_option("-J", "--json", dest="json", type="string", default=[], action="store")
    (opts, args) = parser.parse_args(argv)
    # Fall back to the default config file when -C was never given.
    if opts.config == []:
        opts.config = "trendPlots.ini"
    config = BetterConfigParser()
    config.read(opts.config)
    # if opts.json!=[]:
    #     import simplejson as json
    #     aaf = open("Cert_190456-191276_8TeV_PromptReco_Collisions12_JSON_MuonPhys.txt","r")
    #     tempF=open("temp.txt")
    initStyle(config)
    # Regexp-style dataset mask built from the CLI options, e.g.
    # ".*/Jet/Run2012.*Prompt*.*v*/DQMIO".
    dsetmask = ".*/" + opts.dset + "/" + opts.epoch + ".*" + opts.reco + "*.*" + opts.tag + "/" + opts.datatier
    print "dsetmask = ", dsetmask
    print "opts.state = ", opts.state
    print "opts.runs = ", opts.runs
    print "opts.list = ", opts.list
    print "opts.json = ", opts.json
    runs = getRunsFromDQM(config, dsetmask, opts.dset, opts.state,
                          opts.datatier, opts.runs, opts.list, opts.json)
    if not runs:
        raise StandardError, "*** Number of runs matching run/mask/etc criteria is equal to zero!!!"
    print "runs= ", runs
    print "got %s run between %s and %s" % (len(runs), min(runs.keys()), max(runs.keys()))
    # getReferenceRun(config, runs)
    plots, cache = initPlots(config)
    # Cache keys are tuples; element [1] is the run number (see cacheLocation
    # below), so runInCache lists the run numbers already cached.
    runInCache = []
    for itest in range(0, len(cache.keys())):
        runInCache.append(cache.keys()[itest][1])
    for run in sorted(runs.keys()):
        if cache == None or runs[run][1] not in runInCache:
            print "............------------>>> RUN %s NOT IN CACHE" % (runs[run][1])
            # Ask the DQM GUI whether the run was fully processed.
            rc = dqm_get_json(runs[run][0], runs[run][1], runs[run][2], "Info/ProvInfo")
            print "............------------>>> RunIsComplete flag: ", rc['runIsComplete']['value']
            isDone = int(rc['runIsComplete']['value'])
            # NOTE(review): for non-DQMIO datatiers the completeness flag is
            # ignored and the run is always treated as done.
            if opts.datatier != "DQMIO":
                isDone = 1
        else:
            # Run already cached: no need to re-check completeness.
            isDone = 1
            print "............------------>>> RUN %s IN CACHE" % (runs[run][1])
        if isDone == 1:
            # runs[run][2] is the file version; 0 means none was found.
            if (runs[run][2] != 0):
                fopen = False
                tfile = None
                for plot in plots:
                    cacheLocation = (runs[run][0], runs[run][1], runs[run][2],
                                     plot.getPath(), plot.getMetric())
                    # Open the ROOT file lazily: only once, and only if at
                    # least one plot is missing from the cache.
                    if (cache == None and not fopen) or (cacheLocation not in cache and not fopen):
                        version = dqm_getTFile_Version(runs[run][0], runs[run][1],
                                                       runs[run][2], opts.datatier)
                        tfile = dqm_getTFile(runs[run][0], runs[run][1],
                                             runs[run][2], version, opts.datatier)
                        print "-----> Openning File Version ", version
                        fopen = True
                    plot.addRun(runs[run][0], runs[run][1], runs[run][2], tfile)
                if fopen:
                    tfile.Close()
            else:
                print "Not File Version found"
        else:
            print "################### RUN %s NOT FULLY PROCESSED, SKIP #############" % (runs[run][1])
    # Persist the (possibly updated) cache as a plain-text repr.
    cachePath = config.get("output", "cachePath")
    cacheFile = open(cachePath, "w")
    cacheFile.write(str(cache))
    cacheFile.close()
    # Output directory: fig/<reco>/<epoch>/<dataset>[/<state>] unless -o given.
    outPath = "fig/" + opts.reco + "/" + opts.epoch + "/" + opts.dset
    if 'Cosmics' in opts.dset:
        outPath = outPath + "/" + opts.state
    ##outPath = config.get("output","defautlOutputPath")
    if not opts.outPath == None:
        outPath = opts.outPath
    if not os.path.exists(outPath):
        os.makedirs(outPath)
    makeSummary = config.getboolean("output", "makeSummary")
    # canvasSize config value is e.g. "800x600".
    canvasSize = [int(i) for i in config.get("styleDefaults", "canvasSize").split("x")]
    canvas = TCanvas("trendplot", "trendplot", canvasSize[0], canvasSize[1])
    canvas.Clear()
    canvas.SetBottomMargin(0.14)
    canvas.SetGridy()
    # "file.ps[" opens a multi-page PostScript file without printing a page.
    if makeSummary:
        canvas.Print(os.path.join(outPath, "trendPlots.ps["))
    for plot in plots:
        #(graph, legend, refLabel) = plot.getGraph()
        # First call probes that the graph can be built at all; failures are
        # reported and the plot skipped.
        try:
            plot.getGraph()
        except:
            print "Error producing plot:", plot.getName()
            print "Possible cause: no entries, or non-existing plot name"
            continue
        (graph, legend) = plot.getGraph()
        canvas.Clear()
        graph.Draw("AP")
        graph.GetYaxis().SetTitleOffset(1.6)
        plot.formatGraphAxis(graph)
        #refLabel.Draw()
        legend.Draw()
        canvas.SetLeftMargin(0.125)
        plot.drawAnnotation()
        canvas.Modified()
        canvas.Update()
        for formatExt in config.get("output", "formats").split():
            if formatExt == 'root':
                # ROOT output gets the histogram and graph objects themselves,
                # not a canvas snapshot.
                (histotemp, legend) = plot.getHISTO()
                #(graphtemp, legend1) = plot.getGraphSimple()
                #plot.formatGraphAxis(graphtemp)
                F_out = TFile.Open(os.path.join(outPath, "%s.%s" % (plot.getName(), formatExt)), "RECREATE")
                F_out.cd()
                histotemp.Write()
                graph.Write()
                # graphtemp.SetDrawOption("AP")
                # graphtemp.Write("P")
                F_out.Close()
            else:
                canvas.Print(os.path.join(outPath, "%s.%s" % (plot.getName(), formatExt)))
        if makeSummary:
            canvas.Print(os.path.join(outPath, "trendPlots.ps"))
    # "file.ps]" closes the multi-page PostScript file.
    if makeSummary:
        canvas.Print(os.path.join(outPath, "trendPlots.ps]"))
mA_binmax = mA_max_plot mA_binmin = 0 mA_nbin = 1000 mA_bins = np.linspace(mA_binmin, mA_binmax, mA_nbin) graph_contour = TH2F('mass_plane', ';M_{bb} [GeV];M_{llbb} [GeV]', mA_nbin, mA_binmin, mA_binmax, mH_nbin, mH_binmin, mH_binmax) k = 0 for mh in mH_bins: for ma in mA_bins: if k % 1000 == 0: sys.stdout.write('\r\tInterpolate TGraph2D : %0.f%%' % ((k / (mH_nbin * mA_nbin)) * 100)) sys.stdout.flush() out_graph = graph.Interpolate(ma, mh) graph_contour.Fill(ma, mh, out_graph) k += 1 print() graph_contour.SetLineWidth(4) graph_contour.SetContour(1) graph_contour.SetContourLevel(0, cut1) graph_contour.GetXaxis().SetLimits(0, mA_max_plot) graph_contour.GetYaxis().SetLimits(0, mH_max_plot) graph_contour.Draw('CONT2 same') Cut.Draw() gPad.Update() #input("Press key to end") c1.Print(path_plot + 'Graph_mH_' + mH + '_mA_' + mA + '.pdf') c1.Clear()
def main():
    # Significance study (Python 2): builds the background-only test-statistic
    # distribution -2*(logL1 - logL0) from toy experiments, fits a chi^2-like
    # shape, and quotes the p-value of the observed value 2*4.3808.
    # Free names from module scope: ROOT, loadToysForBackgroundOnly,
    # getEffciency.
    from sys import argv
    from ROOT import TCanvas, TPad, TH1F, TH2F, TH1I, THStack, TLegend, TF1, TGraphErrors, TTree
    from numpy import array
    import numpy as n
    from setTDRStyle import setTDRStyle
    region = "SignalNonRectCombinedConstrained"
    hCanvas = TCanvas("hCanvas", "Distribution", 800, 800)
    plotPad = ROOT.TPad("plotPad", "plotPad", 0, 0, 1, 1)
    style = setTDRStyle()
    plotPad.UseCurrentStyle()
    plotPad.Draw()
    plotPad.cd()
    #~ result=([name,0., parameters,parametersError,parametersMass,parametersChi2,parametersLogLH0,parametersLogLH1, project, region, title, name])
    # bckgOnlyPkl layout (see commented line above): [6]=logL(H0), [7]=logL(H1),
    # [4]=fitted mass parameter, keyed per-toy by a hash-like index.
    bckgOnlyPkl = loadToysForBackgroundOnly(region)
    bckgOnlyHist = TH1F("bckgOnlyHist", "bckgOnlyHist", 50, -1, 22)
    nTest = 0
    nTestLarger = 0
    # "Full" histograms count all toys with mass > 0; the plain ones apply the
    # additional mass < 90 window.
    nominatorHist = ROOT.TH1F("nominatorHist", "nominatorHist", 1, 0, 1)
    nominatorHistFull = ROOT.TH1F("nominatorHist", "nominatorHist", 1, 0, 1)
    denominatorHist = ROOT.TH1F("denominatorHist", "denominatorHist", 1, 0, 1)
    denominatorHistFull = ROOT.TH1F("denominatorHist", "denominatorHist", 1, 0, 1)
    for index, value in bckgOnlyPkl[2].iteritems():
        # One specific toy is excluded by its index -- reason not recorded here.
        if not index == "38ed30c9":
            if bckgOnlyPkl[4][index] > 0:
                denominatorHistFull.Fill(0.5)
                # 2*4.3808 is the observed test-statistic value.
                if -2 * (bckgOnlyPkl[7][index] - bckgOnlyPkl[6][index]) >= 2 * 4.3808:
                    nominatorHistFull.Fill(0.5)
            if bckgOnlyPkl[4][index] < 90 and bckgOnlyPkl[4][index] > 0:
                bckgOnlyHist.Fill(-2 * (bckgOnlyPkl[7][index] - bckgOnlyPkl[6][index]))
                nTest = nTest + 1
                denominatorHist.Fill(0.5)
                if -2 * (bckgOnlyPkl[7][index] - bckgOnlyPkl[6][index]) >= 2 * 4.3808:
                    nTestLarger = nTestLarger + 1
                    nominatorHist.Fill(0.5)
    print nTest, nTestLarger
    hCanvas.Clear()
    plotPad = ROOT.TPad("plotPad", "plotPad", 0, 0, 1, 1)
    plotPad.UseCurrentStyle()
    plotPad.Draw()
    plotPad.cd()
    hCanvas.DrawFrame(-1, 0, 18,
                      bckgOnlyHist.GetBinContent(bckgOnlyHist.GetMaximumBin()) + 20,
                      "; %s ; %s" % ("-2*(log(L_{1})-log(L_{0}))", "N_{Results}"))
    # GammaDist with shape 1.5 approximates the expected chi^2 behaviour.
    chi2Shape = ROOT.TF1("chi2Shape", "[2]*TMath::GammaDist(x,[0],[1],2)", 0, 18)
    chi2Shape.SetParameters(1.5, 0, 200)
    #~ chi2Shape.Draw("same")
    #~
    bckgOnlyHist.Fit("chi2Shape")
    hCanvas.DrawFrame(-1, 0, 22,
                      bckgOnlyHist.GetBinContent(bckgOnlyHist.GetMaximumBin()) + 20,
                      "; %s ; %s" % ("-2*(log(L_{1})-log(L_{0}))", "N_{Results}"))
    latex = ROOT.TLatex()
    latex.SetTextSize(0.04)
    latex.SetNDC(True)
    latex.DrawLatex(0.15, 0.96,
                    "CMS Simulation #sqrt{s} = 8 TeV, #scale[0.6]{#int}Ldt = %s fb^{-1}" % 19.4)
    latex = ROOT.TLatex()
    latex.SetTextSize(0.04)
    latex.SetNDC(True)
    #~ latex.DrawLatex(0.6, 0.6, "#splitline{fit of #chi^{2} dist., NDF = %.2f}{#chi^{2}/N_{dof} %.2f}"%(chi2Shape.GetParameter(0)*2,chi2Shape.GetChisquare()/chi2Shape.GetNDF()))
    # Vertical line marking the observed value.
    observedLine = ROOT.TLine(2 * 4.3808, 0, 2 * 4.3808,
                              bckgOnlyHist.GetBinContent(bckgOnlyHist.GetMaximumBin()) + 20)
    observedLine.SetLineColor(ROOT.kRed)
    observedLine.SetLineStyle(2)
    observedLine.Draw("same")
    bckgOnlyHist.Draw("samepe")
    #~ chi2Shape.Draw("same")
    ROOT.gStyle.SetOptStat(0)
    ROOT.gStyle.SetOptFit(0)
    legend = ROOT.TLegend(0.5, 0.75, 0.95, 0.95)
    legend.SetFillStyle(0)
    legend.SetBorderSize(0)
    legend.SetTextFont(42)
    legend.AddEntry(observedLine, "observed", "l")
    # Invisible dummy histogram used to add a text-only legend entry.
    hist = ROOT.TH1F()
    hist.SetLineColor(ROOT.kWhite)
    #~ efficiencyObject = ROOT.TEfficiency(nominatorHist,denominatorHist)
    #~ uncertaintyUp = efficiencyObject.Wilson(nTestLarger,nTest,0.69,ROOT.kTRUE)
    #~ uncertaintyDown = efficiencyObject.Wilson(nTestLarger,nTest,0.69,ROOT.kFALSE)
    #~ print uncertaintyUp
    pValue = getEffciency(nominatorHist, denominatorHist)
    pValueFull = getEffciency(nominatorHistFull, denominatorHistFull)
    #~ legend.AddEntry(hist,"p-Value low mass: %.3f + %.3f - %.3f (%.2f #sigma)"%(pValue[0],pValue[1],pValue[2], ROOT.TMath.NormQuantile(1.0-pValue[0]/2)))
    # NOTE(review): the displayed p-value uses pValueFull but the sigma is
    # computed from pValue (the mass-windowed one) -- confirm this mix is
    # intentional.
    legend.AddEntry(hist,
                    "p-Value: %.3f + %.3f - %.3f (%.2f #sigma)" %
                    (pValueFull[0], pValueFull[1], pValueFull[2],
                     ROOT.TMath.NormQuantile(1.0 - pValue[0] / 2)))
    legend.Draw("same")
    hCanvas.Print("significanceStudy_BackgroundOnly.pdf")
    hCanvas.Print("significanceStudy_BackgroundOnly.root")
    print nTest, nTestLarger
def plotBlockComparison(treeBlockA, treeBlockB, variable, additionalCut,
                        nBins, firstBin, lastBin, labelX, labelY, suffix,
                        log=False, signal=False):
    # Compare one variable between two data-taking blocks (Python 2):
    # draws both distributions (block B scaled by the luminosity ratio
    # 9.2/10.4) plus a ratio pad, then prints the canvas to PDF.
    # Free names from module scope: ROOT, TCanvas, TLegend, setTDRStyle,
    # createHistoFromTree, ratios.
    hCanvas = TCanvas("hCanvas", "Distribution", 800, 800)
    plotPad = ROOT.TPad("plotPad", "plotPad", 0, 0.3, 1, 1)
    ratioPad = ROOT.TPad("ratioPad", "ratioPad", 0, 0., 1, 0.3)
    setTDRStyle()
    plotPad.UseCurrentStyle()
    ratioPad.UseCurrentStyle()
    plotPad.Draw()
    ratioPad.Draw()
    plotPad.cd()
    legend = TLegend(0.5, 0.55, 0.8, 0.95)
    legend.SetFillStyle(0)
    legend.SetBorderSize(1)
    minMll = 20
    ROOT.gStyle.SetOptStat(0)
    # Pre-configured label objects (not all are drawn in this function).
    Cutlabel = ROOT.TLatex()
    Cutlabel.SetTextAlign(12)
    Cutlabel.SetTextSize(0.03)
    Labelin = ROOT.TLatex()
    Labelin.SetTextAlign(12)
    Labelin.SetTextSize(0.07)
    Labelin.SetTextColor(ROOT.kRed + 2)
    Labelout = ROOT.TLatex()
    Labelout.SetTextAlign(12)
    Labelout.SetTextSize(0.07)
    Labelout.SetTextColor(ROOT.kBlack)
    EMuhistBlockA = createHistoFromTree(treeBlockA, variable, additionalCut,
                                        nBins, firstBin, lastBin, -1)
    EMuhistBlockB = createHistoFromTree(treeBlockB, variable, additionalCut,
                                        nBins, firstBin, lastBin, -1)
    # Scale block B to block A's integrated luminosity (9.2 vs 10.4 fb^-1).
    EMuhistBlockB.Scale(9.2 / 10.4)
    print EMuhistBlockA.Integral()
    print EMuhistBlockB.Integral()
    EMuhistBlockA.SetMarkerStyle(21)
    EMuhistBlockB.SetMarkerStyle(22)
    EMuhistBlockA.SetMarkerColor(ROOT.kGreen + 3)
    EMuhistBlockB.SetMarkerColor(ROOT.kBlack)
    EMuhistBlockA.SetLineColor(ROOT.kGreen + 3)
    EMuhistBlockB.SetLineColor(ROOT.kBlack)
    # y-range: headroom factor 10 on log scale, 1.5 on linear.
    if log:
        yMin = 0.1
        yMax = max(EMuhistBlockA.GetBinContent(EMuhistBlockA.GetMaximumBin()),
                   EMuhistBlockB.GetBinContent(EMuhistBlockB.GetMaximumBin())) * 10
        plotPad.SetLogy()
    else:
        yMin = 0
        yMax = max(EMuhistBlockA.GetBinContent(EMuhistBlockA.GetMaximumBin()),
                   EMuhistBlockB.GetBinContent(EMuhistBlockB.GetMaximumBin())) * 1.5
    hCanvas.DrawFrame(firstBin, yMin, lastBin, yMax, "; %s ; %s" % (labelX, labelY))
    EMuhistBlockA.Draw("samep")
    EMuhistBlockB.Draw("samep")
    legend.AddEntry(EMuhistBlockA, "First 9.2 fb^{-1}", "p")
    legend.AddEntry(EMuhistBlockB, "Second 10.4 fb^{-1} scaled", "p")
    #~
    latex = ROOT.TLatex()
    latex.SetTextSize(0.043)
    latex.SetTextFont(42)
    latex.SetNDC(True)
    latex.DrawLatex(0.13, 0.95, "CMS Preliminary, #sqrt{s} = 8 TeV, #scale[0.6]{#int}Ldt = 9.2-10.4 fb^{-1}")
    #~
    legend.Draw("same")
    ratioPad.cd()
    ratioGraphs = ratios.RatioGraph(EMuhistBlockA, EMuhistBlockB, firstBin, lastBin,
                                    title="Bl. A / Bl. B", yMin=0.0, yMax=2,
                                    ndivisions=10, color=ROOT.kGreen + 3,
                                    adaptiveBinning=0.25)
    ratioGraphs.draw(ROOT.gPad, True, False, True, chi2Pos=0.8)
    if signal:
        name = "OFUnblinding_SignalRegion_%s_%s.pdf"
    else:
        name = "OFUnblinding_Inclusive_%s_%s.pdf"
    # "p4.M()" is the dilepton mass; use the friendlier "Mll" in the filename.
    if variable == "p4.M()":
        hCanvas.Print(name % (suffix, "Mll"))
    else:
        hCanvas.Print(name % (suffix, variable))
    hCanvas.Clear()
def MCSPlot(self, pname):
    # Multiple-Coulomb-scattering comparison plots (Python 2 / PyROOT).
    # For each scattering variable in self.histvarnames, overlays the
    # distributions for each state in self.histstatenames, draws normalized
    # residuals, computes RMS/chi2 bookkeeping via self.addToRMS /
    # self.calculateChi2, and returns LaTeX table rows.
    #
    # pname: prefix for output image files; "Truth" suppresses the
    #        systematic-error table.
    # Returns [summary, syssummary]: lists of LaTeX table-row strings.
    #print self.fname
    f = TFile(self.fname)
    # Result containers filled by addToRMS/calculateChi2, keyed
    # [histvar][statename] (and [sysname] for the sys dicts).
    self.RMS = {}
    self.RMSErr = {}
    self.Chi2 = {}
    self.RMSsysdiff = {}
    self.RMSsyserr = {}
    # create a plot for each histvarname
    for histvar in self.histvarnames:
        self.RMS[histvar] = {}
        self.RMSErr[histvar] = {}
        self.RMSsysdiff[histvar] = {}
        self.RMSsyserr[histvar] = {}
        self.Chi2[histvar] = {}
        names = [histvar + '_' + x for x in self.histstatenames]
        print names
        hists = [f.Get(histvar + '_' + x) for x in self.histstatenames]
        print hists[0]
        hists[0].SetTitle("")
        # hists[3].Scale(norm)
        # self.formatHist(hist[0], 0)
        resplots = [x.Clone() for x in hists]
        resplots[0].SetTitle('')
        resplots[0].GetYaxis().SetTitle("Normalized Residuals")
        # if histvar == 'thetaScatt':
        leg = TLegend(0.55, 0.73, 0.89, 0.92)
        leg.SetLineColor(10)
        # else:
        #     leg = TLegend(0.35,0.2,0.65,0.5)
        for i in range(len(self.histstatedesc)):
            hists[i].Sumw2()
            # theta^2 histograms are much more finely binned; rebin harder.
            if histvar == 'theta2Scatt':
                hists[i].Rebin(8)
                resplots[i].Rebin(8)
            elif histvar == 'thetaScatt':
                hists[i].Rebin(1)
                resplots[i].Rebin(1)
            else:
                hists[i].Rebin(1)
                resplots[i].Rebin(1)
            self.formatHists(hists[i], i)
            self.formatHists(resplots[i], i)
            # Accumulate RMS info; hists[0] is the reference state.
            self.addToRMS(i, hists[i], hists[0], resplots[i], histvar)
            # Axis label shows the bin width in mrad (or mrad^2).
            if histvar == 'theta2Scatt':
                hists[i].GetYaxis().SetTitle('Probability per ' + str(
                    round(1000 * 1000 * hists[i].GetXaxis().GetBinWidth(4), 2)) + ' mrad^{2}')
            else:
                hists[i].GetYaxis().SetTitle('Probability per ' + str(
                    round(1000 * hists[i].GetXaxis().GetBinWidth(4), 2)) + ' mrad')
            leg.AddEntry(hists[i], self.histstatedesc[i], self.histopts[i])
            #print hists[0]
            self.calculateChi2(i, hists[i], hists[0], resplots[i], histvar, pname)
        c = TCanvas(self.fname[:-5] + '_' + histvar + '_c1')
        # Dataset annotation depends on the absorber material.
        if self.desc[0] == 'XePion':
            t1 = TText(0.18, 0.885, "MICE ISIS cycle 2015/03")
            t2 = TText(0.18, 0.85, "Xe, " + self.desc[1][2:5] + ", MAUS v3.1.2")
        else:
            t1 = TText(0.18, 0.885, "MICE ISIS cycle 2015/04")
            t2 = TText(0.18, 0.85, "LiH, " + self.desc[1][2:5] + ", MAUS v3.1.2")
        t1.SetNDC(1)
        t1.SetTextSize(0.04)
        t1.SetTextFont(42)
        t2.SetNDC(1)
        t2.SetTextSize(0.03)
        t2.SetTextFont(42)
        hists[0].GetYaxis().SetRangeUser(4e-4, 2.0)
        hists[0].SetTitle(";" + hists[0].GetXaxis().GetTitle() +
                          " (radians);" + hists[0].GetYaxis().GetTitle())
        hists[0].Draw('ep')
        c.SetBottomMargin(0.15)
        c.SetTopMargin(0.075)
        for h in hists[1:len(self.histstatedesc)]:
            h.Draw('epsame')
        leg.SetTextSize(0.04)
        leg.Draw('same')
        t1.Draw()
        t2.Draw()
        c.SetLogy()
        c.SaveAs(pname + '_' + self.fname[:-5] + '_' + histvar + '_sys.eps')
        c.SaveAs(pname + '_' + self.fname[:-5] + '_' + histvar + '_sys.root')
        c.SaveAs(pname + '_' + self.fname[:-5] + '_' + histvar + '_sys_pq.jpg')
        c.Clear()
        # Second page: residual plots on a linear scale.
        c.SetLogy(0)
        resplots[0].GetYaxis().SetRangeUser(-2, 2)
        resplots[0].SetTitle(";" + resplots[0].GetXaxis().GetTitle() +
                             " (radians);" + resplots[0].GetYaxis().GetTitle())
        leg.SetX1NDC(0.5)
        leg.SetX2NDC(0.89)
        leg.SetY1NDC(0.2)
        leg.SetY2NDC(0.4)
        resplots[0].Draw("p")
        for r in resplots:
            r.Draw('psame')
        leg.SetTextSize(0.04)
        leg.Draw('same')
        t1.Draw()
        t2.Draw()
        # pblock.Draw()
        c.SaveAs(pname + '_' + self.fname[:-5] + '_' + histvar + '_sys_res_T.eps')
        c.SaveAs(pname + '_' + self.fname[:-5] + '_' + histvar + '_sys_res_pq.jpg')
    # Beam momentum estimate from the corrected momentum histogram.
    momhist = f.Get("cor_mom")
    #mom = [momhist.GetMean() + 19.468, momhist.GetMeanError()]
    #if self.fname.find("LiHMuon_03172") >= 0:
    #    mom = [momhist.GetMean()*1.107 + 1.05, momhist.GetMeanError()]
    #elif self.fname.find("LiHMuon_03200") >= 0:
    #    mom = [momhist.GetMean()*1.104 + 1.139, momhist.GetMeanError()]
    #elif self.fname.find("LiHMuon_03240") >= 0:
    #    mom = [momhist.GetMean()*1.17 - 9.41, momhist.GetMeanError()]
    # NOTE(review): all three branches are now identical (the per-run
    # calibrations above are commented out); 'mom' is undefined for other
    # file names.
    if self.fname.find("LiHMuon_03172") >= 0:
        mom = [momhist.GetMean(), momhist.GetMeanError()]
    elif self.fname.find("LiHMuon_03200") >= 0:
        mom = [momhist.GetMean(), momhist.GetMeanError()]
    elif self.fname.find("LiHMuon_03240") >= 0:
        mom = [momhist.GetMean(), momhist.GetMeanError()]
    rms = [momhist.GetRMS(), momhist.GetRMSError()]
    summary = []
    syssummary = []

    def sigfig(x):
        # Number of decimal places so that x is rounded to ~1 significant
        # figure; 1 for values indistinguishable from zero.
        if math.fabs(x) > 1e-5:
            return int(math.ceil(math.fabs(math.log(math.fabs(x), 10))))
        else:
            return 1

    # syssummary.append("p (MeV/c) & "+self.histvarnames[0]+"&"+self.histvarnames[1]+"&"+self.histvarnames[3]+"\\\\")
    if pname != "Truth":
        # One table row per systematic-variation file.
        # NOTE(review): 'sys' here shadows any imported sys module within
        # this loop.
        for sys in self.sysFiles:
            # if sys[3] == 'Material':
            stindx = 1
            # else: stindx = 0
            # print sys[3], self.histstatenames[stindx]
            # sys[3] is the systematic's name; columns use histvarnames
            # 0, 1 and 3 only.
            difference0 = self.RMSsysdiff[self.histvarnames[0]][
                self.histstatenames[stindx]][sys[3]]
            difference1 = self.RMSsysdiff[self.histvarnames[1]][
                self.histstatenames[stindx]][sys[3]]
            difference3 = self.RMSsysdiff[self.histvarnames[3]][
                self.histstatenames[stindx]][sys[3]]
            syserr0 = self.RMSsyserr[self.histvarnames[0]][
                self.histstatenames[stindx]][sys[3]]
            syserr1 = self.RMSsyserr[self.histvarnames[1]][
                self.histstatenames[stindx]][sys[3]]
            syserr3 = self.RMSsyserr[self.histvarnames[3]][
                self.histstatenames[stindx]][sys[3]]
            relerr0 = syserr0 / self.RMS[self.histvarnames[0]][
                self.histstatenames[0]]
            relerr1 = syserr1 / self.RMS[self.histvarnames[1]][
                self.histstatenames[0]]
            relerr3 = syserr3 / self.RMS[self.histvarnames[3]][
                self.histstatenames[0]]
            syssummary.append(str(round(mom[0],sigfig(mom[1])))+"$\pm$"+str(round(mom[1],sigfig(mom[1])))+\
                " & "+str(round(difference0,sigfig(difference0)))+\
                " & "+str(round(syserr0,sigfig(syserr0)))+\
                " & "+str(round(relerr0,sigfig(relerr0)))+\
                " & "+str(round(difference1,sigfig(difference1)))+\
                " & "+str(round(syserr1,sigfig(syserr1)))+\
                " & "+str(round(relerr1,sigfig(relerr1)))+\
                " & "+str(round(difference3,sigfig(difference3)))+\
                " & "+str(round(syserr3,sigfig(syserr3)))+\
                " & "+str(round(relerr3,sigfig(relerr3)))+"\\\\")
            # print syssummary[-1]
    # Total ("Sum") systematic row.
    # NOTE(review): the last relative error divides by
    # RMS[histvarnames[2]] while its numerator uses histvarnames[3] --
    # looks like an index slip; confirm against the table layout.
    syssummary.append(str(round(mom[0],2))+"$\pm$"+str(round(mom[1],2))+\
        " & "+ str(round(rms[0],2))+"$\pm$"+str(round(rms[1],2))+\
        " & "+str(round(self.RMSsysdiff[self.histvarnames[0]][self.histstatenames[0]]['Sum'],2))+\
        " & "+str(round(self.RMSsyserr[self.histvarnames[0]][self.histstatenames[0]]['Sum'],2))+\
        " & "+str(round(self.RMSsyserr[self.histvarnames[0]][self.histstatenames[0]]['Sum']/self.RMS[self.histvarnames[0]][self.histstatenames[0]],2))+\
        " & "+str(round(self.RMSsysdiff[self.histvarnames[1]][self.histstatenames[0]]['Sum'],2))+\
        " & "+str(round(self.RMSsyserr[self.histvarnames[1]][self.histstatenames[0]]['Sum'],2))+\
        " & "+str(round(self.RMSsyserr[self.histvarnames[1]][self.histstatenames[0]]['Sum']/self.RMS[self.histvarnames[1]][self.histstatenames[0]],2))+\
        " & "+str(round(self.RMSsysdiff[self.histvarnames[3]][self.histstatenames[0]]['Sum'],2))+ \
        " & "+str(round(self.RMSsyserr[self.histvarnames[3]][self.histstatenames[0]]['Sum'],2))+ \
        " & "+str(round(self.RMSsyserr[self.histvarnames[3]][self.histstatenames[0]]['Sum']/self.RMS[self.histvarnames[2]][self.histstatenames[0]],2))+"\\\\")
    # summary.append("p (MeV/c) & &"+str(self.histstatenames[0])+" & "+str(self.histstatenames[1])+" & $\chi^{2}$/ndf & "+
    #               +str(self.histstatenames[2])+" & $\chi^{2}$/ndf \\\\")
    # print mom, self.RMS, self.RMSErr, self.Chi2
    # Statistical summary row per scattering variable.  Two systematic-error
    # terms were dropped from this row (kept below as comments):
    # "$\pm$"+str(round(self.RMSsyserr[histvar][self.histstatenames[1]]["Sum"],2))
    # "$\pm$"+str(round(self.RMSsyserr[histvar][self.histstatenames[2]]["Sum"],2))
    for histvar in self.histvarnames:
        summary.append(str(round(mom[0],2))+"$\pm$"+str(round(mom[1],2))+\
            "& $\ "+histvar+"$ & "+str(round(self.RMS[histvar][self.histstatenames[0]],2))+ \
            "$\pm$"+str(round(self.RMSErr[histvar][self.histstatenames[0]],2))+ \
            "$\pm$"+str(round(self.RMSsyserr[histvar][self.histstatenames[0]]["Sum"],2))+ \
            " & "+str(round(self.RMS[histvar][self.histstatenames[1]],2))+ \
            "$\pm$"+str(round(self.RMSErr[histvar][self.histstatenames[1]],2))+ \
            " & "+str(round(self.Chi2[histvar][self.histstatenames[1]][0],1))+ \
            " / "+ str(self.Chi2[histvar][self.histstatenames[1]][1])+ \
            " & "+str(round(self.RMS[histvar][self.histstatenames[2]],2))+ \
            "$\pm$"+str(round(self.RMSErr[histvar][self.histstatenames[2]],2))+ \
            " & "+str(round(self.Chi2[histvar][self.histstatenames[2]][0],1))+ \
            " / "+ str(self.Chi2[histvar][self.histstatenames[2]][1]) +"\\\\")
        # print summary[-1]
    f.Close()
    return [summary, syssummary]
rootDir = TDirectory() overDir = TDirectory() rootDir = inFile.FindObjectAny(cscRootName) overDir = rootDir.FindObjectAny(overviewName) hist = TH1D() #Page 1 hist = overDir.Get("h_csc_calib_numSignificant") hist.Draw() cnv.Print(outFileName + "(", outFileType) #Page 2 cnv.Clear() cnv.Divide(2, 2) i = 1 cnv.cd(i) hist = overDir.Get("h_csc_calib_pedCompareOverview") hist.Draw() i += 1 cnv.cd(i) hist = overDir.Get("h_csc_calib_noiseCompareOverview") hist.Draw() i += 1 cnv.cd(i) hist = overDir.Get("h_csc_calib_pedChi2Overview")
def plotAndFit(self): # Plot self.setstyle() # Get D0 histogram out of file 'loadFile' h1 = self.loadFile.Get('hist') h1.UseCurrentStyle() # Plot D0 mass distribution before fit c1 = TCanvas('c1', 'Canvas1', 0, 0, 700, 500) c1.Clear() h1.Draw() c1.Update() c1.SaveAs('D0.pdf') # Fit h1 with Breit-Wigner distribution breitW = TF1('breitW', BreitWig, 1.7, 2.0, 5) breitW.SetNpx(1000) fkt = TF1("fkt", "gaus", 1.8, 1.95) # The value given in breitW.SetParameter(1, 1.8648) is the # PDG value of the D0 mass breitW.SetParameter(0, 0.05) # Width breitW.SetParameter(1, 1.8648) # Mean Value breitW.SetParameter(2, 0.1) # Norm breitW.SetParameter(3, 1.75) # Bkg parameter 1 breitW.SetParameter(4, 0.0) # Bkg parameter 2 gStyle.SetStatH(0.4) # Choose fit options: Print fit probability, # chisquare/number of degress of freedom, # errors, name/value of parameters gStyle.SetOptFit(1111) h1.UseCurrentStyle() # Start fit with breitW h1.Fit("breitW") self.breitWFit = breitW.Clone() #h1.Fit("fkt") #mean = fkt.GetParameter(1) #error = fkt.GetParError(1) #fkt.SetLineColor(3) #fkt.Draw("SAME") breitW.Draw("same") c1.Update() c1.SaveAs("Fit_gaus.pdf") print "width=", breitW.GetParameter(0), " mean=", breitW.GetParameter( 1) #print "sigma=",fkt.GetParameter(2)," gauss_mean=",mean x_min = 1.8 x_max = 1.95 # def sigToBkgRatio(self, x_min, x_max): ges = self.breitWFit.Integral(x_min, x_max) bkg_fkt_1 = TF1("bkg_fkt_1", bkg1, 1.7, 2.0, 5) bkg_fkt_2 = TF1("bkg_fkt_2", bkg2, 1.7, 2.0, 5) bkg_fkt_3 = TF1("bkg_fkt_3", bkg3, 1.7, 2.0, 5) print par bkg_1 = bkg_fkt_1.Integral(x_min, x_max) print bkg_1 bkg_2 = bkg_fkt_2.Integral(x_min, x_max) print bkg_2 bkg_3 = bkg_fkt_3.Integral(x_min, x_max) print bkg_3 sbr1 = (ges - bkg_1) / bkg_1 sbr2 = (ges - bkg_2) / bkg_2 sbr3 = (ges - bkg_3) / bkg_3 print "signal-to-background ratio between ", x_min, " and ", x_max, " for linear background:\n (S/B)_lin= ", sbr1, "\n\n\n" print "signal-to-background ratio between ", x_min, " and ", x_max, " for exponential 
background:\n (S/B)_exp= ", sbr2, "\n\n\n" print "signal-to-background ratio between ", x_min, " and ", x_max, " for quadratic background:\n (S/B)_quad= ", sbr2, "\n\n\n"
def getUnscaledPDFs(ma=0, makePlots=False):
    """ Generate a set of TH1D's to be turned into RooDataHist objects.
    Be careful they have the same axis limits and binning as the RooDataSet.
    Takes axion mass (in keV) as a parameter.

    Free names from module scope: dsi (paths), np, wl, plt, warnings, and the
    binning constants nBP, pLo, pHi, ppb.
    Writes h1..h7 into <latSWDir>/data/specPDFs.root.
    """
    from ROOT import TFile, TH1D, gROOT

    # output files
    rOut = "%s/data/specPDFs.root" % dsi.latSWDir
    tf = TFile(rOut,"RECREATE")
    # Remember the current ROOT directory so we can cd back before writing.
    td = gROOT.CurrentDirectory()
    # print("Generating unscaled PDFs, eLo %.1f eHi %.1f epb %.2f: %s" % (eLo, eHi, epb, rOut))

    # === 1. axion flux
    # axion flux scale.
    # NOTE: to do the fit and set a new limit, we set g_ae=1.
    # To plot an expected flux, we would use a real value.
    # Redondo's note: I calculated the flux using gae = 0.511*10^-10
    # for other values of gae use: FLUX = Table*[gae/(0.511*10^-10)]^2
    gae = 1
    gRat = (gae / 5.11e-11)
    redondoScale = 1e19 * gRat**2  # convert table to [flux / (keV cm^2 d)]

    axData = []
    with open("%s/data/redondoFlux.txt" % dsi.latSWDir) as f1:  # 23577 entries
        lines = f1.readlines()[11:]  # skip the 11-line header
        for line in lines:
            data = line.split()
            axData.append([float(data[0]),float(data[1])])
    axData = np.array(axData)

    def sig_ae(E,m):
        """ E, m are in units of keV.  must multiply result by sig_pe """
        beta = (1 - m**2./E**2.)**(1./2)
        return (1 - (1./3.)*beta**(2./3.)) * (3. * E**2.) / (16. * np.pi * (1./137.) * 511.**2. * beta)

    # === 2. ge photoelectric xs
    phoData = []
    with open("%s/data/ge76peXS.txt" % dsi.latSWDir) as f2:  # 2499 entries, 0.01 kev intervals
        lines = f2.readlines()
        for line in lines:
            data = line.split()
            phoData.append([float(data[0]),float(data[1])])
    phoData = np.array(phoData)

    # === 3. tritium
    tritData = []
    with open("%s/data/TritiumSpectrum.txt" % dsi.latSWDir) as f3:  # 20000 entries
        lines = f3.readlines()[1:]  # skip the 1-line header
        for line in lines:
            data = line.split()
            conv = float(data[2])  # raw spectrum convolved w/ ge cross section
            if conv < 0: conv = 0.  # clamp unphysical negative values
            tritData.append([float(data[1]),conv])
    tritData = np.array(tritData)

    # NOTE: check sandbox/th1.py for examples of manually filling TH1D's and verifying wl.GetHisto and wl.npTH1D.

    # ROOT output (nBP bins over [pLo, pHi], bin width ppb)
    h1 = TH1D("h1","photoelectric",nBP,pLo,pHi)         # [cm^2 / kg]
    h2 = TH1D("h2","axioelectric",nBP,pLo,pHi)          # [cm^2 / kg]
    h3 = TH1D("h3","axion flux, gae=1",nBP,pLo,pHi)     # [cts / (keV cm^2 d)]
    h4 = TH1D("h4","convolved flux",nBP,pLo,pHi)        # [cts / (keV d kg)]
    h5 = TH1D("h5","tritium",nBP,pLo,pHi)               # [cts] (normalized to 1)

    # manually fill ROOT histos (don't normalize yet)
    # NOTE(review): range(nBP+1) makes the last SetBinContent(iB+1, ...) hit
    # bin nBP+1, i.e. the overflow bin -- confirm this is intended.
    for iB in range(nBP+1):
        ctr = (iB + 0.5)*ppb + pLo
        bLo, bHi = ctr - ppb/2, ctr + ppb/2
        # Empty-slice means raise RuntimeWarning; they are expected here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore",category=RuntimeWarning)

            # if ma>0, we ignore entries with E <= m.

            # photoelectric x-section [cm^2 / kg]
            idx = np.where((phoData[:,0] >= bLo) & (phoData[:,0] < bHi))
            pho = np.mean(phoData[idx][:,1]) * 1000
            if np.isnan(pho) or len(phoData[idx][:,1]) == 0: pho = 0.
            # NOTE(review): .any() returns a bool, so "bool <= ma" is a
            # questionable comparison -- the intent was presumably to zero
            # bins with energies at or below the axion mass; verify.
            if phoData[idx][:,1].any() <= ma: pho = 0.
            h1.SetBinContent(iB+1,pho)

            # axioelectric x-section [cm^2 / kg]
            if ctr > ma: axio = pho * sig_ae(ctr, ma)
            else: axio=0.
            h2.SetBinContent(iB+1,axio)

            # axion flux [flux / (cm^2 d keV)]
            idx = np.where((axData[:,0] >= bLo) & (axData[:,0] < bHi))
            flux = np.mean(axData[idx][:,1]) * redondoScale
            if np.isnan(flux): flux = 0.
            h3.SetBinContent(iB+1, flux)
            # YES, adding 1 here. keeps the 6.6 keV line in the proper place for all binnings.
            # it must have to do w/ the way i'm reading in the data from the text files ...

            # axion flux PDF [flux / (keV d kg)]
            axConv = axio * flux
            h4.SetBinContent(iB+1, axConv)

            # tritium
            idx = np.where((tritData[:,0] >= bLo) & (tritData[:,0] <= bHi))
            trit = np.mean(tritData[idx][:,1])
            if np.isnan(trit): trit = 0.
            h5.SetBinContent(iB+1, trit)

    # Pb210 (from separate file)
    tf2 = TFile("%s/data/Pb210PDFs.root" % dsi.latSWDir)
    h6 = tf2.Get("hPb210TDL")  # with TDL
    h7 = tf2.Get("hPb210")     # without TDL
    h6.SetName("h6")
    h7.SetName("h7")

    if makePlots:
        # === 1. verify the numpy histogram and ROOT histogram give the same output. OK
        x, h210, xpb = wl.npTH1D(h7)
        iE = np.where((x > 45) & (x < 48))
        plt.plot(x[iE], h210[iE], ls='steps', lw=3, c='b')
        plt.xlabel("Energy (keV)", ha='right', x=1)
        plt.tight_layout()
        plt.savefig("%s/plots/sf-pk210.pdf" % dsi.latSWDir)

        from ROOT import TCanvas
        c = TCanvas()
        h7.GetXaxis().SetTitle("Energy (keV)")
        h7.GetXaxis().SetRangeUser(45, 48)
        h7.Draw('hist')
        c.Print('%s/plots/sf-pb210th1d.pdf' % dsi.latSWDir)

        # === 2. print ROOT histos to match w/ numpy histos
        c.Clear(); h1.Draw("hist"); c.Print("%s/plots/root-sigGe.pdf" % dsi.latSWDir)
        c.Clear(); h2.Draw("hist"); c.Print("%s/plots/root-sigAe.pdf" % dsi.latSWDir)
        c.Clear(); h3.Draw("hist"); c.Print("%s/plots/root-axFlux.pdf" % dsi.latSWDir)
        c.Clear(); h4.Draw("hist"); c.Print("%s/plots/root-axPDF.pdf" % dsi.latSWDir)
        c.Clear(); h5.Draw("hist"); c.Print("%s/plots/root-trit.pdf" % dsi.latSWDir)
        c.Clear(); h6.Draw("hist"); c.Print("%s/plots/root-pb210TDL.pdf" % dsi.latSWDir)
        c.Clear(); h7.Draw("hist"); c.Print("%s/plots/root-pb210.pdf" % dsi.latSWDir)

    # Return to the original directory so the Write calls target 'tf'.
    gROOT.cd(td.GetPath())
    h1.Write()
    h2.Write()
    h3.Write()
    h4.Write()
    h5.Write()
    h6.Write()
    h7.Write()
    tf.Close()
def plot_multiple_components(work_dir, root_file_path, plot_config,
                             info_file_path, info_file_index=0):
    """Draw overlaid component histograms for every (level, type) pair.

    Each component listed in ``plot_config['components']`` is read from the
    ROOT file at *root_file_path*, scaled to its cross section (value from
    the whitespace-delimited table at *info_file_path*, row
    *info_file_index*, times its K-factor, times 1000 for pb -> fb) divided
    by its event count, and drawn on shared linear- and log-scale canvases
    saved as PDFs under *work_dir*.

    NOTE(review): Python 2 code (``dict.iteritems``); the loop variable
    ``type`` shadows the builtin throughout.
    """
    import os
    import pandas as pd

    # - Plotting style (registered globally, affects the whole process)
    myStyle = TStyle("myStyle", "My own Root Style")
    myStyle.SetTitleSize(0.3)
    myStyle.SetTitleX(0.3)
    myStyle.SetTitleXOffset(1.5)
    myStyle.SetTitleYOffset(1.5)
    myStyle.SetTitleSize(0.04, "xy")
    myStyle.SetOptStat(0)
    myStyle.SetCanvasColor(0)
    myStyle.SetFrameBorderMode(0)
    gROOT.SetStyle("myStyle")
    line_width = 2     # NOTE(review): unused; widths come from plot_config
    text_size = 0.025  # NOTE(review): unused; sizes come from plot_config
    df = pd.read_table(info_file_path, delim_whitespace=True)
    print("plot_config['components']", plot_config['components'])
    nComponents = len(plot_config['components'])  # NOTE(review): unused
    th = nested_dict(3, dict)  # th[component][type][level] -> histogram
    tf = TFile(root_file_path)
    print(plot_config)
    for level, level_opts in plot_config['levels'].iteritems():
        for type, type_opts in plot_config['types'].iteritems():
            # --- fetch and normalize every component's histogram
            for component, comp_opts in plot_config['components'].iteritems():
                print("xsec_{}".format(comp_opts['name']))
                # cross section in fb: table value * K-factor * 1000 (pb->fb)
                xsec = df.loc[info_file_index, "xsec_{}".format(
                    comp_opts['name'])] * comp_opts['K_factor'] * 1000
                th[component][type][level] = tf.Get(
                    plot_config['histo_name_template_format'].format(
                        comp_opts['name'], type, level))
                print('histo name: {}'.format(
                    plot_config['histo_name_template_format'].format(
                        comp_opts['name'], type, level)))
                print('level: {}'.format(level))
                w = get_weight_to_xsec(
                    xsec, comp_opts['nEvents']
                )  # xsec * 1000 to convert from pb to fb
                th[component][type][level].Scale(w, 'width')
                th[component][type][level].SetLineColor(comp_opts['color'])
                th[component][type][level].SetMarkerColor(comp_opts['color'])
                th[component][type][level].SetLineWidth(
                    comp_opts['line_width'])
                th[component][type][level].SetLineStyle(
                    comp_opts['line_style'])
                print('\nw[key]:', w)

            # - Plotting: one canvas per (type, level) combination
            c = TCanvas('canvas', 'canvas', 800, 600)
            th[plot_config['first_plot']][type][level].GetXaxis().SetTitle(
                type_opts['xlabel'])
            th[plot_config['first_plot']][type][level].GetYaxis().SetTitle(
                type_opts['ylabel'])
            # optional fixed y-range from the configuration
            if 'ranges' in plot_config:
                th[plot_config['first_plot']][type][level].GetYaxis(
                ).SetRangeUser(plot_config['ranges']['y_min'],
                               plot_config['ranges']['y_max'])
            gPad.SetLeftMargin(0.14)
            gPad.SetBottomMargin(0.13)
            gPad.SetRightMargin(0.03)
            gPad.SetTopMargin(0.03)
            label_posx, label_posy = plot_config['label'][
                'pos_x'], plot_config['label']['pos_y']
            label_int_posx, label_int_posy = plot_config['label_int'][
                'pos_x'], plot_config['label_int']['pos_y']
            text_vshift = plot_config['label']['shift_y']
            labels = []
            labels_int = []
            labels_param = []  # NOTE(review): never filled (code disabled)
            # header label
            labels.append(
                TLatex(plot_config['label']['pos_x'],
                       plot_config['label']['pos_y'],
                       plot_config['label']['text']))
            labels[0].SetNDC(True)
            labels[0].SetTextSize(plot_config['label']['text_size'])
            # one "total cross section" label per component
            for i, (comp, comp_opts) in enumerate(
                    plot_config['components'].iteritems(), 1):
                xsec = df.loc[info_file_index, "xsec_{}".format(
                    comp_opts['name'])] * comp_opts['K_factor'] * 1000
                text = "#sigma_{{tot}}({}) = {:.2f} fb".format(
                    comp_opts['label'], xsec)
                labels.append(
                    TLatex(label_posx, label_posy - i * text_vshift, text))
                labels[i].SetNDC(True)
                labels[i].SetTextSize(plot_config['label']['text_size'])
            # level-name header for the integral labels
            labels_int.append(
                TLatex(plot_config['label_int']['pos_x'],
                       plot_config['label_int']['pos_y'], level_opts['name']))
            labels_int[0].SetNDC(True)
            labels_int[0].SetTextSize(plot_config['label_int']['text_size'])
            # one "integrated cross section" (0..1500) label per component
            for i, (comp, comp_opts) in enumerate(
                    plot_config['components'].iteritems(), 1):
                # NOTE(review): here th is indexed by comp_opts['name'],
                # whereas the fill loop above keys it by the component key.
                # Only safe if the two always coincide in the configs used
                # with this function -- confirm against the configs.
                bin1 = th[comp_opts['name']][type][level].FindBin(000.0)
                bin2 = th[comp_opts['name']][type][level].FindBin(1500.0)
                xsec = th[comp_opts['name']][type][level].Integral(
                    bin1, bin2, 'width')
                text = "#sigma_{{int}}({}) = {:.2f} fb".format(
                    comp_opts['label'], xsec)
                labels_int.append(
                    TLatex(label_int_posx, label_int_posy - i * text_vshift,
                           text))
                labels_int[i].SetNDC(True)
                labels_int[i].SetTextSize(
                    plot_config['label_int']['text_size'])
            legend = TLegend(plot_config['legend']['bl_corner_x'],
                             plot_config['legend']['bl_corner_y'],
                             plot_config['legend']['tr_corner_x'],
                             plot_config['legend']['tr_corner_y'])
            legend.SetTextSize(plot_config['legend']['textsize'])
            for comp, comp_opts in plot_config['components'].iteritems():
                legend.AddEntry(th[comp][type][level], comp_opts['label'])
            ############################
            ## --- Linear plot
            c.SetLogy(0)
            th[plot_config['first_plot']][type][level].Draw("hist")
            for comp in plot_config['remaining_plots']:
                th[comp][type][level].Draw("SAME hist")
            for label in labels:
                label.Draw("SAME")
            for label in labels_int:
                label.Draw("SAME")
            legend.Draw("SAME")
            c.SaveAs(
                os.path.join(
                    work_dir,
                    '{}_{}_lin.pdf'.format(type_opts['name'], level)))
            ############################
            ## --- Log plot (legend only, no TLatex labels here)
            c.Clear()
            c.SetLogy()
            th[plot_config['first_plot']][type][level].Draw("hist")
            for comp in plot_config['remaining_plots']:
                th[comp][type][level].Draw("SAME hist")
            legend.Draw("SAME")
            c.SaveAs(
                os.path.join(
                    work_dir,
                    '{}_{}_log.pdf'.format(type_opts['name'], level)))
def plot_all_components(workdir, root_file_path, info_file, lumi):
    """Overlay the gg (all / SM-only / A-only) and qq Zh components.

    For each kinematic variable in ``types``, the four generator-level
    histograms are read from *root_file_path*, scaled to their cross
    sections (read/derived from *info_file*, converted pb -> fb, 10000
    events per sample), and saved as linear- and log-scale PDFs in
    *workdir*.

    NOTE(review): *lumi* is currently unused -- confirm whether a
    luminosity scaling was intended here.
    """
    import pandas as pd
    import os

    # Histogram-name templates; {} is filled with the variable name.
    histo_names = {
        'all': 'gg_zh1_all_{}_generator-level',
        'SM_only': 'gg_zh1_SM_only_{}_generator-level',
        'A_only': 'gg_zh1_A_only_{}_generator-level',
        'qq': 'qq_zh1_{}_generator-level'
    }
    labels = {
        'all': 'gg - all',
        'SM_only': 'gg - SM only.',
        'A_only': 'gg - A only',
        'qq': 'qq'
    }
    types = {
        'mZh': {'xlabel': 'm_{inv}(Zh) [GeV]', 'name': 'mZh'},
        'h_pt': {'xlabel': 'p_{T}(h) [GeV]', 'name': 'h_Pt'},
        'Z_pt': {'xlabel': 'p_{T}(Z) [GeV]', 'name': 'Z_Pt'}
    }
    info = pd.read_csv(info_file, delim_whitespace=True)
    # qq cross section from sin(beta-alpha); gg cross sections doubled.
    info['xsec_gg_Zh_qq'] = 0.74829 * info['sinba'] * info['sinba']
    info['xsec_gg_Zh_all'] = info['xsec_gg_Zh_all'] * 2.0
    info['xsec_gg_Zh_SM_only'] = info['xsec_gg_Zh_SM_only'] * 2.0
    info['xsec_gg_Zh_A_only'] = info['xsec_gg_Zh_A_only'] * 2.0
    th = {'all': None, 'SM_only': None, 'A_only': None, 'qq': None}
    # BUGFIX: was `TFile(root_file)` -> NameError; the parameter is
    # `root_file_path`.
    tf = TFile(root_file_path)
    for type in types.keys():  # `type` shadows the builtin; kept for style
        for component in histo_names.keys():
            th[component] = tf.Get(histo_names[component].format(type))
            xsec = info['xsec_gg_Zh_{}'.format(component)]
            w = get_weight_to_xsec(
                xsec * 1000, 10000)  # xsec * 1000 to convert from pb to fb
            th[component].Scale(w, "width")
            th[component].SetLineColor(style_color[component])
            th[component].SetMarkerColor(style_color[component])
            th[component].SetLineWidth(line_width)
            print('\nw[key]:', w)
        # - Plotting (axis titles are set on 'qq', the frame histogram)
        c = TCanvas('canvas', 'canvas', 800, 600)
        th['qq'].GetXaxis().SetTitle(types[type]['xlabel'])
        th['qq'].GetYaxis().SetTitle('d#sigma/dm_{Zh^{0}} [fb/bin]')
        gPad.SetLeftMargin(0.14)
        gPad.SetBottomMargin(0.15)
        label_posx, label_posy = 0.63, 0.6
        text1 = "2HDM Type-II"
        text2 = "m_{{A}}={:.0f} GeV #Gamma_{{A}}={:.3f} GeV".format(
            info.mA[0], info.Gamma_A[0])
        text3 = "cos(#beta-#alpha)={:.2f} tan(#beta)={:.2f}".format(
            info.cba[0], info.tb[0])
        text_vshift = 0.06
        label1 = TLatex(label_posx, label_posy, text1)
        label1.SetNDC(True)
        label1.SetTextSize(text_size)
        label2 = TLatex(label_posx, label_posy - text_vshift, text2)
        label2.SetNDC(True)
        label2.SetTextSize(text_size)
        label3 = TLatex(label_posx, label_posy - 2.0 * text_vshift, text3)
        label3.SetNDC(True)
        label3.SetTextSize(text_size)
        legend = TLegend(0.65, 0.70, 0.85, 0.82)
        legend.SetTextSize(0.035)
        for key in histo_names.keys():
            legend.AddEntry(th[key], labels[key])
        ############################
        ## --- Linear plot
        c.SetLogy(0)
        # BUGFIX: the first histogram must establish the frame, so it is
        # drawn without "SAME" (consistent with the log plot below).
        th['qq'].Draw("hist")
        th['all'].Draw("SAME hist")
        th['SM_only'].Draw("SAME hist")
        th['A_only'].Draw("SAME hist")
        label1.Draw("SAME")
        label2.Draw("SAME")
        label3.Draw("SAME")
        legend.Draw("SAME")
        c.SaveAs(
            os.path.join(workdir, '{}_lin.pdf'.format(types[type]['name'])))
        ############################
        ## --- Log plot
        c.Clear()
        c.SetLogy()
        th['qq'].Draw("hist")
        th['all'].Draw("SAME hist")
        th['SM_only'].Draw("SAME hist")
        th['A_only'].Draw("SAME hist")
        label1.Draw("SAME")
        label2.Draw("SAME")
        label3.Draw("SAME")
        legend.Draw("SAME")
        c.SaveAs(
            os.path.join(workdir, '{}_log.pdf'.format(types[type]['name'])))
stack.GetXaxis().SetTitleSize(0.055) stack.GetYaxis().SetTitleSize(0.055) stack.GetXaxis().CenterTitle(True) stack.GetYaxis().CenterTitle(True) stack.GetXaxis().SetRangeUser(40, l_pTbin[-1]) stack.SetMinimum(0) stack.SetMaximum(2) stack.GetXaxis().SetNdivisions(5, 5, 1) #canvas.SetGridx() #canvas.SetGridy() CMSextraText = "#scale[0.8]{%s}" % (Common.getCMSextraText(isSimOnly=True)) # CMS label CMS_lumi.CMS_lumi(pad=canvas, iPeriod=0, iPosX=0, CMSextraText=CMSextraText, lumiText=Common.getLumitext(era)) canvas.SaveAs(outDir + "/tauPromptRate_pT.pdf") canvas.Clear() legend.Clear() stack.Clear()
'kkMass', 'bdMass', 'kpiMass', 'bdbarMass', 'kpibarMass', ] fIn = TFile.Open('result_flatNtuple.root') hDict = {hName: {} for hName in histNames} for hName in histNames: for dName in datasetNames: hDict[hName].update({dName: fIn.Get(dName + "_" + hName)}) canv = TCanvas('c1', 'c1', 1600, 1000) for hname, hdic in hDict.iteritems(): canv.Clear() canv.Divide(2, 1) pad1 = canv.GetPad(1) pad2 = canv.GetPad(2) pad1.SetLogy(True) pad2.SetLogy(False) canv.SaveAs('hi.{0}.pdf['.format(hname)) for nname, hist in hdic.iteritems(): print nname + "_" + hname hist.SetTitle(nname) hist.GetXaxis().SetTitle(hname) pad1.cd() hist.Draw() pad2.cd() hist.Draw() canv.SaveAs('hi.{0}.pdf'.format(hname))
def draw_momenta_ks3pi(datatype, mode, label, test):
    """Compare data and MC track-momentum spectra for the Ks3pi mode.

    Reads the momentum histograms (Ks, pi1, pi2, pi-) and their corrected
    variants from the per-datatype selection files, draws 2x2 data/MC
    overlay canvases (raw and corrected), exports them as EPS/PDF, and
    writes a Data/MC ratio table named '<sname>_syst'.

    NOTE(review): the loop below rebinds the *datatype* parameter
    ('data/MC' combinations are passed as a '/'-separated string).
    """
    modekey = tools.get_modekey(mode)
    sname = attr.modes[modekey]['sname'].lower()
    figname = '%s_momenta' % sname
    figpath = os.path.join(attr.figpath, label, 'trkmtm')
    epsfile = set_file(extbase=figpath, comname=figname, ext='eps')
    tab = DHadTable()
    tab.row_append(['Name', 'Data/MC'])
    # --------------------------------------------------
    # histogram dicts, keyed 'data' / 'mc'
    h_pks = {}
    h_ppi1 = {}
    h_ppi2 = {}
    h_ppim = {}
    h_pks_c = {}   # "_c" = corrected variants
    h_ppi1_c = {}
    h_ppi2_c = {}
    h_ppim_c = {}
    for datatype in datatype.split('/'):
        selfile = get_selfile(datatype, mode, label, test=test)
        selfile = selfile.replace('/trkmtm/', '/trkmtm2/')
        f = TFile(selfile)
        # NOTE(review): if datatype is none of signal/generic/data, `tp`
        # keeps its value from the previous iteration (or is undefined on
        # the first one) -- confirm callers only pass these three.
        if datatype == 'signal' or datatype == 'generic':
            tp = 'mc'
        if datatype == 'data':
            tp = 'data'
        h_pks[tp] = f.Get('h_pks' + tp)
        h_ppi1[tp] = f.Get('h_ppi1' + tp)
        h_ppi2[tp] = f.Get('h_ppi2' + tp)
        h_ppim[tp] = f.Get('h_ppim' + tp)
        # NOTE(review): 'h_pk_c' (not 'h_pks_c') -- looks intentional but
        # inconsistent with the uncorrected 'h_pks' name; confirm against
        # the file that writes these histograms.
        h_pks_c[tp] = f.Get('h_pk_c' + tp)
        h_ppi1_c[tp] = f.Get('h_ppi1_c' + tp)
        h_ppi2_c[tp] = f.Get('h_ppi2_c' + tp)
        h_ppim_c[tp] = f.Get('h_ppim_c' + tp)
        # NOTE(review): Clear(), not Close() -- presumably to keep the
        # fetched histograms alive while dropping the file contents list;
        # confirm the TFile is not garbage-collected before drawing.
        f.Clear()
    # global data/MC normalization from the Ks momentum spectrum
    ratio = h_pks['data'].Integral() / h_pks['mc'].Integral()
    # --- 2x2 canvas: uncorrected spectra, MC scaled to data
    c1 = TCanvas('c1', 'canvas', 900, 900)
    c1.Divide(2, 2)
    c1.cd(1)
    h_pks['data'].Draw('PE')
    h_pks['mc'].Scale(ratio)
    h_pks['mc'].Draw('SAME')
    c1.cd(2)
    h_ppi1['data'].Draw('PE')
    h_ppi1['mc'].Scale(ratio)
    h_ppi1['mc'].Draw('SAME')
    c1.cd(3)
    h_ppi2['data'].Draw('PE')
    h_ppi2['mc'].Scale(ratio)
    h_ppi2['mc'].Draw('SAME')
    c1.cd(4)
    h_ppim['data'].Draw('PE')
    h_ppim['mc'].Scale(ratio)
    h_ppim['mc'].Draw('SAME')
    c1.Print(epsfile)
    tools.eps2pdf(epsfile)
    # --------------------------------------------------
    # --- 2x2 canvas: corrected spectra; each pad scaled by its own
    #     data/MC integral ratio, and the ratio-of-ratios tabulated.
    c1.Clear()
    c1.Divide(2, 2)
    c1.cd(1)
    h_pks_c['data'].Draw('PE')
    lratio = h_pks_c['data'].Integral() / h_pks_c['mc'].Integral()
    #print 'k:', lratio/ratio
    tab.row_append(['K', lratio / ratio])
    h_pks_c['mc'].Scale(lratio)
    h_pks_c['mc'].Draw('SAME,HIST')
    c1.cd(2)
    h_ppi1_c['data'].Draw('PE')
    lratio = h_ppi1_c['data'].Integral() / h_ppi1_c['mc'].Integral()
    #print 'pi1:', lratio/ratio
    tab.row_append(['pi1', lratio / ratio])
    h_ppi1_c['mc'].Scale(lratio)
    h_ppi1_c['mc'].Draw('SAME,HIST')
    c1.cd(3)
    h_ppi2_c['data'].Draw('PE')
    lratio = h_ppi2_c['data'].Integral() / h_ppi2_c['mc'].Integral()
    #print 'pi2:', lratio/ratio
    tab.row_append(['pi2', lratio / ratio])
    h_ppi2_c['mc'].Scale(lratio)
    h_ppi2_c['mc'].Draw('SAME,HIST')
    c1.cd(4)
    h_ppim_c['data'].Draw('PE')
    lratio = h_ppim_c['data'].Integral() / h_ppim_c['mc'].Integral()
    #print 'pim:', lratio/ratio
    tab.row_append(['pim', lratio / ratio])
    h_ppim_c['mc'].Scale(lratio)
    h_ppim_c['mc'].Draw('SAME,HIST')
    tab.column_trim('Data/MC', rnd='.0001')
    figname = '%s_momentacor' % sname
    epsfile = set_file(extbase=figpath, comname=figname, ext='eps')
    c1.Print(epsfile)
    tools.eps2pdf(epsfile)
    tabname = '%s_syst' % sname
    tab.output(tabname, label=label, export_html=False)
maxy = 100. minratio = 0.8 maxratio = 1.2 h1.SetTitle("") h1.GetXaxis().SetTitle("p_{T} [GeV]") h1.GetYaxis().SetTitle("#sigma (p_{T}) > p_{T}^{min}) (pb)") h1.GetXaxis().SetRangeUser(minx, maxx) h1.SetMinimum(miny) #h1.SetMaximum(maxy) c1 = TCanvas("c1", "ratio 1") c1.Clear() rp1 = TRatioPlot(h1, h0) rp1.Draw() rp1.SetSplitFraction(0.5) rp1.GetLowerRefGraph().SetMinimum(minratio) rp1.GetLowerRefGraph().SetMaximum(maxratio) rp1.GetLowerRefGraph().SetLineColor(ROOT.kBlue) rp1.GetLowerRefGraph().SetMarkerColor(ROOT.kBlue) rp1.GetLowerRefGraph().SetLineWidth(2) rp1.GetUpperPad().cd() h2.Draw("same hist")
def plotModel():
    """Build the tritium + flat-bkg + 68Ge-K RooFit model and plot it.

    Loads the skimmed energy spectrum for the enriched (or natural)
    detectors, assembles three extended RooHistPdf components, draws the
    pre-fit RooPlot, replicates the same plot with numpy/matplotlib as a
    cross-check, runs a Migrad fit, and saves the workspace (with the fit
    result and model) to fitWorkspace.root.

    Relies on module-level configuration: dsi, dsList, enr, eLo/eHi, epb,
    pLo/pHi/nBP/ppb, eff, plus helpers getEffCorrTH1D, getBkgPDF, peakPDF,
    getSigma, normPDF, and the wl/plt/np/RF/ROOT imports.
    """
    from ROOT import TFile, TH1D
    # skim file name encodes the dataset list, e.g. latDS012.root
    tf = TFile("%s/data/latDS%s.root" % (dsi.latSWDir,
                                         ''.join([str(d) for d in dsList])))
    tt = tf.Get("skimTree")
    tCut = "isEnr==1" if enr is True else "isEnr==0"
    hitE = ROOT.RooRealVar("trapENFCal", "Energy", eLo, eHi, "keV")
    hEnr = ROOT.RooRealVar("isEnr", "isEnr", 0, 1, "")
    # hitW = ROOT.RooRealVar("weight", "weight", 1, 1000, "")
    fData = ROOT.RooDataSet("data", "data", tt, ROOT.RooArgSet(hitE, hEnr),
                            tCut)
    # fData = ROOT.RooDataSet("data", "data", tt, ROOT.RooArgSet(hitE, hEnr, hitW), "", "weight")
    nData = fData.numEntries()
    # 'import' is a Python keyword, hence the getattr() calls below
    fitWorkspace = ROOT.RooWorkspace("fitWorkspace", "Fit Workspace")
    getattr(fitWorkspace, 'import')(hitE)
    getattr(fitWorkspace, 'import')(fData)
    # getattr(fitWorkspace,'import')(fWeight)
    tf2 = TFile("%s/data/specPDFs.root" % dsi.latSWDir)
    pdfList = ROOT.RooArgList("shapes")
    # --- tritium component (h5 from the spec-PDF file)
    nTr = 1000  # initial amplitude guess
    hTr = tf2.Get("h5")
    if eff:
        hTr = getEffCorrTH1D(hTr, pLo, pHi, nBP)
    trNum = ROOT.RooRealVar("amp-trit", "amp-trit", nTr, 0., 50000.)
    trDH = ROOT.RooDataHist("tr", "tr", ROOT.RooArgList(hitE),
                            RF.Import(hTr))
    hitE.setRange(eLo, eHi)
    trPdf = ROOT.RooHistPdf("trPdf", "trPdf", ROOT.RooArgSet(hitE), trDH, 2)
    trExt = ROOT.RooExtendPdf("ext-trit", "ext-trit", trPdf, trNum)
    pdfList.add(trExt)
    # --- flat background component
    nBk = 1000  # initial amplitude guess
    hBkg = getBkgPDF(eff)
    bkgNum = ROOT.RooRealVar("amp-bkg", "amp-bkg", nBk, 0., 10000.)
    bkgDH = ROOT.RooDataHist("bkg", "bkg", ROOT.RooArgList(hitE),
                             RF.Import(hBkg))
    hitE.setRange(eLo, eHi)
    bkgPdf = ROOT.RooHistPdf("bkgPdf", "bkgPdf", ROOT.RooArgSet(hitE),
                             bkgDH, 2)
    bkgExt = ROOT.RooExtendPdf("ext-bkg", "ext-bkg", bkgPdf, bkgNum)
    pdfList.add(bkgExt)
    # --- 68Ge K-shell peak at 10.37 keV
    nPk = 100  # initial amplitude guess
    hPk = peakPDF(10.37, getSigma(10.37), "68GeK", eff)
    pkDH = ROOT.RooDataHist("pk", "pk", ROOT.RooArgList(hitE),
                            RF.Import(hPk))
    hitE.setRange(eLo, eHi)
    pkPdf = ROOT.RooHistPdf("pkPdf", "pkPdf", ROOT.RooArgSet(hitE), pkDH, 2)
    pkNum = ROOT.RooRealVar("amp-68GeK", "amp-68GeK", nPk, 0.0, 1000.)
    pkExt = ROOT.RooExtendPdf("ext-68GeK", "ext-68GeK", pkPdf, pkNum)
    pdfList.add(pkExt)
    model = ROOT.RooAddPdf("model", "total pdf", pdfList)
    # --- rooplot before fitting
    fSpec = hitE.frame(RF.Range(eLo, eHi), RF.Bins(int((eHi - eLo) / epb)))
    # wouter's note: DON'T DELETE
    # "the default behavior is when you plot a p.d.f. on an empty frame it is
    # plotted with unit normalization. When you plot it on a frame with data in
    # it, it will normalize to the number of events in that dataset."
    # (then after you do a fit, the pdf normalization changes again ...)
    fData.plotOn(fSpec)
    # 1 -- individual components at their initial fit values
    # (to plot in numpy, divide by the bin width of the original pdf; do
    # NOT divide when integrating)
    trExt.plotOn(fSpec, RF.LineColor(ROOT.kMagenta),
                 RF.Normalization(nTr, ROOT.RooAbsReal.Raw))
    bkgExt.plotOn(fSpec, RF.LineColor(ROOT.kGreen),
                  RF.Normalization(nBk, ROOT.RooAbsReal.Raw))
    pkExt.plotOn(fSpec, RF.LineColor(ROOT.kBlue),
                 RF.Normalization(nPk, ROOT.RooAbsReal.Raw))
    # 2 -- alternative: plot the model components normalized to the data
    # model.plotOn(fSpec, RF.LineColor(ROOT.kRed))
    # model.plotOn(fSpec, RF.Components("ext-trit"), RF.LineColor(ROOT.kMagenta), RF.Name("ext-trit"))
    # model.plotOn(fSpec, RF.Components("ext-bkg"), RF.LineColor(ROOT.kGreen), RF.Name("ext-bkg"))
    # model.plotOn(fSpec, RF.Components("ext-68GeK"), RF.LineColor(ROOT.kBlue), RF.Name("ext-68GeK"))
    from ROOT import TCanvas
    c = TCanvas("c", "c", 1400, 1000)
    fSpec.SetTitle("")
    fSpec.Draw()
    c.Print("%s/plots/spectrum-before.pdf" % dsi.latSWDir)
    c.Clear()
    # === replicate the rooplot with numpy (no weights) ===
    tCut = "isEnr" if enr else "!isEnr"
    tCut += " && trapENFCal >= %.1f && trapENFCal <= %.1f" % (eLo, eHi)
    n = tt.Draw("trapENFCal", tCut, "goff")
    trapE = tt.GetV1()
    trapE = [trapE[i] for i in range(n)]  # copy out of the ROOT buffer
    x, hData = wl.GetHisto(trapE, eLo, eHi, epb)
    # plt.plot(x, hData, ls='steps', c='b')  # normal histo
    hErr = np.asarray([np.sqrt(h) for h in hData])  # statistical error
    plt.errorbar(x, hData, yerr=hErr, c='k', ms=5, linewidth=0.5, fmt='.',
                 capsize=1, zorder=1)  # pretty convincing rooplot fake
    # get (eff-corrected) histos and normalize them to the global energy range
    x1, y1, _ = wl.npTH1D(hTr)
    x2, y2, _ = wl.npTH1D(hBkg)
    x3, y3, _ = wl.npTH1D(hPk)
    x1, y1 = normPDF(x1, y1, eLo, eHi)
    x2, y2 = normPDF(x2, y2, eLo, eHi)
    x3, y3 = normPDF(x3, y3, eLo, eHi)
    # === 1. plot individual components of the model
    # *** NOTE: to plot, divide by (bin width when generated); to
    # integrate, don't. trit/peak PDFs use ppb, the bkg PDF uses epb. ***
    plt.plot(x1, y1 * nTr / ppb, ls='steps', c='m', lw=2,
             label="trit init: %d int: %d" % (nTr, np.sum(y1 * nTr)))
    plt.plot(x2, y2 * nBk / epb, ls='steps', c='g', lw=2,
             label="bkg init: %d int: %d" % (nBk, np.sum(y2 * nBk)))
    plt.plot(x3, y3 * nPk / ppb, ls='steps', c='b', lw=2,
             label="68GeK init: %d int: %d" % (nPk, np.sum(y3 * nPk)))
    # === 2. replicate the way a rooplot normalizes multiple pdf's based on
    # the number of data counts (before a fit)
    # nModel = 3
    # yTot = np.add(y1, y2, y3)
    # yTot *= nData/nModel
    # plt.plot(x1, yTot, ls='steps', c='r', lw=2, label="total, nData %d sum %d max %.1f" % (nData, int(np.sum(yTot)), np.amax(yTot)))
    # plt.plot(x1, y1 * nData/2, ls='steps', c='b', lw=2, label="tritium")
    # plt.plot(x2, y2 * nData/2, ls='steps', c='g', lw=2, label="bkg")
    # === 3. check a peak generated with a different binning than the global one
    # print(np.sum(y3))  # this is 1
    # print(np.sum(y3 * (epb/xpb3)))  # bigger than 1; matches how rooplot normalizes it when plotted alone
    # print(np.sum(y3 * nPk))
    # plt.plot(x3, y3 * nPk / xpb3, ls='steps', label="pk init value: %d int: %d" % (nPk, np.sum(y3 * nPk)))
    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("Counts / %.1f keV" % epb, ha='right', y=1)
    plt.legend(loc=1)
    plt.xlim(eLo, eHi)
    plt.ylim(ymin=0)
    plt.tight_layout()
    # plt.show()
    plt.savefig("%s/plots/sf4-mplplot.pdf" % dsi.latSWDir)
    # === alright, now run the fit and check the plot again
    minimizer = ROOT.RooMinimizer(
        model.createNLL(fData, RF.NumCPU(2, 0), RF.Extended(True)))
    minimizer.setPrintLevel(-1)
    minimizer.setStrategy(2)
    minimizer.migrad()
    fitResult = minimizer.save()
    # covQual()==3 indicates a full, accurate covariance matrix (converged)
    print("Fitter is done. Fit Cov Qual:", fitResult.covQual())
    # save workspace to a TFile
    getattr(fitWorkspace, 'import')(fitResult)
    getattr(fitWorkspace, 'import')(model)
    tf3 = TFile("%s/data/fitWorkspace.root" % dsi.latSWDir, "RECREATE")
    fitWorkspace.Write()
    tf3.Close()
def sysCompare(): parser = argparse.ArgumentParser( description='Produce Systematic Uncertainties From Varied inputs.') parser.add_argument( '-d', '--directory', required=False, default="", type=str, help= "Directory for comparison plots. If none given, plots are not saved") parser.add_argument( '-l', '--list', required=False, type=str, default="", help= "List of histograms/graphs to use. If not given, all objects are used." ) parser.add_argument('-o', '--output', required=True, type=str, help="Output file to produce") # parser.add_argument('-a','--all',requred=False,type=bool,help="whether to use all valid objects in the first file") parser.add_argument('-r', '--ratioMode', required=False, type=bool, default=False, help="Whether to produce plots of the ratios") parser.add_argument( '-c', '--centerValueMode', required=False, default=0, type=int, help= "Whether to use the first file as the central value (0) or the average (1)" ) parser.add_argument('-t', '--titles', required=True, type=str, nargs='+', help="Titles for each file") parser.add_argument('-f', '--files', metavar='Files', required=True, type=str, nargs='+', help="Files to use") parser.add_argument('-D', '--DeletePoints', required=False, default=0, type=int, help="Delete the beginning N points for TGraphs") # parser.add_argument('files',metavar='Files',type=str,nargs='+',help="Files to use") parser.add_argument('-T', '--OverallTitle', required=False, type=str, default="", help="Title to appear in legend headers") parser.add_argument('-y', '--LogY', required=False, type=int, default=False, help="Whether to force the plots to have log Y.") args = parser.parse_args() setStyle() stringListOfHists = args.list #list of hists/graphs listOfHists = stringListOfHists.split() # bUseAllObjs=False # if (len(listOfHists) == 0): # bUseAllObjs=True bUseAllObjs = (len(listOfHists) == 0) bRatioMode = args.ratioMode if (bRatioMode): print("Ratio Mode is enabled.") #array of files fileNames = args.files fileTitles = 
args.titles directory = args.directory outputFileName = args.output CenterValueMode = args.centerValueMode LogYMode = False LogYMode = args.LogY numDelete = args.DeletePoints print("List of hists/graphs to use:"), print(listOfHists) if (bUseAllObjs): print("Will use all valid objects found in the first file") else: print("List of files to use:") print(fileNames) print("That is %d files" % (len(fileNames))) print("List of titles:") print(fileTitles) print("That is %d titles" % (len(fileTitles))) if (len(fileNames) != len(fileTitles)): print("Error: mismatch in number of files vs titles") exit(1) print("Primary file:") print(fileNames[0]) if (numDelete > 0): print("Will delete the first %d points from TGraphs" % (numDelete)) # list of files files = [] titlesToFileNames = zip(fileTitles, fileNames) titlesToFileNames = set(titlesToFileNames) print(titlesToFileNames) fileNamesToTitles = zip(fileNames, fileTitles) fileNamesToTitles = dict(fileNamesToTitles) filesToTitles = {} cleanTitlesToFiles = {} filesToCleanTitles = {} for filename in fileNames: print("Opening File %s" % (filename)) tfile = TFile.Open(filename, "READ") print("opened file %s" % (tfile.GetName())) files.append(tfile) filesToTitles[tfile] = fileNamesToTitles[filename] # clean name that can be added to root object names cleanedTitle = cleanName(fileNamesToTitles[filename]) filesToCleanTitles[tfile] = cleanedTitle primaryFile = files[0] if (directory == ""): print("No directory given. 
Output plots will not be saved") else: print("Directory for output %s" % (directory)) # create output file here, before changing directory outputFile = TFile.Open(outputFileName, "RECREATE") OverallTitle = args.OverallTitle canvas = TCanvas("canvas", "canvas", c_width, c_height) if (directory != ""): # check if the directory exists if (not os.path.isdir(directory)): os.makedirs(directory) os.chdir(directory) # This is where it gets easier than compare: just need to find the objects in the files if (bUseAllObjs): print("Building list of objects") #primaryObj ListOfKeys = primaryFile.GetListOfKeys() for key in ListOfKeys: print("Adding item %s" % (key.GetName())) listOfHists.append(key.GetName()) for objName in listOfHists: print("Starting the thing for object %s" % (objName)) #print(" Looking in file %s" % (primaryFile.GetName())) primaryObj = primaryFile.Get(objName) if (not primaryObj): print("Could not find object %s in file %s" % (objName, fileNames[0])) else: print("obj title = %s" % (primaryObj.GetTitle())) objectTitle = primaryObj.GetTitle() listOfObjs = [] for tfile in files: localObj = tfile.Get(objName) if (not localObj): print("Could not find object %s in file %s" % (objName, tfile)) exit(1) else: print("obj title = %s" % (localObj.GetTitle())) localObj.SetName("%s_%s" % (objName, filesToCleanTitles[tfile])) print(" name set to %s" % (localObj.GetName())) localObj.SetTitle(filesToTitles[tfile]) print(" title set to %s" % (localObj.GetTitle())) listOfObjs.append(localObj) legX = 0.6 legY = 0.22 legWidth = 0.2 #0.45 legHeight = 0.4 #0.225 # legend for comparison # leg = TLegend(legX,legY,legX+legWidth,legY+legHeight) # legend for SysUncert # leg2 = TLegend(legX,legY,legX+legWidth,legY+legHeight) # legend for comparison leg = TLegend(legWidth, legHeight, legWidth, legHeight) if (OverallTitle != ""): leg.SetHeader(OverallTitle, "c") # legend for SysUncert leg2 = TLegend(legWidth, legHeight, legWidth, legHeight) # Set Properties and include in legend for i in 
range(len(listOfObjs)): localObj = listOfObjs[i] localTitle = fileTitles[i] color = ROOT.kBlack if (i != 0): if (useCustomColor): color = GetCustomColor(i) else: color = colorList[i] if (i >= len(markerList)): # This check code is redundant for now, in case I # want to switch so something fancier than just looping # marker styles markerStyle = markerList[i % len(markerList)] else: markerStyle = markerList[i] localObj.SetMarkerColor(color) localObj.SetLineColor(color) localObj.SetMarkerStyle(markerStyle) localObj.SetTitle(localTitle) localObj.SetFillColor(0) leg.AddEntry(localObj, localTitle, "LP") leg2.AddEntry(listOfObjs[0], fileTitles[0], "LP") # Two key cases: TGraph or TH1 # For TGraphs we want to do a multigraph # for th1, we have to manually track the y-limits iObjType = GetObjType(listOfObjs[0]) # tgraph : 1 # th1 : 2 # other : 0 canvas.Clear() canvas.cd() if (iObjType == 0): # Not coded yet print("Object is of a type I don't have comparison code for (yet)") continue if (iObjType == 1): # TGraph # Get object name #objName = "TestGraph" # Reset the margins gPad.SetTopMargin(fDefaultTopMargin) gPad.SetLeftMargin(fDefaultLeftMargin) gPad.SetBottomMargin(fDefaultBottomMargin) gPad.SetRightMargin(fDefaultRightMargin) mg = TMultiGraph() for j in range(numDelete): print("Deleting a point from object %s" % (primaryObj.GetName())) primaryObj.RemovePoint(0) # remove the first point mg.SetTitle(primaryObj.GetTitle()) mg.GetXaxis().SetTitle(primaryObj.GetXaxis().GetTitle()) mg.GetYaxis().SetTitle(primaryObj.GetYaxis().GetTitle()) # FIXME is this going to double deleting points from the primary object for lobj in listOfObjs: print("Object starts with %d points" % (lobj.GetN())) for j in range(numDelete): print("Deleting a point from object %s" % (lobj.GetName())) lobj.RemovePoint(0) # remove the first point print("Object ends with %d points" % (lobj.GetN())) mg.Add(lobj) mg.Draw("ALP") mg.GetXaxis().SetLabelSize(AxisLabelSizeX) mg.GetYaxis().SetLabelSize(AxisLabelSizeY) 
#leg.Draw("SAME") legtest = gPad.BuildLegend() legtest.Draw("SAME") if (directory != ""): canvas.Print("%s_Cmp.pdf" % (objName)) canvas.Print("%s_Cmp.png" % (objName)) canvas.Print("%s_Cmp.C" % (objName)) # Now produce systematic uncertainties (for TGraphErrors (sysUncertObj, ListOfRangeHists) = ProduceSystematicFromGraphs(listOfObjs) sysUncertObj.SetName("%s_SysErr" % (objName)) #sysUncertObj.SetName(objName) totalUncertObj = ProduceTotalErrorGraphs(primaryObj, sysUncertObj) totalUncertObj.SetName(objName) #totalUncertObj.SetName("%s_TotalErr" % (objName)) sysUncertObj.SetTitle("Systematic Uncertainty") sysUncertObj.SetFillColor(ROOT.kBlue) sysUncertObj.SetFillStyle(3002) sysUncertObj.Draw("ALP[]5") # reset the primary objects title primaryObj.Draw("LP") leg2.AddEntry(sysUncertObj, "Systematic Uncertainty", "F") #mg.Draw("LP") #leg.Draw("SAME") leg2.Draw("SAME") # not using the autobuild legend here. could get a good location # from the comparison plot #legtest2=gPad.BuildLegend() #legtest2.Draw("SAME") if (directory != ""): canvas.Print("%s_SysUncert.pdf" % (objName)) canvas.Print("%s_SysUncert.png" % (objName)) outputFile.Add(sysUncertObj) outputFile.Add(totalUncertObj) for hist in ListOfRangeHists: outputFile.Add(hist) # Now plot them both in a split canvas canvas.Clear() canvas.Divide(1, 2, 0.01, 0.0) canvas.cd(1) mg.Draw("ALP X+") gPad.SetTopMargin(0.0) leg.Draw("SAME") gPad.SetBottomMargin(0.0) canvas.cd(2) sysUncertObj.Draw("ALP[]5") gPad.SetTopMargin(0.0) gPad.SetBottomMargin(0.1) primaryObj.Draw("LP") leg2.Draw("SAME") if (directory != ""): canvas.Print("%s_SysUncert_Cmp.pdf" % (objName)) canvas.Print("%s_SysUncert_Cmp.png" % (objName)) canvas.Print("%s_SysUncert_Cmp.C" % (objName)) canvas.Clear() canvas.Divide(1, 2, 0.01, 0.0) ratioMg = TMultiGraph() legRatio = TLegend(2 * legWidth, legHeight, 2 * legWidth, legHeight) #RatioArray=[] # Make, draw ratios if (bRatioMode): max_ratio_y = 1. min_ratio_y = 1. 
nPointsFirst = primaryObj.GetN() bDivisionPossible = True for lobj in listOfObjs: if (nPointsFirst != lobj.GetN()): bDivisionPossible = False if (bDivisionPossible): numObjects = 0 for lobj in listOfObjs: numObjects = numObjects + 1 if (lobj == primaryObj): print("Avoided TGraph over itself using object") continue if (lobj.GetName() == primaryObj.GetName()): print("Avoided TGraph over itself using name") continue if (numObjects == 1): print("Avoided TGraph over itself using index") continue ratioName = "%s_Ratio" % (lobj.GetName()) lRatio = DivideTGraphErrors(lobj, primaryObj, ratioName) lRatio.SetTitle("%s / %s" % (lobj.GetTitle(), fileTitles[0])) ratioMg.Add(lRatio) legRatio.AddEntry(lRatio, lRatio.GetTitle(), "LP") # RatioArray.append(lRatio) ratioMg.GetYaxis().SetTitle("Ratio over (%s)" % (fileTitles[0])) canvas.cd(1) mg.Draw("ALP X+") gPad.SetTopMargin(0.0) leg.Draw("SAME") gPad.SetBottomMargin(0.0) canvas.cd(2) ratioMg.Draw("ALP") legRatio.Draw("SAME") if (directory != ""): canvas.Print("%s_Ratio.pdf" % (objName)) canvas.Print("%s_Ratio.png" % (objName)) canvas.Print("%s_Ratio.C" % (objName)) # for histograms, also draw a plot with each of them # separately? 
Useful if the fit functions are visible if (iObjType == 2): # TH1 primaryObj.Draw() # if (LogYMode): # canvas.SetLogy(1) primaryObj.GetYaxis().UnZoom() primaryObj.GetXaxis().SetLabelSize(AxisLabelSizeX) primaryObj.GetYaxis().SetLabelSize(AxisLabelSizeY) primaryObj.GetXaxis().SetTitleSize(kDefaultTitleSizeX) primaryObj.GetYaxis().SetTitleSize(kDefaultTitleSizeY) primaryObj.GetXaxis().SetTitleOffset(kDefaultTitleOffsetX) primaryObj.GetYaxis().SetTitleOffset(kDefaultTitleOffsetY) primaryObj.SetMarkerSize(kMarkerSize) fYMin = GetMinValue(primaryObj) fYMax = GetMaxValue(primaryObj) for lobj in listOfObjs: lobj.SetMarkerSize(kMarkerSize) lobj.Draw("SAME") fYMin = min(fYMin, GetMinValue(lobj)) fYMax = max(fYMax, GetMaxValue(lobj)) if (LogYMode != 0): (fYMin, fYMax) = ExpandRange(fYMin, fYMax) leg.Draw("SAME") primaryObj.GetYaxis().SetRangeUser(fYMin, fYMax) if (LogYMode == 1): canvas.SetLogy(1) primaryObj.GetYaxis().UnZoom() # FIXME temp # primaryObj.GetXaxis().SetRangeUser(0,25) # Draw a title tp = TPaveText(0.3, 0.91, 0.7, 0.99, "NDC") if (objectTitle == ""): tp.AddText(objName) else: tp.AddText(objectTitle) tp.Draw("SAME") if (directory != ""): canvas.Print("%s_Cmp.pdf" % (objName)) canvas.Print("%s_Cmp.png" % (objName)) canvas.Print("%s_Cmp.C" % (objName)) sysUncertObj = ProduceSystematicFromHists(listOfObjs) #sysUncertObj.SetName(objName) # This sets the name to be that of the original object sysUncertObj.SetName("%s_SysErr" % (objName)) # This adds the label SysErr totalUncertObj = ProduceTotalErrorHists(primaryObj, sysUncertObj) totalUncertObj.SetName(objName) #totalUncertObj.SetName("%s_TotalErr" % (objName)) sysUncertObj.SetFillColor(ROOT.kBlue) sysUncertObj.SetFillStyle(3002) #sysUncertObj.Draw("E2") sysUncertObj.Draw("E4") primaryObj.Draw("SAME") leg2.AddEntry(sysUncertObj, "Systematic Uncertainty", "F") leg2.Draw("SAME") if (directory != ""): canvas.Print("%s_SysUncert.pdf" % (objName)) canvas.Print("%s_SysUncert.png" % (objName)) outputFile.Add(sysUncertObj) 
outputFile.Add(totalUncertObj) if (bRatioMode): max_ratio_y = 1. min_ratio_y = 1. nBinsFirst = primaryObj.GetNbinsX() bDivisionPossible = False for hobj in listOfObjs: if (nBinsFirst == hobj.GetNbinsX()): bDivisionPossible = True if (bDivisionPossible): RatioArray = [] #canvas.Divide(1,2,canvSmall,canvSmall) #canvas.cd(1) #gPad.SetBottomMargin(small) #primaryObj.Draw() # Build the ratios. # Draw the thing for lobj in listOfObjs: lRatio = lobj.Clone("%s_Ratio" % (lobj.GetName())) if (bBinomialDivision): lRatio.Divide(lRatio, primaryObj, 1.0, 1.0, "B") else: lRatio.Divide(primaryObj) #lRatio.GetFunction(“myFunction”).SetBit(TF1::kNotDraw) localRatioMin = lRatio.GetBinContent( lRatio.GetMinimumBin()) localRatioMax = lRatio.GetBinContent( lRatio.GetMaximumBin()) if (localRatioMin < min_ratio_y): min_ratio_y = localRatioMin if (localRatioMax > max_ratio_y): max_ratio_y = localRatioMax RatioArray.append(lRatio) # magic adjustments to ratio min/max if (min_ratio_y != 0): if (max_ratio_y > 1. / min_ratio_y): min_ratio_y = 1. / max_ratio_y else: max_ratio_y = 1. / min_ratio_y if (max_ratio_y > 20.): max_ratio_y = 1.3 min_ratio_y = 1.0 / 1.3 #min_ratio_y = pow(min_ratio_y,1.6) #max_ratio_y = pow(max_ratio_y,1.6) legRatio = TLegend(legWidth, legHeight, legWidth, legHeight) RatioArray[0].Draw("HIST E") RatioArray[0].GetYaxis().SetRangeUser(min_ratio_y, max_ratio_y) for lRatio in RatioArray: lRatio.Draw("SAME HIST E") legRatio.AddEntry(lRatio, lRatio.GetTitle(), "LP") legRatio.Draw("SAME") if (directory != ""): canvas.Print("%s_Ratio.pdf" % (objName)) canvas.Print("%s_Ratio.png" % (objName)) canvas.Print("%s_Ratio.C" % (objName)) # primaryObj.Draw() # for lobj in listOfObjs: # lobj.Draw("SAME") # canvas.Print("Test.pdf") outputFile.Write()
def plotFit():
    """Draw the RooFit spectrum-fit result, then reproduce it in matplotlib.

    Loads the saved fit workspace, plots data + full model with ROOT,
    extracts the fitted amplitudes of the tritium ("amp-trit"), flat
    background ("amp-bkg") and 68GeK peak ("amp-68GeK") components, and
    rebuilds the same spectrum (data points, component PDFs, raw and
    efficiency-corrected totals) as a matplotlib figure.

    Relies on module-level globals: dsi, RF, ROOT, plt, np, wl, dsList,
    enr, eff, eLo, eHi, epb, ppb, nB, pLo, pHi, nBP, and the PDF helpers
    (getEffCorrTH1D, getBkgPDF, peakPDF, getSigma, normPDF, getEffCorr,
    getTotalModel).  Writes plots/spectrum-after.pdf and
    plots/sf4-mplafter.pdf under dsi.latSWDir.  Returns None.
    """
    from ROOT import TFile, TCanvas, TH1D

    # --- load the fit workspace and its stored fit result ---
    f = TFile("%s/data/fitWorkspace.root" % dsi.latSWDir)
    fitWorkspace = f.Get("fitWorkspace")
    fData = fitWorkspace.allData().front()
    fitResult = fitWorkspace.allGenericObjects().front()
    nPars = fitResult.floatParsFinal().getSize()
    hitE = fitWorkspace.var("trapENFCal")
    model = fitWorkspace.pdf("model")
    # fitWorkspace.Print()

    # --- ROOT plot: data + full model ---
    fSpec = hitE.frame(RF.Range(eLo, eHi), RF.Bins(nB))
    fData.plotOn(fSpec)
    model.plotOn(fSpec, RF.LineColor(ROOT.kRed), RF.Name("FullModel"))
    c = TCanvas("c", "c", 1400, 1000)
    fSpec.SetTitle("")
    fSpec.Draw()
    c.Print("%s/plots/spectrum-after.pdf" % dsi.latSWDir)
    c.Clear()

    # --- pull the fitted component amplitudes out of the fit result ---
    # NOTE: nPk/nBk/nTr are only bound if the corresponding parameter is
    # present in the fit result; a missing amplitude raises NameError below.
    for i in range(nPars):
        fp = fitResult.floatParsFinal()
        name = fp.at(i).GetName()
        fitVal = fp.at(i).getValV()
        fitErr = fp.at(i).getError()
        print("%s fitVal %.2f error %.2f" % (name, fitVal, fitErr))
        if name == "amp-68GeK": nPk = fitVal
        if name == "amp-bkg": nBk = fitVal
        if name == "amp-trit": nTr = fitVal

    # === duplicate the rooplot in matplotlib ===
    plt.close()

    tf = TFile("%s/data/latDS%s.root" % (dsi.latSWDir, ''.join(str(d) for d in dsList)))
    tt = tf.Get("skimTree")
    # BUG FIX: a dead "isEnr==1"/"isEnr==0" assignment that was immediately
    # overwritten has been removed; only the boolean-style cut was ever used.
    tCut = "isEnr" if enr else "!isEnr"
    tCut += " && trapENFCal >= %.1f && trapENFCal <= %.1f" % (eLo, eHi)
    n = tt.Draw("trapENFCal", tCut, "goff")
    trapE = tt.GetV1()
    trapE = [trapE[i] for i in range(n)]
    x, hData = wl.GetHisto(trapE, eLo, eHi, epb)
    # plt.plot(x, hData, ls='steps', c='b')  # normal histo
    hErr = np.asarray([np.sqrt(h) for h in hData])  # statistical error
    plt.errorbar(x, hData, yerr=hErr, c='k', ms=5, linewidth=0.5,
                 fmt='.', capsize=1, zorder=1)  # pretty convincing rooplot fake

    # --- plot the model components and total ---
    tf2 = TFile("%s/data/specPDFs.root" % dsi.latSWDir)
    # get (eff-corrected) histos and normalize to 1 in the global energy range
    hTr = tf2.Get("h5")
    if eff:
        hTr = getEffCorrTH1D(hTr, pLo, pHi, nBP)
    hBkg = getBkgPDF(eff)
    hPk = peakPDF(10.37, getSigma(10.37), "68GeK", eff)
    x1, y1, xpb1 = wl.npTH1D(hTr)
    x2, y2, xpb2 = wl.npTH1D(hBkg)
    x3, y3, xpb3 = wl.npTH1D(hPk)
    x1, y1 = normPDF(x1, y1, eLo, eHi)
    x2, y2 = normPDF(x2, y2, eLo, eHi)
    x3, y3 = normPDF(x3, y3, eLo, eHi)
    nTot = nTr + nBk + nPk

    # Sanity check: each normalized PDF scaled by its amplitude must
    # integrate back to that amplitude.
    # BUG FIX: the bkg and peak checks previously compared against nTr
    # (copy-paste), so they re-tested the tritium amplitude three times
    # and printed misleading labels.
    if abs(nTr - np.sum(y1 * nTr)) > 3:
        print("Error in trit: nTr %d cts in curve %d" % (nTr, np.sum(y1 * nTr)))
    if abs(nBk - np.sum(y2 * nBk)) > 3:
        print("Error in bkg: nBk %d cts in curve %d" % (nBk, np.sum(y2 * nBk)))
    if abs(nPk - np.sum(y3 * nPk)) > 3:
        print("Error in peak: nPk %d cts in curve %d" % (nPk, np.sum(y3 * nPk)))

    # === reverse the efficiency correction to get the "true" number of counts
    y1c = nTr * getEffCorr(x1, y1, inv=True)
    y2c = nBk * getEffCorr(x2, y2, inv=True)
    y3c = nPk * getEffCorr(x3, y3, inv=True)
    nTotC = np.sum(y1c) + np.sum(y2c) + np.sum(y3c)

    # === plot total model
    pdfs = [[x1, y1, xpb1, nTr], [x2, y2, xpb2, nBk], [x3, y3, xpb3, nPk]]
    xT, yT = getTotalModel(pdfs, eLo, eHi, epb, smooth=True)
    plt.step(xT, yT, c='b', lw=2, label="Raw (no eff. corr): %d cts" % nTot)

    # === plot components of the (uncorrected) model
    # *** NOTE: to plot after the fit, multiply by (global bin width / bin
    # width when generated).  to integrate, don't. ***
    # (epb/epb == 1 for the bkg term: it was generated at the global binning.)
    plt.step(x1, y1 * nTr * (epb / ppb), c='m', lw=2, alpha=0.7, label="Tritium: %d cts" % nTr)
    plt.step(x2, y2 * nBk * (epb / epb), c='g', lw=2, alpha=0.7, label="Bkg: %d cts" % nBk)
    plt.step(x3, y3 * nPk * (epb / ppb), c='c', lw=2, alpha=0.7, label="68GeK %d cts" % nPk)

    # === plot efficiency corrected final model
    pdfs = [[x1, y1c, xpb1, nTr], [x2, y2c, xpb2, nBk], [x3, y3c, xpb3, nPk]]
    xTc, yTc = getTotalModel(pdfs, eLo, eHi, epb, smooth=True, amp=False)
    plt.step(xTc, yTc, c='r', lw=3, label="Efficiency corrected: %d cts" % nTotC)

    # === plot components of the corrected model
    # plt.step(x1, y1c * (epb/ppb), c='orange', lw=2, alpha=0.7, label="trit fit: %d corr: %d" % (nTr, np.sum(y1c)))
    # plt.step(x2, y2c * (epb/epb), c='orange', lw=2, alpha=0.7, label="bkg fit: %d corr: %d" % (nBk, np.sum(y2c)))
    # plt.step(x3, y3c * (epb/ppb), c='orange', lw=2, alpha=0.7, label="peak fit: %d corr: %d" % (nPk, np.sum(y3c)))

    plt.xlabel("Energy (keV)", ha='right', x=1)
    plt.ylabel("Counts / %.1f keV" % epb, ha='right', y=1)
    plt.legend(loc=1, fontsize=12)
    plt.xlim(eLo, eHi)
    plt.ylim(ymin=0)
    plt.tight_layout()
    # plt.show()
    plt.savefig("%s/plots/sf4-mplafter.pdf" % dsi.latSWDir)
def plot_fits(canvas, histos, x_range, x_bin_step, title_formatter,
              save_name, label, y_fit_range, y_range=None, title=None,
              xtitle=None, ytitle=None, hline=None):
    """Fit slices of six 2-D histograms and plot the resulting mu(x) graphs.

    For sector/pad indices 1..6, takes histos[title_formatter.format(i)],
    runs fit_slices() on it, and draws the mean-vs-x graph in a 2x3 grid on
    `canvas`, saving to `save_name`.  A second pass draws every individual
    slice with its fit into '<prefix>_slices.pdf'.

    Parameters:
      canvas          -- ROOT TCanvas to draw the summary grid on
      histos          -- dict of name -> TH2; missing keys fall back to the
                         module-level default_histo2d
      x_range         -- (xmin, xmax) passed to fit_slices and used for hline
      x_bin_step      -- bin stride for slicing
      title_formatter -- format string with one '{}' slot for the pad index;
                         its '_{}'-prefix also names the slices PDF
      save_name       -- output file for the summary canvas
      label           -- ROOT TLatex used for all annotations
      y_fit_range     -- y window for the per-slice fits
      y_range         -- optional (ymin, ymax) for the summary graphs
      title/xtitle/ytitle -- optional canvas annotations
      hline           -- optional y value for a horizontal reference line

    Side effects only (prints PDFs); returns None.
    """
    canvas.Clear()
    canvas.Divide(2, 3)
    root_is_dumb = []  # keep drawn objects alive past loop scope (ROOT GC quirk)
    for i in range(1, 7):
        canvas.cd(i)
        # NOTE(review): this deliberately reuses (shadows) the `title`
        # parameter, so the later `if title:` draws the per-pad formatted
        # name rather than the caller's title — preserved as-is.
        title = title_formatter.format(i)
        graph, slices, fits = fit_slices(histos.get(title, default_histo2d),
                                         x_range, x_bin_step,
                                         y_fit_range=y_fit_range)
        graph.SetMarkerStyle(8)
        graph.SetMarkerSize(1)
        # Apply the requested y window before drawing, if given.
        if y_range:
            graph.GetHistogram().SetMinimum(y_range[0])
            graph.GetHistogram().SetMaximum(y_range[1])
        graph.Draw('AP')
        root_is_dumb.append(graph)
        if hline:
            line = TLine(x_range[0], hline, x_range[1], hline)
            line.SetLineStyle(8)
            line.SetLineWidth(1)
            line.Draw()
            root_is_dumb.append(line)
        if title:
            label.DrawLatex(0.1, 0.925, title)
        if xtitle:
            label.DrawLatex(0.5, 0.015, xtitle)
        if ytitle:
            label.SetTextAngle(90)
            label.DrawLatex(0.035, 0.5, ytitle)
            label.SetTextAngle(0)
    canvas.Print(save_name)

    # For the slices: one multi-page PDF with every slice and its fit.
    slice_can = TCanvas('slice_can', 'slice_can', 1200, 1600)
    slice_pdfname = title_formatter.split('_{}')[0] + '_slices.pdf'
    slice_can.Print(slice_pdfname + '[')  # open multi-page PDF
    for i in range(1, 7):
        title = title_formatter.format(i)
        graph, slices, fits = fit_slices(histos.get(title, default_histo2d),
                                         x_range, x_bin_step,
                                         y_fit_range=y_fit_range)
        # Size of slices page
        nrows = 5
        ncols = int(np.ceil(len(slices) / nrows) + 1)
        slice_can.Clear()
        slice_can.Divide(ncols, nrows)
        for j, (s, f) in enumerate(zip(slices, fits)):
            slice_can.cd(j + 1)
            s.Draw()
            # BUG FIX: this previously called `lab.DrawLatex(...)`, but no
            # `lab` is defined in this function — every other annotation
            # uses the `label` parameter.
            label.DrawLatex(0.15, 0.88,
                            '#mu = {0:6.4f}, #sigma = {1:6.4f}'.format(
                                f.GetParameter(1), f.GetParameter(2)))
        slice_can.Print(slice_pdfname)
    slice_can.Print(slice_pdfname + ']')  # close multi-page PDF
def RunRPFCode(fCTask, fOutputDir, fOutputFile): # logging.basicConfig(level="DEBUG") fNumTriggers = fCTask.GetNumTriggers() print("Found the number of triggers = %f" % (fNumTriggers)) fObservable = fCTask.GetObservable() nObsBins = fCTask.GetNObsBins() fFlowTermMode = fCTask.GetFlowTermModeAssoc() nFixV40Last = fCTask.GetFixV4Threshold() iV1Mode = fCTask.GetFlowV1Mode() iV5Mode = fCTask.GetFlowV5Mode() iV6TMode = fCTask.GetFlowV6TMode() iV6AMode = fCTask.GetFlowV6AMode() fObsBins = [0, 1. / 6, 2. / 6, 3. / 6, 4. / 6, 5. / 6, 1.] # FIXME do the new zt bins if fObservable == 0: print("Error: trigger pt is the observable, haven't coded bins in yet") if fObservable == 1: print("Using z_T as the observable") if fObservable == 2: fObsBins = [0.2, 0.4, 0.8, 1.5, 2.5, 4, 7, 11, 17] #fObsBins=[0.15,0.4,0.8,1.45,2.5,4.2,6.95,11.4,18.6] print("Using associated pt as the observable") # assoc pt print("This analysis is in observable %d, with %d bins" % (fObservable, nObsBins)) iCentBin = fCTask.GetCentBin() print(" Centrality Bin %d" % (iCentBin)) iEPRSet = fCTask.GetEPRSet() fUseEPRSet = fEPRes_Set_0 if iEPRSet == 3: fUseEPRSet = fEPRes_Set_3 res_par = { "R22": fUseEPRSet[iCentBin][1], "R42": fUseEPRSet[iCentBin][3], "R62": fUseEPRSet[iCentBin][5], "R82": 0.0 } # res_par = {"R22": fEPRes_Set_0[iCentBin][1], "R42" : fEPRes_Set_0[iCentBin][3], "R62": fEPRes_Set_0[iCentBin][5], "R82": 0.0} print("Resolution parameters:") print(res_par) # enableInclusiveFit=False # enableRPDepFit=False # enableReduxFit=False # useMinos=False nRebin = 1 MCRescale = -1 if (fCTask.GetMCGenMode()): MCRescale = fCTask.GetMCRescaleFactor() # MCRescale=MCRescaleValue nRebin = 2 * nRebin # Initializing some TGraphs # Py_B_TGraph: TGraphErrors # Graphs for basic RPF Py_ChiSq_TGraph = TGraphErrors(nObsBins) Py_ChiSq_TGraph.SetName("Py_ChiSq_TGraph") Py_B_TGraph = TGraphErrors(nObsBins) Py_B_TGraph.SetName("Py_B_TGraph") Py_V1_TGraph = TGraphErrors(nObsBins) Py_V1_TGraph.SetName("Py_V1_TGraph") 
Py_V2T_TGraph = TGraphErrors(nObsBins) Py_V2T_TGraph.SetName("Py_V2T_TGraph") Py_V2A_TGraph = TGraphErrors(nObsBins) Py_V2A_TGraph.SetName("Py_V2A_TGraph") Py_V3_TGraph = TGraphErrors(nObsBins) Py_V3_TGraph.SetName("Py_V3_TGraph") Py_V4T_TGraph = TGraphErrors(nObsBins) Py_V4T_TGraph.SetName("Py_V4T_TGraph") Py_V4A_TGraph = TGraphErrors(nObsBins) Py_V4A_TGraph.SetName("Py_V4A_TGraph") #Py_TGraphs= [Py_B_TGraph, Py_V2T_TGraph, Py_V2A_TGraph, Py_V3_TGraph, Py_V4T_TGraph, Py_V4A_TGraph] Py_TGraphs = [ Py_B_TGraph, Py_V1_TGraph, Py_V2T_TGraph, Py_V2A_TGraph, Py_V3_TGraph, Py_V4T_TGraph, Py_V4A_TGraph ] # Graphs for RPDep Fit Py_RPDep_ChiSq_TGraph = TGraphErrors(nObsBins) Py_RPDep_ChiSq_TGraph.SetName("Py_RPDep_ChiSq_TGraph") Py_RPDep_B_TGraph = TGraphErrors(nObsBins) Py_RPDep_B_TGraph.SetName("Py_RPDep_B_TGraph") Py_RPDep_V1_TGraph = TGraphErrors(nObsBins) Py_RPDep_V1_TGraph.SetName("Py_RPDep_V1_TGraph") Py_RPDep_V2T_TGraph = TGraphErrors(nObsBins) Py_RPDep_V2T_TGraph.SetName("Py_RPDep_V2T_TGraph") Py_RPDep_V2A_TGraph = TGraphErrors(nObsBins) Py_RPDep_V2A_TGraph.SetName("Py_RPDep_V2A_TGraph") Py_RPDep_V3_TGraph = TGraphErrors(nObsBins) Py_RPDep_V3_TGraph.SetName("Py_RPDep_V3_TGraph") Py_RPDep_V4T_TGraph = TGraphErrors(nObsBins) Py_RPDep_V4T_TGraph.SetName("Py_RPDep_V4T_TGraph") Py_RPDep_V4A_TGraph = TGraphErrors(nObsBins) Py_RPDep_V4A_TGraph.SetName("Py_RPDep_V4A_TGraph") # Reaction Plane Dependent Signal fit unique parameters # Yields Py_RPDep_IP_YieldNS = TGraphErrors(nObsBins) Py_RPDep_IP_YieldNS.SetName("Py_RPDep_IP_YieldNS") Py_RPDep_MP_YieldNS = TGraphErrors(nObsBins) Py_RPDep_MP_YieldNS.SetName("Py_RPDep_MP_YieldNS") Py_RPDep_OP_YieldNS = TGraphErrors(nObsBins) Py_RPDep_OP_YieldNS.SetName("Py_RPDep_OP_YieldNS") Py_RPDep_IP_YieldAS = TGraphErrors(nObsBins) Py_RPDep_IP_YieldAS.SetName("Py_RPDep_IP_YieldAS") Py_RPDep_MP_YieldAS = TGraphErrors(nObsBins) Py_RPDep_MP_YieldAS.SetName("Py_RPDep_MP_YieldAS") Py_RPDep_OP_YieldAS = TGraphErrors(nObsBins) 
Py_RPDep_OP_YieldAS.SetName("Py_RPDep_OP_YieldAS") # Sigmas Py_RPDep_IP_SigmaNS = TGraphErrors(nObsBins) Py_RPDep_IP_SigmaNS.SetName("Py_RPDep_IP_SigmaNS") Py_RPDep_MP_SigmaNS = TGraphErrors(nObsBins) Py_RPDep_MP_SigmaNS.SetName("Py_RPDep_MP_SigmaNS") Py_RPDep_OP_SigmaNS = TGraphErrors(nObsBins) Py_RPDep_OP_SigmaNS.SetName("Py_RPDep_OP_SigmaNS") Py_RPDep_IP_SigmaAS = TGraphErrors(nObsBins) Py_RPDep_IP_SigmaAS.SetName("Py_RPDep_IP_SigmaAS") Py_RPDep_MP_SigmaAS = TGraphErrors(nObsBins) Py_RPDep_MP_SigmaAS.SetName("Py_RPDep_MP_SigmaAS") Py_RPDep_OP_SigmaAS = TGraphErrors(nObsBins) Py_RPDep_OP_SigmaAS.SetName("Py_RPDep_OP_SigmaAS") Py_RPDep_TGraphs = [ Py_RPDep_B_TGraph, Py_RPDep_V1_TGraph, Py_RPDep_V2T_TGraph, Py_RPDep_V2A_TGraph, Py_RPDep_V3_TGraph, Py_RPDep_V4T_TGraph, Py_RPDep_V4A_TGraph ] # Load the Vn TGraphs for triggers and tracks FlowV2TGraph = fCTask.GetTriggerV2() FlowV2AGraph = fCTask.GetTrackV2() FlowV4TGraph = fCTask.GetTriggerV4() FlowV4AGraph = fCTask.GetTrackV4() for iObsBin in range(nObsBins): print("Doing the thing for %s bin %d" % (fCTask.GetObservableName(), iObsBin)) ObsBinCenter = 0.5 * (fObsBins[iObsBin] + fObsBins[iObsBin + 1]) ObsBinWidth = 0.5 * (fObsBins[iObsBin + 1] - fObsBins[iObsBin]) UseLogLikelihood = False if (iObsBin > nSkipLast): continue # UseLogLikelihood=True # Clone these all first so they are not alterred in the original program SigInPlaneHistOrig = fCTask.GetNearEtaDPhiProjEP(iObsBin, 0) SigMidPlaneHistOrig = fCTask.GetNearEtaDPhiProjEP(iObsBin, 1) SigOutPlaneHistOrig = fCTask.GetNearEtaDPhiProjEP(iObsBin, 2) SigInclusiveHistOrig = fCTask.GetNearEtaDPhiProjAll(iObsBin) BkgInPlaneHistOrig = fCTask.GetFarEtaDPhiProjEP(iObsBin, 0) BkgMidPlaneHistOrig = fCTask.GetFarEtaDPhiProjEP(iObsBin, 1) BkgOutPlaneHistOrig = fCTask.GetFarEtaDPhiProjEP(iObsBin, 2) BkgInclusiveHistOrig = fCTask.GetFarEtaDPhiProjAll(iObsBin) SigInPlaneHist = SigInPlaneHistOrig.Clone( "%s_Clone" % (SigInPlaneHistOrig.GetName())) SigMidPlaneHist = 
SigMidPlaneHistOrig.Clone( "%s_Clone" % (SigMidPlaneHistOrig.GetName())) SigOutPlaneHist = SigOutPlaneHistOrig.Clone( "%s_Clone" % (SigOutPlaneHistOrig.GetName())) SigInclusiveHist = SigInclusiveHistOrig.Clone( "%s_Clone" % (SigInclusiveHistOrig.GetName())) BkgInPlaneHist = BkgInPlaneHistOrig.Clone( "%s_Clone" % (BkgInPlaneHistOrig.GetName())) BkgMidPlaneHist = BkgMidPlaneHistOrig.Clone( "%s_Clone" % (BkgMidPlaneHistOrig.GetName())) BkgOutPlaneHist = BkgOutPlaneHistOrig.Clone( "%s_Clone" % (BkgOutPlaneHistOrig.GetName())) BkgInclusiveHist = BkgInclusiveHistOrig.Clone( "%s_Clone" % (BkgInclusiveHistOrig.GetName())) ListOfHists = [ SigInPlaneHist, SigMidPlaneHist, SigOutPlaneHist, SigInclusiveHist, BkgInPlaneHist, BkgMidPlaneHist, BkgOutPlaneHist, BkgInclusiveHist ] # Rescaling by Number of Triggers for numerical betterness SigInPlaneHist.Scale(fNumTriggers) SigMidPlaneHist.Scale(fNumTriggers) SigOutPlaneHist.Scale(fNumTriggers) SigInclusiveHist.Scale(fNumTriggers * 3) BkgInPlaneHist.Scale(fNumTriggers) BkgMidPlaneHist.Scale(fNumTriggers) BkgOutPlaneHist.Scale(fNumTriggers) BkgInclusiveHist.Scale( fNumTriggers * 3) # to correct for previous graphical downscale # FIXME rescale in case of MC # Weighting causes all the histograms to have fractional entries even when not divided by num triggers (which also has fractional weighting) # if MC is done without reweighting, this would be unnecessary (and wrong) # could define a rescale if (MCRescale > 0): for hist in ListOfHists: hist.Scale(MCRescale) if (nRebin > 1): for hist in ListOfHists: hist.Rebin(nRebin) # Fitting just the background rp_fit = three_orientations.BackgroundFit( resolution_parameters=res_par, use_log_likelihood=UseLogLikelihood, signal_region=(0, 0.8), background_region=(0.8, 1.35), use_minos=useMinos) # Fitting the background and signal regions. Same yield parameters across RPs? 
rp_fit_IncSig = three_orientations.InclusiveSignalFit( resolution_parameters=res_par, use_log_likelihood=UseLogLikelihood, signal_region=(0, 0.8), background_region=(0.8, 1.35), use_minos=useMinos) rp_fit_RPSig = three_orientations.SignalFit( resolution_parameters=res_par, use_log_likelihood=UseLogLikelihood, signal_region=(0, 0.8), background_region=(0.8, 1.35), use_minos=useMinos) rp_fit_Redux = three_orientations.BackgroundFit( resolution_parameters=res_par, use_log_likelihood=UseLogLikelihood, signal_region=(0, 0.8), background_region=(0.8, 1.35), use_minos=useMinos) dataBkg = { "background": { "in_plane": BkgInPlaneHist, "mid_plane": BkgMidPlaneHist, "out_of_plane": BkgOutPlaneHist, "inclusive": BkgInclusiveHist } } dataFull = { "background": { "in_plane": BkgInPlaneHist, "mid_plane": BkgMidPlaneHist, "out_of_plane": BkgOutPlaneHist, "inclusive": BkgInclusiveHist }, "signal": { "in_plane": SigInPlaneHist, "mid_plane": SigMidPlaneHist, "out_of_plane": SigOutPlaneHist, "inclusive": SigInclusiveHist } } print("Done loading the histograms?", flush=True) print(BkgInPlaneHist) print("Fitting the background dominated region only") # draw_fit expects data to be in pachyderm histogram1D format # Estimate variables? 
# B is approximately 1/pi times the average value of the histogram in [-pi/2,pi/2] # FIXME implement this maxValue = 0.0 for hist in ListOfHists: localMaxValue = hist.GetBinContent(hist.GetMaximumBin()) if (localMaxValue > maxValue): maxValue = localMaxValue # magic number for safety maxValue = 1.05 * maxValue # the near side has the addition of the maxv2t = fCTask.GetGlobalV2TMax() maxv2a = fCTask.GetGlobalV2AMax() maxv3 = fCTask.GetGlobalV3Max() maxv4t = fCTask.GetGlobalV4TMax() maxv4a = fCTask.GetGlobalV4AMax() MyDefaultArgs = { "limit_B": [0., maxValue], "limit_v2_t": [0.0, maxv2t], "limit_v2_a": [0.0, maxv2a], "limit_v3": [0.0, maxv3], "limit_v4_t": [0.0, maxv4t], "limit_v4_a": [0.0, maxv4a] } #MyDefaultArgs={"limit_B": [0.,maxValue],"limit_v2_a": [0.0, 0.5] } # MyDefaultArgs={"limit_B": [0.,1e7],"limit_v2_a": [0.0, 0.5],"fix_v3":True,"v3":0.0} # Getting the initial values. #FlowV2TValue = FlowV2TGraph.GetY()[] # Trigger pt bin #FlowV4TValue = FlowV4TGraph.GetY()[] # Trigger pt bin #FlowV3Value = # good luck with this one #FlowV2AValue = FlowV2AGraph.GetY()[iObsBin] #FlowV4AValue = FlowV4AGraph.GetY()[iObsBin] # FIXME could use spline interpolation # Need #fPtAMin = fTrackPtProjectionSE->GetXaxis()->GetBinLowEdge(iObsBin+1); #fPtAMax = fTrackPtProjectionSE->GetXaxis()->GetBinUpEdge(iObsBin+1); # or write a function for the c++ task that returns the value # may also want one for the error # FlowV2AValue = FlowV2AGraph.Eval(pTA_Value) # FlowV4AValue = FlowV4AGraph.Eval(pTA_Value) FlowV2AError = 0 FlowV2AValue = fCTask.GetFlowVNAFromObsBin(2, iObsBin) FlowV2AError = fCTask.GetFlowVNAeFromObsBin(2, iObsBin) print("Found Flow V2 = %f +- %f" % (FlowV2AValue, FlowV2AError)) FlowV4AError = 0 FlowV4AValue = fCTask.GetFlowVNAFromObsBin(4, iObsBin) FlowV4AError = fCTask.GetFlowVNAeFromObsBin(4, iObsBin) print("Found Flow V4 = %f +- %f" % (FlowV4AValue, FlowV4AError)) # Now apply fFlowTermMode # 0 = no action # 1 = Fix V2A, V4A to interpolation of flow grpahs # 2 = Limit 
V2A, V4A to flow graph interpolation +- sigma print(" FTM=%d" % fFlowTermMode) if (fFlowTermMode == 0): # Setting initial values MyDefaultArgs['v2_a'] = FlowV2AValue MyDefaultArgs['v4_a'] = FlowV4AValue if (fFlowTermMode == 1): MyDefaultArgs["fix_v2_a"] = True MyDefaultArgs['v2_a'] = FlowV2AValue MyDefaultArgs["fix_v4_a"] = True MyDefaultArgs['v4_a'] = FlowV4AValue if (fFlowTermMode == 2): MyDefaultArgs['v2_a'] = FlowV2AValue MyDefaultArgs['v4_a'] = FlowV4AValue MyDefaultArgs['limit_v2_a'] = [ FlowV2AValue - FlowV2AError, FlowV2AValue + FlowV2AError ] MyDefaultArgs['limit_v4_a'] = [ FlowV4AValue - FlowV4AError, FlowV4AValue + FlowV4AError ] # Update this guess MyDefaultArgs['v3'] = 0.01 # this version would need access to the functors. # if (fCTask.GetInitV2T() > -1): # MyDefaultArgs['v2_t'] = fCTask.GetInitV2T() # if (fCTask.GetInitV2A() > -1): # MyDefaultArgs['v2_a'] = fCTask.GetInitV2A() # if (fCTask.GetInitV3() > -1): # MyDefaultArgs['v3'] = fCTask.GetInitV3() # if (fCTask.GetInitV4T() > -1): # MyDefaultArgs['v4_t'] = fCTask.GetInitV4T() # if (fCTask.GetInitV4A() > -1): # MyDefaultArgs['v4_a'] = fCTask.GetInitV4A() # Switch on V1,V5,V6T,V6A terms if requested # iV1Mode iV5Mode iV6TMode iV6AMode # v5,v6 not implemented here if (iV1Mode == 1): MyDefaultArgs["fix_v1"] = False MyDefaultArgs["v1"] = 0.0 if (iObsBin >= 4): print("doing the fix thing") for data in dataFull: print("data = %s" % (data)) # background or signal ... 
for entry in dataFull[data]: print(entry) FixErrorsOnHist(dataFull[data][entry]) if (iObsBin >= nFixV40Last): MyDefaultArgs["fix_v4_t"] = True MyDefaultArgs['v4_t'] = 0.0 if (fFlowTermMode != 1): # Checking if already fixed to a value MyDefaultArgs["fix_v4_a"] = True MyDefaultArgs['v4_a'] = 0.0 # could also fix off the v6,v5 MyUserArgs = {} #MyUserArgs={"v4_t": 0,"fix_v4_t": True,"v4_a": 0,"fix_v4_a": True} InclusiveUserArgs = {} RPDepUserArgs = {} # Get initial parameters from RP Dep Fit ReduxUserArgs = {} MyUserArgs.update(MyDefaultArgs) InclusiveUserArgs.update(MyDefaultArgs) RPDepUserArgs.update(MyDefaultArgs) ReduxUserArgs.update(MyDefaultArgs) print("Fitting ObsBin %d with user args:" % (iObsBin)) print(MyUserArgs) # The Fitting is done here # (success,data_BkgFit,_) = rp_fit.fit(data=dataBkg, user_arguments=MyUserArgs) try: (success, data_BkgFit, _) = rp_fit.fit(data=dataFull, user_arguments=MyUserArgs) except FitFailed: # except pachyderm.fit.base.FitFailed: print("Caught a Fit failed exception. Continuing") continue except RuntimeError: print("Caught a run-time error. 
Continuing") continue print("Finished doing the fit, maybe") print("Fit result: {fit_result}".format(fit_result=rp_fit.fit_result)) BkgFitResults = rp_fit.fit_result BkgChiSquare = BkgFitResults.minimum_val BkgNDOF = BkgFitResults.nDOF Py_ChiSq_TGraph.SetPoint(iObsBin, ObsBinCenter, BkgChiSquare / BkgNDOF) Py_ChiSq_TGraph.SetPointError(iObsBin, ObsBinWidth, 0) for j in range(len(Py_TGraphs)): Py_Val = BkgFitResults.values_at_minimum[PyParamNames[j]] Py_Err = BkgFitResults.errors_on_parameters[PyParamNames[j]] Py_TGraphs[j].SetPoint(iObsBin, ObsBinCenter, Py_Val) Py_TGraphs[j].SetPointError(iObsBin, ObsBinWidth, Py_Err) # Storing result for next fit InclusiveUserArgs[PyParamNames[j]] = Py_Val RPDepUserArgs[PyParamNames[j]] = Py_Val # Also Set some reasonable starting values for parameters RPDepUserArgs["in_plane_ns_sigma"] = 0.36 RPDepUserArgs["in_plane_as_sigma"] = 0.42 RPDepUserArgs["mid_plane_ns_sigma"] = 0.36 RPDepUserArgs["mid_plane_as_sigma"] = 0.42 RPDepUserArgs["out_of_plane_ns_sigma"] = 0.36 RPDepUserArgs["out_of_plane_as_sigma"] = 0.42 #------------------------------------------------------------------------------------------------------------------ #| 0 | in_plane_ns_amplitude | 0.52E4 | 0.03E4 | | | 0 | 1e+07 | | #| 1 | in_plane_as_amplitude | 530 | 310 | | | 0 | 1e+07 | | #| 2 | in_plane_ns_sigma | 0.363 | 0.023 | | | 0.02 | 0.7 | | #| 3 | in_plane_as_sigma | 0.42 | 0.17 | | | 0.02 | 0.7 | | #| 4 | in_plane_signal_pedestal | 0.0 | 1.0 | | | | | yes | #| 5 | B | 1.891E5 | 0.002E5 | | | 0 | 1e+07 | | #| 6 | v2_t | 0.547E-1 | 0.007E-1 | | | 0.001 | 0.2 | | #| 7 | v2_a | 3.000E-2 | 0.008E-2 | | | 0.03 | 0.5 | | #| 8 | v4_t | 0.005E-4 | 0.714E-4 | | | 0 | 0.5 | | #| 9 | v4_a | 0.005E-4 | 2.563E-4 | | | 0 | 0.5 | | #| 10| v1 | 0.000E1 | 0.000E1 | | | -1 | 1 | yes | #| 11| v3 | 2.5E-3 | 0.6E-3 | | | -1 | 1 | | #| 12| mid_plane_ns_amplitude | 0.88E4 | 0.03E4 | | | 0 | 1e+07 | | #| 13| mid_plane_as_amplitude | 0.49E4 | 0.03E4 | | | 0 | 1e+07 | | #| 14| 
mid_plane_ns_sigma | 0.467 | 0.022 | | | 0.02 | 0.7 | | #| 15| mid_plane_as_sigma | 0.700 | 0.027 | | | 0.02 | 0.7 | | #| 16| mid_plane_signal_pedestal | 0.0 | 1.0 | | | | | yes | #| 17| out_of_plane_ns_amplitude | 0.76E4 | 0.03E4 | | | 0 | 1e+07 | | #| 18| out_of_plane_as_amplitude | 0.361E4 | 0.031E4 | | | 0 | 1e+07 | | #| 19| out_of_plane_ns_sigma | 0.436 | 0.019 | | | 0.02 | 0.7 | | #| 20| out_of_plane_as_sigma | 0.45 | 0.04 | | | 0.02 | 0.7 | | #| 21| out_of_plane_signal_pedestal | 0.0 | 1.0 | | | | | yes | #------------------------------------------------------------------------------------------------------------------ # Py_B = BkgFitResults.values_at_minimum["B"] # Py_B_Err = BkgFitResults.errors_on_parameters["B"] # Py_B_TGraph.SetPoint(iObsBin,ObsBinCenter,Py_B) # Py_B_TGraph.SetPointError(iObsBin,ObsBinWidth,Py_B_Err) # Py_V2T = BkgFitResults.values_at_minimum["v2_t"] # Py_V2T_Err = BkgFitResults.errors_on_parameters["v2_t"] # Py_V2T_TGraph.SetPoint(iObsBin,ObsBinCenter,Py_V2T) # Py_V2T_TGraph.SetPointError(iObsBin,ObsBinWidth,Py_V2T_Err) fit_label = "Test" filename = "%s/PyRPF_BkgFit_ObsBin%d.pdf" % (fOutputDir, iObsBin) plot.draw_fit(rp_fit=rp_fit, data=data_BkgFit, fit_label=fit_label, filename=filename) filename = "%s/PyRPF_BkgFit_ObsBin%d.png" % (fOutputDir, iObsBin) plot.draw_fit(rp_fit=rp_fit, data=data_BkgFit, fit_label=fit_label, filename=filename) # filename="%s/PyRPF_BkgFit_PlotAll_ObsBin%d.pdf" % (fOutputDir,iObsBin) # plot.draw_fit(rp_fit=rp_fit,data=dataFull,fit_label=fit_label,filename=filename) # plot.fit_draw_func(data=data_BkgFit,fit_label=fit_label,filename=filename) if (enableInclusiveFit): # Settings for inclusive fit InclusiveUserArgs["fix_v2_t"] = True InclusiveUserArgs["fix_v2_a"] = True # could estimate yields based on integrals and the B parameter found earlier print(str(InclusiveUserArgs)) print("Fitting background dominated and signal regions") (success_IncSig, data_IncSig, _) = rp_fit_IncSig.fit(data=dataFull, 
user_arguments=InclusiveUserArgs) # try: # (success_IncSig,data_IncSig,_) = rp_fit_IncSig.fit(data=dataFull,user_arguments=InclusiveUserArgs) # except pachyderm.fit.base.FitFailed: # print("Da Fit Failed") print("Fit result: {fit_result}".format( fit_result=rp_fit_IncSig.fit_result)) filename = "%s/PyRPF_IncFit_ObsBin%d.pdf" % (fOutputDir, iObsBin) plot.draw_fit(rp_fit=rp_fit_IncSig, data=data_IncSig, fit_label=fit_label, filename=filename) if (enableRPDepFit): print( "Fitting background dominated and signal regions with RP dependent signal" ) (success_RPSig, data_RPSig, _) = rp_fit_RPSig.fit(data=dataFull, user_arguments=RPDepUserArgs) filename = "%s/PyRPF_RPDepF_ObsBin%d.pdf" % (fOutputDir, iObsBin) plot.draw_fit(rp_fit=rp_fit_RPSig, data=data_RPSig, fit_label=fit_label, filename=filename) RPDepBkgFitResults = rp_fit_RPSig.fit_result RPDepChiSquare = RPDepBkgFitResults.minimum_val RPDepNDOF = RPDepBkgFitResults.nDOF Py_RPDep_ChiSq_TGraph.SetPoint(iObsBin, ObsBinCenter, RPDepChiSquare / RPDepNDOF) Py_RPDep_ChiSq_TGraph.SetPointError(iObsBin, ObsBinWidth, 0) # Save fit parameters if (enableRPDepFit): for j in range(len(Py_RPDep_TGraphs)): Py_Val = RPDepBkgFitResults.values_at_minimum[ PyParamNames[j]] Py_Err = RPDepBkgFitResults.errors_on_parameters[ PyParamNames[j]] Py_RPDep_TGraphs[j].SetPoint(iObsBin, ObsBinCenter, Py_Val) Py_RPDep_TGraphs[j].SetPointError(iObsBin, ObsBinWidth, Py_Err) if (enableReduxFit): for key in rp_fit_RPSig.fit_result.values_at_minimum: if key in BkgFitResults.parameters: print("Loading parameter %s" % (str(key))) ReduxUserArgs[ key] = rp_fit_RPSig.fit_result.values_at_minimum[ key] print( "Fitting Background dominated only with initial parameters from RP Dep Fit (Redux)" ) (success_Redux, data_Redux, _) = rp_fit_Redux.fit(data=dataFull, user_arguments=ReduxUserArgs) filename = "%s/PyRPF_Redux_ObsBin%d.pdf" % (fOutputDir, iObsBin) plot.draw_fit(rp_fit=rp_fit_Redux, data=data_Redux, fit_label=fit_label, filename=filename) # End of obs bin 
loop print("Finished the Observable Bin Loop") # Get Vn from C++ Code C_B_TGraph = fCTask.GetParamGraph(0) C_V1_TGraph = fCTask.GetParamGraph(1) C_V2T_TGraph = fCTask.GetParamGraph(2) C_V2A_TGraph = fCTask.GetParamGraph(3) C_V3_TGraph = fCTask.GetParamGraph(4) C_V4T_TGraph = fCTask.GetParamGraph(5) C_V4A_TGraph = fCTask.GetParamGraph(6) #C_TGraphs=[C_B_TGraph, C_V2T_TGraph, C_V2A_TGraph, C_V3_TGraph, C_V4T_TGraph, C_V4A_TGraph] C_TGraphs = [ C_B_TGraph, C_V1_TGraph, C_V2T_TGraph, C_V2A_TGraph, C_V3_TGraph, C_V4T_TGraph, C_V4A_TGraph ] for graph in C_TGraphs: graph.SetLineColor(CColor) graph.SetMarkerColor(CColor) graph.SetMarkerStyle(CMarkerStyle) for graph in Py_TGraphs: for i in range(1 + nObsBins - nSkipLast): graph.RemovePoint(nSkipLast + 1) graph.SetLineColor(PyColor) graph.SetMarkerColor(PyColor) graph.SetMarkerStyle(PyMarkerStyle) # graph.GetXaxis().SetTitle("z_{T}") if (enableRPDepFit): for graph in Py_RPDep_TGraphs: for i in range(1 + nObsBins - nSkipLast): graph.RemovePoint(nSkipLast + 1) graph.SetLineColor(PyRPDepColor) graph.SetMarkerColor(PyRPDepColor) graph.SetMarkerStyle(PyRPDepMarkerStyle) # graph.GetXaxis().SetTitle("z_{T}") c1 = TCanvas("c1", "c1", 900, 600) c1.cd() # FIXME note that B value must be scaled by num_triggers to be compared # Comparing CTask and Python Bkg Parameters MultiGraphs = [] MergedList = tuple(zip(C_TGraphs, Py_TGraphs)) for i in range(len(MergedList)): print("i = %d" % (i)) (CGraph, PyGraph) = MergedList[i] c1.Clear() tmg = TMultiGraph() # tmg.Add(CGraph,"lp") # tmg.Add(PyGraph,"lp") tmg.Add(CGraph.Clone(), "lp") tmg.Add(PyGraph.Clone(), "lp") # gROOT.SetOwnership(CGraph,False) # gROOT.SetOwnership(PyGraph,False) tmg.Draw("a") tmg.SetName("tmg_%d" % (i)) tmg.SetTitle(CGraph.GetTitle()) tmg.GetXaxis().SetTitle(CGraph.GetXaxis().GetTitle()) PyGraph.SetTitle(CGraph.GetTitle()) PyGraph.GetXaxis().SetTitle(CGraph.GetXaxis().GetTitle()) MultiGraphs.append(tmg) filename = "RPF_Comp_Param_%s" % (ParamNames[i]) c1.Print("%s/%s.pdf" % 
(fOutputDir, filename)) c1.Print("%s/CFiles/%s.C" % (fOutputDir, filename)) # Comparing Python BkgOnly and InclusiveSignal PyMultiGraphs = [] MergedList = tuple(zip(Py_TGraphs, Py_RPDep_TGraphs)) for i in range(len(MergedList)): print("i = %d" % (i)) (PyBkgGraph, PySigGraph) = MergedList[i] c1.Clear() tmg = TMultiGraph() #tmg.Add(PyBkgGraph,"lp") #tmg.Add(PySigGraph,"lp") tmg.Add(PyBkgGraph.Clone(), "lp") tmg.Add(PySigGraph.Clone(), "lp") tmg.Draw("a") tmg.SetName("tmg_%d" % (i)) tmg.SetTitle(PyBkgGraph.GetTitle()) tmg.GetXaxis().SetTitle(PyBkgGraph.GetXaxis().GetTitle()) PySigGraph.SetTitle(PyBkgGraph.GetTitle()) PySigGraph.GetXaxis().SetTitle(PyBkgGraph.GetXaxis().GetTitle()) PyMultiGraphs.append(tmg) filename = "RPF_CompBkgSig_Param_%s" % (ParamNames[i]) c1.Print("%s/%s.pdf" % (fOutputDir, filename)) c1.Print("%s/CFiles/%s.C" % (fOutputDir, filename)) # Drawing the ChiSquare Graphs # print("Saving to file %s" % (fOutputFile)) # OutFile = TFile(fOutputFile,"UPDATE") # print("Opened file %s" % (OutFile.GetName())) print("Trying to get the output file from the c++ task") OutFile = fCTask.GetOutputFile() # for graph in Py_TGraphs: # OutFile.Add(graph) # graph.Write() # if (enableRPDepFit): # for graph in Py_RPDep_TGraphs: # OutFile.Add(graph) # graph.Write() # Add the chisq/ndf and parameter graphs to the CTask fCTask.InputPyBkgChiSqGraph(Py_ChiSq_TGraph) print("About to add %d PyBkg Graphs to CTask" % (len(Py_TGraphs))) for i in range(len(Py_TGraphs)): print("Adding graph [ %d ] = %s" % (i, Py_TGraphs[i].GetName())) fCTask.InputPyBkgParamGraph(i, Py_TGraphs[i]) if (enableRPDepFit): print("About to add %d PyRODepBkg Graphs to CTask" % (len(Py_RPDep_TGraphs))) fCTask.InputPyRPSChiSqGraph(Py_RPDep_ChiSq_TGraph) for i in range(len(Py_RPDep_TGraphs)): fCTask.InputPyRPSParamGraph(i, Py_RPDep_TGraphs[i]) print("about to try deleting recursive objects") del MultiGraphs del PyMultiGraphs # Input the Covariance matrices # Should maybe find a nice way to save the covariance 
matrices # TH2D ? print("Writing File...") # OutFile.Write() print("Successfully wrote file! (maybe)") # OutFile.Close() print("Closed file!") print("=======================================================") print("Done with the python part") print("=======================================================") fCTask.Run_Part2()
def plot_qcd(infile, rebin=1, bkg_file="../hh2bbbb_limit/classifier_reports/reports_no_bias_corr_SM_mixing_fix/BM0/20171120-160644-bm0.root"):
    """Compare the QCD MC BDT-classifier shape with the mixed-data background model.

    Draws the normalized BDT distributions (mixed data, bias-corrected mixed
    data, and the HT/flavour-summed QCD MC) on the top pad and their ratios
    to QCD MC on the bottom pad, then saves qcd_plot.png / qcd_plot.pdf in
    the working directory.

    Parameters:
        infile   -- ROOT file holding the per-HT-bin QCD MC classifier histograms
        rebin    -- rebin factor applied to all histograms
        bkg_file -- ROOT file holding the mixed-data background classifier histogram

    NOTE(review): depends on module-level helpers/globals not visible in this
    chunk (setTDRStyle, CMS_lumi, setLegend, get_bias_corrected_histo,
    samples) -- confirm they are in scope where this is called.
    """
    f = TFile.Open(infile)
    f2 = TFile.Open(bkg_file)
    # Canvas geometry and CMS TDR-style margins.
    H_ref = 800
    W_ref = 800
    W = W_ref
    H = H_ref
    iPos = 11
    iPeriod = 4
    c1 = TCanvas("c1", "QCD MC distributions", H_ref, W_ref)
    setTDRStyle()
    T = 0.08 * H_ref
    B = 0.12 * H_ref
    L = 0.12 * W_ref
    R = 0.04 * W_ref
    c1.SetFillColor(0)
    c1.SetBorderMode(0)
    c1.SetFrameFillStyle(0)
    c1.SetFrameBorderMode(0)
    c1.SetLeftMargin(L / W)
    c1.SetRightMargin(R / W)
    c1.SetTopMargin(T / H)
    c1.SetBottomMargin(B / H)
    #c1.SetBottomMargin( 0 )
    # Upper pad: the classifier distributions themselves.
    pad1 = TPad("pad1", "pad1", 0, 0.4, 1, 1.0)
    pad1.SetTopMargin(0.1)
    pad1.SetBottomMargin(0.03)
    pad1.Draw()
    pad1.cd()
    #pad1.SetLogy()
    # Mixed-data background estimate, before and after bias correction.
    bkg = f2.Get("bkg_appl/classifier-20171120-160644-bm0_bkg_appl")
    bkg_corr = get_bias_corrected_histo(bkg)
    for b in range(1, bkg.GetNbinsX() + 1):
        print bkg.GetBinContent(b), bkg_corr.GetBinContent(b)
    # Normalize both background shapes to unit area.
    bkg.Scale(1 / bkg.Integral())
    bkg.Rebin(rebin)
    bkg.SetMaximum(0.12)
    #bkg.SetLineWidth(2)
    #c.SetLogY()
    bkg_corr.Scale(1 / bkg_corr.Integral())
    bkg_corr.Rebin(rebin)
    bkg.GetYaxis().SetTitleSize(20)
    bkg.GetYaxis().SetTitleFont(43)
    bkg.GetYaxis().SetTitleOffset(1.40)
    bkg.GetYaxis().SetLabelFont(43)
    bkg.GetYaxis().SetLabelSize(18)
    #bkg.GetYaxis().SetTitle("Events")
    pad1.SetTickx(1)
    CMS_lumi(pad1, iPeriod, iPos)
    legend = setLegend(0.7, 0.60, 0.90, 0.85)
    #leg.SetTextSize(0.033);
    #leg.SetFillColor(0);
    #leg.SetNColumns(1);
    #pl2.SetHeader(training);
    bkg.Draw("e1")
    bkg.GetXaxis().SetTitle("BDT")
    legend.AddEntry(bkg, "Mixed data", "p")
    # Build the total QCD MC shape: fetch an existing histogram only to reuse
    # its binning (Reset empties it), then accumulate every flavour state and
    # HT range into it.
    total_mc = f.Get("QCD_HT2000toInf_m_%s/classifier-20171120-160644-bm0_QCD_HT2000toInf_m_%s" % ("bbbb", "bbbb"))
    total_mc.Reset()
    assert total_mc.Integral() == 0
    for state in ["bbcc", "bbll", "bbbb", "cccc"]:
        hist = f.Get("QCD_HT2000toInf_m_%s/classifier-20171120-160644-bm0_QCD_HT2000toInf_m_%s" % (state, state))
        hist.Reset()
        assert hist.Integral() == 0
        for htrange in ["700to1000", "1000to1500", "1500to2000", "2000toInf", "200to300", "300to500", "500to700"]:
            sampname = "QCD_HT%s" % htrange
            qcdname = "%s_m" % sampname
            #print "QCD_all_%s/classifier-20171120-160644-bm0_QCD_all_%s" % (state, state)
            myhist = f.Get("%s_%s/classifier-20171120-160644-bm0_%s_%s" % (qcdname, state, qcdname, state))
            # Weight each HT sample by xsec*BR / generated events
            # (standard + extension samples combined).
            samples_std = samples[sampname]
            samples_ext = samples["%s_ext" % sampname]
            n_events = samples_std["nevents"] + samples_ext["nevents"]
            xs_br = samples_ext["xsec_br"]
            scalef = xs_br / n_events
            myhist.Scale(scalef)
            hist.Add(myhist)
        total_mc.Add(hist)
    total_mc.Rebin(rebin)
    total_mc.SetLineColor(ROOT.kBlue)
    total_mc.SetLineWidth(1)
    total_mc.SetMarkerStyle(8)
    total_mc.SetMarkerColor(ROOT.kBlue)
    total_mc.Scale(1 / total_mc.Integral())
    total_mc.Draw("E1 SAME")
    legend.AddEntry(total_mc, "QCD MC", "p")
    for bin in range(1, bkg.GetNbinsX() + 1):
        print bin, bkg.GetBinContent(bin), bkg_corr.GetBinContent(bin)
    bkg_corr.SetLineColor(ROOT.kRed)
    bkg_corr.SetMarkerColor(ROOT.kRed)
    bkg_corr.Draw("E1 same")
    legend.AddEntry(bkg_corr, "Mixed data bias corrected", "p")
    #ks = hlist[0][0].KolmogorovTest(hlist[1][0])
    # print("KS: ", ks)
    # print("Chi2: ", hlist[0][0].Chi2Test(hlist[1][0], "UU NORM"))
    latex = TLatex()
    latex.SetNDC()
    latex.SetTextSize(0.035)
    latex.SetTextColor(1)
    latex.SetTextFont(42)
    latex.SetTextAlign(33)
    legend.Draw("same")
    #if(ymax > 1000): TGaxis.SetMaxDigits(3)
    """for i in range(len(hs)):
        hs[i].SetMaximum(ymax)
        herr[i].SetMaximum(ymax)
        plotH(hlist[i], hs[i], herr[i], dofill[i], residuals)
        if i == len(hs) - 1:
            herr[i].Draw("Esameaxis")
    """
    bkg.GetXaxis().SetLabelSize(0.)
    legend.Draw("same")
    c1.cd()
    # Lower pad: ratios of the two background estimates to the QCD MC shape.
    pad2 = TPad("pad2", "pad2", 0, 0.05, 1, 0.4)
    pad2.SetTopMargin(0.)
    pad2.SetBottomMargin(0.2)
    pad2.Draw()
    pad2.cd()
    ratio = bkg.Clone("ratio")
    ratio.Divide(total_mc)
    ratio.SetMinimum(0.)
    ratio.SetMaximum(2.)
    ratio_corr = bkg_corr.Clone("ratio_corr")
    ratio_corr.Divide(total_mc)
    """h_err = total_mc.Clone("error_bar")
    h_err.GetXaxis().SetRangeUser(0, 1)
    #h_err.Reset()
    #herr.Rebin(rebin)
    h_err.GetXaxis().SetTitle("BDT classifier")
    h_err.SetFillStyle(3005)
    h_err.SetFillColor(ROOT.kBlue)
    h_err.SetLineColor(922)
    h_err.SetLineWidth(0)
    h_err.SetMarkerSize(0)
    h_err.SetMarkerColor(922)
    #h_err.SetMinimum(0.)
    #h_sig.SetLineStyle(1)
    #h_sig.SetLineWidth(2)
    #h_sig.SetLineColor(sam_opt["sig"]['linecolor'])
    #Set error centered at zero as requested by ARC
    for ibin in range(1, h_err.GetNbinsX()+1):
        h_err.SetBinContent(ibin, 0. )
    #If not loading already morphed fit results
    if postfit_file == None:
        for ibin in range(1, h_err.GetNbinsX()+1):
            h_err.SetBinError(ibin, math.sqrt((err * h_err.GetBinContent(ibin))**2 + h_data_bkg.GetBinError(ibin)**2) )
    else:
        for ibin in range(1, h_err.GetNbinsX()+1):
            if not only_bias_unc:
                h_err.SetBinError(ibin, math.sqrt(h_sig.GetBinError(ibin)**2 + h_data_bkg.GetBinError(ibin)**2) )
            else:
                h_err.SetBinError(ibin, math.sqrt(h_data_bkg.GetBinError(ibin)**2) )
    return h_data_bkg, h_sig, h_err
    """
    ratio.Draw("e1")
    ratio_corr.Draw("e1 same")
    # Reference line at ratio = 1.
    l = TLine(0., 1., 1., 1.)
    l.SetLineStyle(3)
    l.Draw("same")
    """leg_coords = 0.65,0.2,0.9,0.4
    if "legpos" in hsOpt:
        if hsOpt["legpos"] == "top":
            leg_coords = 0.65,0.78,0.9,1.
        elif hsOpt["legpos"] == "left" or hsOpt["legpos"] == "topleft":
            leg_coords = 0.1,0.78,0.35,1.
        elif hsOpt["legpos"] == "middle":
            leg_coords = 0.47,0.0,0.63,0.25
    leg = TLegend(*leg_coords)
    leg.SetTextSize(0.05)
    leg.AddEntry(h_data_bkg, "Data - fitted background", "p")
    leg.AddEntry(h_sig, "HH4b fitted")
    leg.AddEntry(hlist[0][-1], "HH4b fitted x5")
    leg.AddEntry(h_error, "Total uncertainty")
    leg.Draw("same")"""
    c1.SaveAs("qcd_plot.png")
    c1.SaveAs("qcd_plot.pdf")
    c1.Clear()
    f.Close()
# NOTE(review): this fragment relies on names bound earlier in the script
# (histos, latex, can, args, color_draw, kRed/kBlue/kBlack/kGray) -- it is
# part of a longer plotting sequence, not a standalone unit.
# Overlay the simulated W spectrum for events passing the angle cut.
color_draw(histos['sim']['histos_w_pass_angle_FTOF'], kRed, "same")
latex.DrawLatex(0.45, 0.02, 'W (GeV/c^{2})')
latex.DrawLatex(0.3, 0.95, 'Sim w/ Proton in FTOF')
latex.SetTextColor(kBlue)
latex.DrawLatex(0.75, 0.85, 'w/ proton')
latex.SetTextColor(kRed)
latex.DrawLatex(0.75, 0.80, 'w/ #Delta #phi cut')
latex.SetTextColor(kBlack)
can.Print('w_' + args.output_prefix + '.pdf')
# -----------------------------------------------------------
# Plot delta phi
# -----------------------------------------------------------
can.Clear()
can.Divide(2,2)
can.cd(1)
# Data photon-angle distribution in CTOF: all events (gray) with the subset
# passing the #Delta#phi cut drawn on top (red).
histos['data']['histos_theta_gamma_CTOF'].SetLineColor(kBlack)
histos['data']['histos_theta_gamma_CTOF'].SetFillColorAlpha(kGray,1.0)
histos['data']['histos_theta_gamma_CTOF'].Draw()
histos['data']['histos_theta_gamma_pass_angle_CTOF'].SetLineColor(kBlack)
histos['data']['histos_theta_gamma_pass_angle_CTOF'].SetFillColorAlpha(kRed,1.0)
histos['data']['histos_theta_gamma_pass_angle_CTOF'].Draw('same')
latex.DrawLatex(0.45, 0.02, '#theta_{#gamma} (deg)')
latex.SetTextColor(kRed)
latex.DrawLatex(0.15, 0.85, 'w/ #Delta #phi cut')
latex.SetTextColor(kBlack)
latex.DrawLatex(0.3, 0.95, 'Data w/ Proton in CTOF')
def draw_effmomenta_kkpi(figpath, effs, sname, h_pkp, h_pkm, h_ppi):
    """Draw momentum spectra and momentum-dependent efficiencies for K-K+pi.

    Produces two EPS/PDF figure pairs under figpath:
      * '<sname>_effmomenta_plain' -- MC-truth vs reconstructed-MC momentum
        spectra for K-, K+ and pi+;
      * '<sname>_effmomenta'       -- binomial efficiency (mc / mctruth) vs
        momentum for each track.

    Also stores per-bin efficiencies into effs under keys 'km', 'kp', 'pi',
    and the overall efficiency under 'total', then closes effs.

    Parameters:
        figpath -- output directory for the figures
        effs    -- mapping with keys 'boxes_num'/'boxes_denom' already set;
                   supports item assignment and .close() (presumably a
                   shelve -- TODO confirm against caller)
        sname   -- sample name used to build figure file names
        h_pkp, h_pkm, h_ppi -- dicts with 'mc' and 'mctruth' histograms for
                   K+, K- and pi+ momenta respectively

    Fixes relative to the previous version:
      * a single ``loceffs`` dict was stored under every key and then
        ``.clear()``-ed and refilled -- harmless only if ``effs`` pickles on
        assignment (shelve); a fresh dict per key removes the aliasing hazard;
      * loop variable no longer shadows the builtin ``type``;
      * the triplicated clone/divide/fill code is factored into a helper.
    """
    figname = '%s_effmomenta_plain' % sname
    epsfile = set_file(extbase=figpath, comname=figname, ext='eps')
    ratio = 1
    c1 = TCanvas('c1', 'canvas', 900, 850)
    c1.Divide(2, 2)
    # Pad layout: 1 = K-, 2 = K+, 3 = intentionally empty, 4 = pi+.
    c1.cd(1)
    h_pkm['mctruth'].Draw('PE')
    h_pkm['mc'].Scale(ratio)
    h_pkm['mc'].Draw('SAME')
    c1.cd(2)
    h_pkp['mctruth'].Draw('PE')
    h_pkp['mc'].Scale(ratio)
    h_pkp['mc'].Draw('SAME')
    c1.cd(3)
    c1.cd(4)
    h_ppi['mctruth'].Draw('PE')
    h_ppi['mc'].Scale(ratio)
    h_ppi['mc'].Draw('SAME')
    c1.Print(epsfile)
    c1.Clear()
    tools.eps2pdf(epsfile)
    # -------------------------------------
    figname = '%s_effmomenta' % sname
    epsfile = set_file(extbase=figpath, comname=figname, ext='eps')
    # Sumw2 before dividing so the binomial errors are propagated correctly.
    for sample in ('mc', 'mctruth'):
        h_pkm[sample].Sumw2()
        h_pkp[sample].Sumw2()
        h_ppi[sample].Sumw2()
    c1.Clear()
    c1.Divide(2, 2)
    c1.cd(1)
    effs['km'] = _draw_momentum_eff(
        h_pkm, 'Efficiency as a function of K^{-} momentum')
    c1.cd(2)
    effs['kp'] = _draw_momentum_eff(
        h_pkp, 'Efficiency as a function of K^{+} momentum')
    c1.cd(3)
    c1.cd(4)
    effs['pi'] = _draw_momentum_eff(
        h_ppi, 'Efficiency as a function of #pi^{+} momentum')
    # Overall efficiency from the box-counting totals stored by the caller.
    boxes_num = effs['boxes_num']
    boxes_denom = effs['boxes_denom']
    effs['total'] = boxes_num / boxes_denom
    effs.close()
    c1.Print(epsfile)
    c1.Clear()
    tools.eps2pdf(epsfile)


def _draw_momentum_eff(hists, title):
    """Draw the binomial efficiency hists['mc'] / hists['mctruth'] on the
    current pad and return {bin center: efficiency} as a fresh dict."""
    clone = hists['mctruth'].Clone()
    # 'B' option: binomial errors for the efficiency ratio.
    clone.Divide(hists['mc'], hists['mctruth'], 1, 1, 'B')
    clone.SetTitle(title)
    clone.Draw('PE')
    loceffs = {}
    for i in range(1, 1 + clone.GetNbinsX()):
        loceffs[clone.GetBinCenter(i)] = clone.GetBinContent(i)
    return loceffs
def plot_fits_data_sim(canvas, histos_sim, histos_data, x_range, x_bin_step,
                       title_formatter, save_name, label, y_fit_range,
                       y_range=None, title=None, xtitle=None, ytitle=None,
                       hline=None, output_txt=None, start_range=1,
                       end_range=7, manual_fit_par=None, xshift=False):
    """Overlay per-sector fit-slice graphs for data (black) and sim (red).

    For each index i in [start_range, end_range) the 2D histogram named
    title_formatter.format(i) is sliced along x with fit_slices(); the
    resulting mean-vs-x graphs for data and simulation are drawn on one pad
    of ``canvas`` and the page is printed to ``save_name``.  A second pass
    prints every individual simulation slice with its fitted mu/sigma to
    '<prefix>_slices.pdf'.

    Parameters (ROOT / project types -- shapes assumed from usage):
        canvas          -- TCanvas to draw the summary grid on
        histos_sim/data -- dict-like name -> TH2 (default_histo2d fallback)
        x_range         -- (xmin, xmax) pair
        x_bin_step      -- slice width in bins, forwarded to fit_slices
        title_formatter -- format string with one '{}' placeholder
        label           -- TLatex used for all annotations
        y_fit_range     -- fit window forwarded to fit_slices
        hline           -- if set, draw a horizontal reference line at this y
        output_txt, manual_fit_par -- accepted for interface compatibility;
                                      not used here

    Bug fixes:
      * ``can.Update()`` referenced an undefined name (the parameter is
        ``canvas``) -- now ``canvas.Update()``;
      * ``lab.DrawLatex(...)`` in the slices loop referenced an undefined
        name (the parameter is ``label``) -- now ``label.DrawLatex(...)``.
    """
    canvas.Clear()
    canvas.Divide(3, 2)
    # Keep references so PyROOT does not garbage-collect drawn objects.
    root_is_dumb = []
    # Shift pad numbering when the index range does not start at 1.
    can_shift = 0
    if start_range < 0:
        can_shift = -start_range + 1
    if start_range == 0:
        can_shift = 1
    print(' need to shift canvas by {}'.format(can_shift))
    for i in range(start_range, end_range):
        canvas.cd(i + can_shift)
        title_temp = title_formatter.format(i)
        graph_sim, slices_sim, fits_sim = fit_slices(
            histos_sim.get(title_temp, default_histo2d), x_range, x_bin_step,
            y_fit_range, True, 'sim')
        graph_sim.SetMarkerStyle(8)
        graph_sim.SetMarkerSize(1)
        graph_sim.SetMarkerColor(kRed)
        graph_data, slices_data, fits_data = fit_slices(
            histos_data.get(title_temp, default_histo2d), x_range, x_bin_step,
            y_fit_range, False, 'data')
        graph_data.SetMarkerStyle(9)
        graph_data.SetMarkerSize(1)
        graph_data.SetMarkerColor(kBlack)
        multi_graph_data_sim = TMultiGraph()
        multi_graph_data_sim.Add(graph_data)
        multi_graph_data_sim.Add(graph_sim)
        multi_graph_data_sim.Draw('AP')
        multi_graph_data_sim.GetHistogram().GetXaxis().SetRangeUser(
            x_range[0], x_range[1])
        # BUGFIX: was can.Update() -- 'can' is not defined in this function.
        canvas.Update()
        if y_range:
            multi_graph_data_sim.GetHistogram().SetMinimum(y_range[0])
            multi_graph_data_sim.GetHistogram().SetMaximum(y_range[1])
            multi_graph_data_sim.Draw('AP')
            root_is_dumb.append(multi_graph_data_sim)
        else:
            multi_graph_data_sim.Draw('AP')
            root_is_dumb.append(multi_graph_data_sim)
        if hline:
            line = TLine(x_range[0], hline, x_range[1], hline)
            line.SetLineStyle(8)
            line.SetLineWidth(2)
            line.SetLineColor(kRed)
            line.Draw()
            root_is_dumb.append(line)
        if title:
            multi_graph_data_sim.SetTitle("")
            label.DrawLatex(0.1, 0.925, title + str(i))
        if xtitle:
            label.DrawLatex(0.5, 0.015, xtitle)
        if ytitle:
            label.SetTextAngle(90)
            label.DrawLatex(0.035, 0.5, ytitle)
            label.SetTextAngle(0)
    canvas.Print(save_name)
    # Second pass: print every simulation slice with its fit parameters.
    slice_can = TCanvas('slice_can', 'slice_can', 1200, 1600)
    slice_pdfname = title_formatter.split('_{}')[0] + '_slices.pdf'
    slice_can.Print(slice_pdfname + '[')
    for i in range(start_range, end_range):  # usually 1,7
        # Local name (was 'title') so the keyword parameter is not clobbered.
        slice_title = title_formatter.format(i)
        graph_sim, slices_sim, fits_sim = fit_slices(
            histos_sim.get(slice_title, default_histo2d), x_range, x_bin_step,
            y_fit_range, xshift, 'sim')
        # Size of slices page
        nrows = 5
        ncols = int(np.ceil(len(slices_sim) / nrows) + 1)
        slice_can.Clear()
        slice_can.Divide(ncols, nrows)
        for j, (s, f) in enumerate(zip(slices_sim, fits_sim)):
            slice_can.cd(j + 1)
            s.Draw()
            # BUGFIX: was lab.DrawLatex -- 'lab' is not defined here.
            label.DrawLatex(
                0.15, 0.84,
                '#mu = {0:6.4f}, #sigma = {1:6.4f}'.format(
                    f.GetParameter(1), f.GetParameter(2)))
        slice_can.Print(slice_pdfname)
    slice_can.Print(slice_pdfname + ']')
def process(self, set):
    """Produce data/MC yield plots and signal/background profile plots for one channel.

    For every binning in Common.Binning this reads the per-sample NEAT output
    histograms from the scratch area, stacks the MC samples, overlays data,
    and saves <outdir>/<name>_<bin>.eps/.png; unless running an x-check, it
    also saves normalized signal-vs-background shape comparisons as
    profiles_<channel>_<bin>.eps/.png.

    Parameters:
        set -- dict-like with at least a 'channel' key (name shadows the
               builtin ``set``; kept for interface compatibility)
    """
    self.message('Processing channel %s' % set['channel'])
    # Setting the indir directory
    indir = '%s/scratch/%s/YieldHistograms' % (Common.NeatDirectory, self.getParameter('input'))
    if self.isParameter('xcheck'):
        indir = '%s/scratch/%s/XCheckHistograms/%s' % (
            Common.NeatDirectory, self.getParameter('input'),
            self.getParameter('xcheck'))
    # Setting the output directory
    outdir = '%s/scratch/%s/YieldPlots' % (Common.NeatDirectory, self.getParameter('input'))
    if self.isParameter('xcheck'):
        outdir = '%s/scratch/%s/XCheckPlots/%s' % (
            Common.NeatDirectory, self.getParameter('input'),
            self.getParameter('xcheck'))
    # Check for output directory
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Create the list of samples (YieldSignals may be a single name or a list)
    samples = None
    if type(Common.YieldSignals) == list:
        samples = Common.YieldBackgrounds + Common.YieldSignals
    else:
        samples = Common.YieldBackgrounds + [Common.YieldSignals]
    samples = samples + [Common.Data]
    # Set canvas
    canvas = TCanvas("canvas", "Data/MC for NEAT discriminator.", 1000, 800)
    canvas.Clear()
    canvas.SetLeftMargin(0.14)
    canvas.SetRightMargin(0.05)
    canvas.SetBottomMargin(0.13)
    canvas.SetTopMargin(0.03)
    # Loop over the different binning
    for bin in Common.Binning:
        # Histogram holder
        histograms = {}
        stack = THStack('stack', '%s' % set['channel'])
        # Legend
        leg = TLegend(1.0, 0.6, 0.6, 1.0)
        leg.SetNColumns(2)
        # Loop over signal+background files
        for sample in samples:
            # Merge to create the combined channels
            # self.merge(sample, set, indir)
            # Input file with histograms
            infile = '%s/%s.root' % (indir, Common.filename(set, sample))
            print 'Processing sample:', infile
            # Open the file with histograms
            file = TFile(infile)
            # Reading and cloning the histogram
            histogram = file.Get('%s_%s' % (Common.NeatOutputName, bin))
            # Check for signal and background samples; combined 'signals' and
            # 'backgrounds' sums are kept for the profile plots below.
            if sample in Common.YieldSignals:
                # Add all the signal samples
                if not 'signals' in histograms:
                    histograms['signals'] = histogram.Clone()
                else:
                    histograms['signals'].Add(histogram)
            elif sample in Common.YieldBackgrounds:
                # Add all the background samples
                if not 'backgrounds' in histograms:
                    histograms['backgrounds'] = histogram.Clone()
                else:
                    histograms['backgrounds'].Add(histogram)
            # Do not add data to the stack
            if sample != Common.Data:
                histogram.SetLineColor(Common.ColorCodes[sample])
                histogram.SetFillColor(Common.ColorCodes[sample])
                histograms[sample] = histogram.Clone()
                stack.Add(histograms[sample])
                leg.AddEntry(histogram, '%s' % sample, 'f')
            else:
                histogram.SetLineColor(Common.ColorCodes[sample])
                histogram.SetMarkerStyle(8)
                histograms[sample] = histogram.Clone()
        # Creating the data/mc
        # Stack axes only exist after drawing
        stack.Draw('hist')
        # Compute user range: headroom above data including its error bar
        maxdata = histograms[Common.Data].GetMaximum() + histograms[
            Common.Data].GetBinError(
                histograms[Common.Data].GetMaximumBin())
        maxcount = max(stack.GetMaximum(), maxdata)
        signalName = ''
        # Signal name
        if type(Common.YieldSignals) == list:
            signalName = ''.join(Common.YieldSignals)
        else:
            signalName = Common.YieldSignals
        histograms[Common.Data].GetXaxis().SetTitle('%s NEAT output' % signalName)
        histograms[Common.Data].GetYaxis().SetTitle('Event Yield')
        histograms[Common.Data].GetYaxis().SetTitleOffset(1.2)
        histograms[Common.Data].GetYaxis().SetRangeUser(0, 1.1 * maxcount)
        histograms[Common.Data].SetMarkerSize(3)
        # Smaller markers when the binning is fine
        if histograms[Common.Data].GetNbinsX() >= 50:
            histograms[Common.Data].SetMarkerSize(2)
        histograms[Common.Data].SetLineWidth(3)
        histograms[Common.Data].SetMarkerStyle(8)
        histograms[Common.Data].Draw('e1')
        stack.Draw('samehist')
        # Draw data points on top of the stack
        histograms[Common.Data].Draw('e1,same')
        # Draw legend
        if self.getParameter('discriminator-legends', 'false') == 'true':
            leg.Draw()
        # Draw text
        text = TLatex()
        text.SetTextFont(62)
        text.SetTextAlign(32)
        text.SetNDC()
        text.SetTextSize(0.050)
        text.DrawLatex(
            0.94, 0.94 - 0 * 0.05,
            'CMS %s fb^{-1}' % Common.Luminosity[set['channel']])
        text.SetTextColor(13)
        text.DrawLatex(0.94, 0.94 - 1 * 0.05, '%s' % Common.Labels[set['channel']])
        # Update canvas
        canvas.Update()
        # Save the canvas
        outfile = '%s/%s_%s' % (outdir, Common.filename(set), bin)
        canvas.SaveAs('%s.eps' % outfile)
        canvas.SaveAs('%s.png' % outfile)
        # Creating the discriminator shapes
        # No profile plots for xchecks
        if self.isParameter('xcheck'):
            continue
        # Normalizing the histograms
        histograms['signals'].Scale(1. / histograms['signals'].Integral())
        # Normalizing the histograms
        histograms['backgrounds'].Scale(
            1. / histograms['backgrounds'].Integral())
        # Legend
        #if self.isParameter('legend'):
        leg2 = TLegend(1.0, 0.8, 0.8, 1.0)
        #else:
        #leg2 = TLegend(0,0,0,0)
        # Signal is blue and solid fill style
        histograms['signals'].SetLineColor(ROOT.kBlue)
        histograms['signals'].SetFillColor(ROOT.kBlue)
        histograms['signals'].SetFillStyle(3002)
        histograms['signals'].SetLineWidth(3)
        histograms['signals'].GetXaxis().SetTitle('%s NEAT output' % signalName)
        leg2.AddEntry(histograms['signals'], 'Signal', 'f')
        # Background is red and filled with diagonal lines
        histograms['backgrounds'].SetLineColor(ROOT.kRed)
        histograms['backgrounds'].SetFillColor(ROOT.kRed)
        histograms['backgrounds'].SetFillStyle(3004)
        histograms['backgrounds'].SetLineWidth(3)
        histograms['backgrounds'].GetXaxis().SetTitle('%s NEAT output' % signalName)
        leg2.AddEntry(histograms['backgrounds'], 'Background', 'f')
        # Plot first the one with largest amplitude
        smax = histograms['signals'].GetMaximum()
        bmax = histograms['backgrounds'].GetMaximum()
        if smax > bmax:
            histograms['signals'].Draw('hist')
            histograms['backgrounds'].Draw('samehist')
        else:
            histograms['backgrounds'].Draw('hist')
            histograms['signals'].Draw('samehist')
        if self.getParameter('profile-legends', 'true') == 'true':
            leg2.Draw()
        # Save the canvas
        outfile = '%s/profiles_%s_%s' % (outdir, set['channel'], bin)
        canvas.SaveAs('%s.eps' % outfile)
        canvas.SaveAs('%s.png' % outfile)
def DCRComp(run1Start, run1End, run2Start, run2End, filePath):
    """Compare DCR distributions between two MJD calibration run ranges.

    Builds one TChain per run range from the calibration skim files, draws
    the 2D DCR-vs-channel distributions, then produces a per-channel PDF of
    stacked DCR slices with "efficiency" values (fraction of events with
    dcr90 <= 0), a text file of channel-list inconsistencies, a spreadsheet
    of per-channel efficiencies, and an efficiency summary plot for the
    second run range.

    Parameters:
        run1Start, run1End -- inclusive run numbers of the first range
        run2Start, run2End -- inclusive run numbers of the second range
        filePath           -- output directory (trailing '/' optional)

    Raises:
        ValueError -- if a DS5c start run is not a known calibration boundary.

    Fixes relative to the previous version:
      * the DS5c branch for the SECOND run range tested ``run1Start`` against
        the module boundary lists (copy-paste bug) -- it now tests
        ``run2Start`` and its error message says "Second";
      * the boundary checks are an explicit if/elif/else chain so the
        ValueError fires only when the run is in neither module list;
      * the channel-problems text file is closed when done (was leaked).
    """
    # Setup variables
    firstruns = ""    # path of each skim file added for the first run range
    secondruns = ""   # path of each skim file added for the second run range
    calibrationSkim1 = TChain("skimTree", "skimTree")
    calibrationSkim2 = TChain("skimTree", "skimTree")
    # Destination folder, trying to prevent a common formatting error
    if (filePath[-1] != '/'):
        filePath += '/'
    fileName = filePath + "DCRcompforRuns%d-%d_and_%d-%d.pdf" % (run1Start, run1End, run2Start, run2End)
    # Add functionality for multiple datasets?
    # NOTE: Be wary of comparing across multiple datasets. This hasn't been tested yet
    # So far, only datasets 5 and 6 are known by the program (see below)
    skimPath1 = "/global/project/projectdirs/majorana/data/mjd/surfmjd/analysis/skim/"
    skimPath2 = "/global/project/projectdirs/majorana/data/mjd/surfmjd/analysis/skim/"
    runBoundariesDS5cM1 = [24305,24318,24539,24552,24761,24775,24971,24984,25248,25261,25479,25492]
    runBoundariesDS5cM2 = [23960,23970,24322,24332,24556,24567,24779,24789,24988,24998,25265,25275,25496,25506]
    # MAKE SURE THIS HAS THE CORRECT CUTS
    cuts = "trapENFCal < 2635 && trapENFCal > 2605 && isGood && !wfDCBits && !muVeto && mH == 1"
    # If there is only one module being calibrated in DS5
    cutStr5cM1 = " && C == 1"
    cutStr5cM2 = " && C == 2"
    if (run1Start <= 25507 and run1Start > 18712):
        # DS5c
        skimPath1 += "DS5ccal/GAT-v01-07-164-g8e0a877/skimDS5_"
        if run1Start in runBoundariesDS5cM1:
            cuts += cutStr5cM1
        elif run1Start in runBoundariesDS5cM2:
            cuts += cutStr5cM2
        else:
            raise ValueError('First run boundary is not in either DS5c calibration range')
    if (run1Start > 25507):
        # DS6
        skimPath1 += "DS6cal/GAT-v01-06/skimDS6_"
    if (run2Start <= 25507 and run2Start > 18712):
        # DS5c
        skimPath2 += "DS5ccal/GAT-v01-07-164-g8e0a877/skimDS5_"
        # BUGFIX: these checks previously tested run1Start (copy-paste).
        if run2Start in runBoundariesDS5cM1:
            cuts += cutStr5cM1
        elif run2Start in runBoundariesDS5cM2:
            cuts += cutStr5cM2
        else:
            raise ValueError('Second run boundary is not in either DS5c calibration range')
    if (run2Start > 25507):
        # DS6
        skimPath2 += "DS6cal/GAT-v01-06/skimDS6_"
    cutStr = cuts
    # Build the first run range's TChain
    for i in range(run1Start, run1End + 1):
        firstruns = skimPath1 + "run%d_small.root" % i
        calibrationSkim1.Add(firstruns, 0)
    # Build the second run range's TChain
    for i in range(run2Start, run2End + 1):
        secondruns = skimPath2 + "run%d_small.root" % i
        calibrationSkim2.Add(secondruns, 0)
    # channel list from the built data in P3LTP
    # I don't need to use this now, but can check against it
    # IE check that every channel in this list has data
    # and that every channel with data is in this list
    chanList = [680,681,678,679,674,675,672,673,632,633,630,631,626,627,690,691,692,693,648,649,640,641,642,643,664,665,662,663,660,661,658,659,616,617,610,611,608,609,584,585,600,601,598,599,592,593,696,697,624,625,628,629,688,689,694,695,614,615,1122,1123,1108,1109,1106,1107,1128,1129,1204,1205,1110,1111,1126,1127,1124,1125,1202,1203,1170,1171,1172,1173,1174,1175,1176,1177,1168,1169,1120,1121,1206,1207,1208,1209,1232,1233,1236,1237,1238,1239,1234,1235,1328,1329,1298,1299,1296,1297,1302,1303,1332,1333,1268,1269,1304,1305,1330,1331]
    # Test case channel list
    #chanList = [1204,1174,1173]
    # Draw the whole dcr distribution vs channel
    # make sure there are enough bins that each channel is one bin
    c2 = TCanvas('c2', 'c2', 400, 400)
    drawStr = "dcr90:channel>>bighist1"
    bighist1 = TH2F("bighist1", "All Channels DCR Comparison", 800, 550, 1350, 1000, -0.001, 0.001)
    calibrationSkim1.Draw(drawStr, cutStr, "COLZ")
    bighist1.SetLineColor(30)
    bighist1.SetStats(False)
    bighist1.GetXaxis().SetTitle("Channel")
    bighist1.GetYaxis().SetTitle("DCR99")
    bighist1.GetYaxis().SetTitleOffset(2.1)
    drawStr = "dcr90:channel>>bighist2"
    bighist2 = TH2F("bighist2", "All Channels DCR Comparison", 800, 550, 1350, 1000, -0.001, 0.001)
    calibrationSkim2.Draw(drawStr, cutStr, "SAME COLZ")
    bighist2.SetLineColor(45)
    bighist2.SetStats(False)
    bighist2.GetXaxis().SetTitle("Channel")
    bighist2.GetYaxis().SetTitle("DCR99")
    bighist2.GetYaxis().SetTitleOffset(2.1)
    c2.SetLogz()
    gPad.SetLeftMargin(0.15)
    gPad.SetRightMargin(0.12)
    # Fix the different z scales
    maxz = max(bighist1.GetMaximum(), bighist2.GetMaximum())
    bighist1.GetZaxis().SetRangeUser(0, maxz)
    bighist2.GetZaxis().SetRangeUser(0, maxz)
    c2.Update()
    # Save the big TH2F as a root file
    rootfileName = filePath + "DCRcomp%d-%d_and_%d-%d.root" % (run1Start, run1End, run2Start, run2End)
    c2.SaveAs(rootfileName)
    # Start the big pdf
    c2.Print(fileName + "(", "Title: All Channels")
    c2.Close()
    # TCanvas for the individual channels
    c3 = TCanvas('c3', 'c3', 400, 400)
    c3.cd()
    # Make txt file for chanList problems
    txtFileTitle = "channelProblemsforRuns%d-%d_and_%d-%d.txt" % (run1Start, run1End, run2Start, run2End)
    chanFile = open(txtFileTitle, "w+")
    # Want a hist of all DCR90 efficiencies for the run range
    # Only do this for the second run range (so I'm not doubling the plot)
    # Remove entries from this if they don't have any results
    # (same content as chanList; sorted working copy instead of a duplicated literal)
    active_channels = sorted(chanList)
    active_chan_effs = []
    # Channel loop
    for channel in range(550, 1351):
        # Convert channel to a bin number for slices
        bigX = bighist1.GetXaxis()
        currentbin = bigX.FindBin(channel)
        # Hack to get all the titles properly in the pdf
        canvastitle = "Channel %d" % channel
        bighist1.SetTitle(canvastitle)
        bighist2.SetTitle(canvastitle)
        # Get slices / bin content for each channel (the x axis of bighist)
        histname1 = "hist1_chan%d" % channel
        histname2 = "hist2_chan%d" % channel
        hist1 = bighist1.ProjectionY(histname1, currentbin, currentbin, "d")
        hist2 = bighist2.ProjectionY(histname2, currentbin, currentbin, "d")
        # Only proceed if the hists aren't empty
        if (hist1.GetEntries() == 0 and hist2.GetEntries() == 0):
            # Check that every empty channel isn't in chanList
            if (channel in chanList):
                chanFile.write("Channel %d has no output for runs %d - %d and %d - %d \n" % (channel, run1Start, run1End, run2Start, run2End))
                active_channels.remove(channel)
        else:
            # Make sure every channel with output is in chanList
            # NOTE(review): such a channel still gets an efficiency appended
            # below without being in active_channels, which would misalign the
            # summary histogram labels -- confirm intended.
            if (channel not in chanList):
                chanFile.write("Channel %d is not in chanList \n" % channel)
            c3.Clear()
            c3.SetTitle(canvastitle)
            hstack = THStack("hstack", "")
            hstack.SetTitle(canvastitle)
            hist1.SetLineColor(40)
            gPad.SetLogy()
            hstack.Add(hist1)
            # Get the dcr99 value by integrating the hist:
            # efficiency = events with dcr90 <= 0 over all events in window
            xaxis1 = hist1.GetXaxis()
            minbin = xaxis1.FindBin(-0.001)
            maxbin = xaxis1.FindBin(0.001)
            zerobin = xaxis1.FindBin(0)
            goodintegral1 = hist1.Integral(minbin, zerobin)
            totalintegral1 = hist1.Integral(minbin, maxbin)
            # Don't divide by zero (goodintegral1 > 0 implies total > 0)
            if (goodintegral1 != 0):
                dcr90_1 = goodintegral1 / totalintegral1
            else:
                dcr90_1 = 0
            hist2.SetLineColor(30)
            hstack.Add(hist2)
            # Get the second dcr99 value
            xaxis2 = hist2.GetXaxis()
            minbin = xaxis2.FindBin(-0.001)
            maxbin = xaxis2.FindBin(0.001)
            zerobin = xaxis2.FindBin(0)
            goodintegral2 = hist2.Integral(minbin, zerobin)
            totalintegral2 = hist2.Integral(minbin, maxbin)
            # Don't divide by zero
            if (goodintegral2 != 0):
                dcr90_2 = goodintegral2 / totalintegral2
            else:
                dcr90_2 = 0
            active_chan_effs.append(dcr90_2)
            # Formatting for the THStack
            hstack.Draw()
            xaxisStack = hstack.GetXaxis()
            xaxisStack.SetTitle("DCR99")
            hstack.GetYaxis().SetTitle("Count")
            xaxisStack.SetLabelSize(0.02)
            xaxisStack.SetTitleOffset(1.2)
            # Draw vertical line at dcr90 = 0 for reference
            c3.Update()
            ymax = max(hist1.GetMaximum(), hist2.GetMaximum())
            vert = TLine(0, 0, 0, ymax)
            vert.SetLineColor(2)
            vert.Draw()
            # Draw the legend
            chanlegend = TLegend(0.6, 0.8, 0.9, 0.9)
            entry1 = chanlegend.AddEntry(hist1, "#splitline{Run Range %d - %d}{%f Efficiency}" % (run1Start, run1End, dcr90_1), "l")
            entry2 = chanlegend.AddEntry(hist2, "#splitline{Run Range %d - %d}{%f Efficiency}" % (run2Start, run2End, dcr90_2), "l")
            # If the Efficiencies differ by more than 1%, make their text red
            if (abs(dcr90_1 - dcr90_2) > 0.01):
                entry1.SetTextColor(2)
                entry2.SetTextColor(2)
            entry1.SetTextSize(0.02)
            entry2.SetTextSize(0.02)
            chanlegend.Draw()
            # Save as one big pdf file
            pagetitle = "Title: " + canvastitle
            c3.Update()
            c3.Print(fileName, pagetitle)
    # Close the pdf with a blank page
    c3.Clear()
    c3.Print(fileName + ")", "I need to close the document somehow")
    c3.Close()
    # FIX: release the channel-problems report (was never closed)
    chanFile.close()
    # I want a spreadsheet of all the DCR90 efficiencies
    # Will make a file for each calibration run set, and then concatenate them later
    spreadsheetfilename = filePath + "eff_sheet_%d-%d.txt" % (run2Start, run2End)
    spreadsheet = open(spreadsheetfilename, "w+")
    # Make the dcr90 efficiency plot for the second run set only
    c_eff = TCanvas('c_eff', 'c_eff', 800, 400)
    c_eff.cd()
    # active_channels should be the x-axis labels
    # active_chan_effs should be the y-values
    num_chan = len(active_channels)
    eff_hist = TH1F('eff_hist', "DCR99 Efficiencies for Calibration Runs %d-%d" % (run2Start, run2End), num_chan, 0, num_chan)
    eff_xaxis = eff_hist.GetXaxis()
    # Input data and write to the spreadsheet
    for i in range(0, num_chan):
        eff_xaxis.SetBinLabel(i + 1, str(active_channels[i]))
        eff_hist.SetBinContent(i + 1, active_chan_effs[i])
        spreadsheet.write("%d %d %d %f \n" % (run2Start, run2End, active_channels[i], active_chan_effs[i]))
    c_eff.SetGridx()
    eff_hist.SetStats(False)
    eff_xaxis.SetLabelSize(0.03)
    eff_xaxis.SetTitle("Channel")
    eff_xaxis.SetTitleOffset(1.4)
    eff_hist.GetYaxis().SetTitle("DCR99 Efficiency")
    eff_hist.Draw()
    c_eff.Update()
    # Save
    eff_fileName = filePath + "DCR99_Efficiencies_Runs_%d-%d.pdf" % (run2Start, run2End)
    eff_title = "DCR99 Efficiencies for Calibration Runs %d-%d" % (run2Start, run2End)
    c_eff.SaveAs(eff_fileName, eff_title)
    c_eff.Close()
    spreadsheet.close()
# NOTE(review): this fragment uses names bound earlier in the script
# (inFile, inFile2, remainder from option parsing) -- it is part of a
# longer top-level plotting sequence.
# Fall back to the full 'ntuple' tree when the reduced 'cut' tree is absent.
isFullTuple = False
events = inFile.Get("cut")
if events == None:
    isFullTuple = True
    events = inFile.Get("ntuple")
events2 = inFile2.Get("ntuple")
c = TCanvas("c", "c", 1200, 900)
# Open a multi-page PDF named after the first positional argument.
c.Print(remainder[0] + ".pdf[")
outfile = TFile(remainder[0] + ".root", "RECREATE")
#exppol3=TF1("exppol3","exp(pol3(0))",0,100)
# Exponential-of-cubic fit model, capped at 2.
exppol4 = TF1("exppol4", "min(exp(pol3(0)),2)", 0, 100)
#exppol4.SetParameters(0,0,-0.001,0)
c.Clear()
c.Divide(1, 2)
c.cd(1)
gPad.SetLogy(1)
# Generated vertex-Z distribution for events above the momentum threshold.
events2.Draw("triEndZ>>prodZ(50,0,100)", "triP>0.8*2.3", "colz,goff")
prodZ = gDirectory.Get("prodZ")
prodZ.SetTitle("Generated heavy photons")
prodZ.GetXaxis().SetTitle("vertex Z [mm]")
prodZ.GetYaxis().SetTitle("events/bin")
prodZ.Fit("expo", "L")
prodZ.Draw()
prodZ.Write("prodz")
c.cd(2)
events2.Draw("triP>>hnew(100,0,2.5)", "", "colz,goff")
hnew = gDirectory.Get("hnew")
hnew.SetTitle("")
def drawDCS(fdbi, webdcs, chamber, c=None):
    """Analyse one DCS (slow-control) run set for a chamber.

    Reads the sqlite database `fdbi`: fetches the DCS settings, the
    efficiency-vs-HV scan points, the BMP pressure/temperature records and
    the SY1527 HV-supply records for run set `webdcs`; fits a sigmoid to the
    efficiency plateau, derives the working point (HV at 99% of plateau),
    interpolates rates/currents at the working point, and writes plots, a
    text summary and a summary.json under ./results/dcs/.

    Returns the summary dict, or None when no matching rows exist.
    NOTE(review): reconstructed from a whitespace-mangled source; statement
    grouping follows the code's own control-flow syntax.
    """
    if (c == None):
        c = TCanvas()
    jdict = {}
    jdict['settings'] = {}
    gStyle.SetOptFit(1)
    c.Clear()
    #c.Divide(2,4)

    conn = sqlite3.connect(fdbi)
    conn.text_factory = str
    curs = conn.cursor()

    # --- DCS settings for this run set ---
    sql_dcs = "select ATT,DEAD,TRET,TCOAX,TRIGGER,ACTIVE,START from webdcs WHERE dcs=%d" % webdcs
    curs.execute(sql_dcs)
    v = curs.fetchall()
    att = -1
    dthr = -1
    dead = -1
    trig = "UNKNOwN"
    if (len(v) < 1):
        return
    jdict['settings']['dcs'] = webdcs
    jdict['settings']['att'] = v[0][0]
    jdict['settings']['dead'] = v[0][1]
    jdict['settings']['tret'] = v[0][2]
    jdict['settings']['tcoax'] = v[0][3]
    jdict['settings']['trigger'] = v[0][4]
    jdict['settings']['active'] = v[0][5]
    jdict['settings']['start'] = v[0][6]

    dirout = "./results/dcs/%d_ATT%3.1f_DT%d_THR%d_%s/chamber%d/" % (webdcs, v[0][0], v[0][1], v[0][2] - 500, v[0][4].replace('"', '').replace(' ', '_'), chamber)
    os.system("mkdir -p %s" % dirout)
    fout = open(dirout + "summary%d_ATT%3.1f_DT%d_THR%d_%s.txt" % (webdcs, v[0][0], v[0][1], v[0][2] - 500, v[0][4].replace('"', '').replace(' ', '_')), "w")
    for x in v:
        fout.write("Cuts: %5.2f %d %d %d %s %d %s\n" % x)
        att = x[0]
        dead = x[1]
        dthr = x[2] - 500  # threshold is stored with a +500 offset
        trig = x[4]

    # --- efficiency scan points for this chamber ---
    sql_query = " select EFFCOR,DEFFC,(SELECT HV FROM runs WHERE runs.RUN=corana.RUN),DAQFEBRATE,DAQEFFLOSS,RUN,NCEVT,EFFC,CSIZE,NCLUS,XYFEBRATE,EFFBACK from corana WHERE RUN IN (SELECT RUN FROM runs WHERE DCS=%d) AND CHAMBER=%d" % (webdcs, chamber)
    curs.execute(sql_query)
    vo = curs.fetchall()
    if (len(vo) < 1):
        return
    hv = []
    eff = []
    dhv = []
    deff = []
    febrate = []
    dfebrate = []
    effloss = []
    csize = []
    nclus = []
    effback = []
    jdict['runs'] = {}
    jdict['runs']['effcor'] = []
    jdict['runs']['hv'] = []
    jdict['runs']['febrate'] = []
    jdict['runs']['effloss'] = []
    jdict['runs']['runid'] = []
    jdict['runs']['nevt'] = []
    jdict['runs']['effclu'] = []
    jdict['runs']['csize'] = []
    jdict['runs']['nclus'] = []
    jdict['runs']['xyrate'] = []
    jdict['runs']['effback'] = []
    for x in vo:
        if (x[1] == 0):  # skip points with zero efficiency uncertainty
            continue
        fout.write("Results: %5.2f %5.2f %5.2f %5.2f %5.2f %d %d %5.2f %5.2f %5.2f %5.2f %5.2f \n" % x)
        hv.append(x[2])
        dhv.append(10.)  # fixed 10 V uncertainty on HV
        eff.append(x[0])
        deff.append(x[1])
        febrate.append(x[3])
        effloss.append(x[4])
        csize.append(x[8])
        nclus.append(x[9])
        effback.append(x[11])
        jdict['runs']['effcor'].append(x[0])
        jdict['runs']['hv'].append(x[2])
        jdict['runs']['febrate'].append(x[3])
        jdict['runs']['effloss'].append(x[4])
        jdict['runs']['runid'].append(x[5])
        jdict['runs']['nevt'].append(x[6])
        jdict['runs']['effclu'].append(x[7])
        jdict['runs']['csize'].append(x[8])
        jdict['runs']['nclus'].append(x[9])
        jdict['runs']['xyrate'].append(x[10])
        jdict['runs']['effback'].append(x[11])

    # --- sigmoid fit of efficiency vs HV; working point = HV at 99% plateau ---
    stitle = "DCS%d_TRG%s_ATT%3.1f_THR%d_DT%d_CH%d" % (webdcs, trig, att, dthr, dead, chamber)
    gr = buildTGraph("effi", hv, dhv, eff, deff, "HV effective (V)", "efficiency (%)")
    func = TF1("func", "([0]/(1+ TMath::Exp(-[1]*(x-[2]))))", 6500, 8200)
    func.SetParameters(90, 9.E-3, 7000)
    print(100, 9.E-3, 7100)
    gr.Fit(func, "", "", 6700, 8200)
    hv95 = func.GetX(func.GetParameter(0) * 0.95)
    hv99 = func.GetX(func.GetParameter(0) * 0.99)
    print("HV95", hv95, hv95 + 150, hv99, hv99 - hv95)
    wp = hv99
    jdict['fit'] = {}
    jdict['fit']['Efficiency'] = func.GetParameter(0)
    jdict['fit']['Slope'] = func.GetParameter(1)
    jdict['fit']['HV50'] = func.GetParameter(2)
    jdict['fit']['HV95'] = hv95
    jdict['fit']['HV99'] = wp
    fout.write("FIT results: %f %f %f %f %f \n" % (func.GetParameter(0), func.GetParameter(1), func.GetParameter(2), hv95, wp))
    title = stitle + "_HV95_%4.0f_WP_%4.0f" % (hv95, wp)
    gr.SetTitle(title)
    gStyle.SetStatX(0.85)
    gStyle.SetStatY(0.7)
    c.cd(1)
    gr.Draw("AP")
    c.Update()
    #val=raw_input()
    c.SaveAs(dirout + "%s.png" % title)

    # auxiliary quantities vs HV
    tgr = []
    tgr.append(buildTGraph1('FEB Rate', hv, febrate, 'HV eff (V)', 'Rate (Hz/cm^2)'))
    tgr.append(buildTGraph1('Dead Time Loss', hv, effloss, 'HV eff (V)', 'Dead time loss (%)'))
    tgr.append(buildTGraph1('Cluster Size', hv, csize, 'HV eff (V)', 'Cluster size'))
    tgr.append(buildTGraph1('Cluster Number', hv, nclus, 'HV eff (V)', 'Clusters'))
    tgr.append(buildTGraph1('Background Efficiency', hv, effback, 'HV eff (V)', 'Efficiency (%)'))
    #val=raw_input()
    # grb=buildTGraph("background",hv,dhv,febrate,dfebrate,"HV effective (V)","FEB rate (Hz/cm^2)")
    # title=stitle
    # grb.SetTitle(title)
    # gStyle.SetStatX(0.85)
    # gStyle.SetStatY(0.7)
    # c.cd(2)
    # grb.Draw("AP")
    # c.Update()
    # c.SaveAs(dirout+"%s.png" % title)
    #val=raw_input()

    # --- average BMP pressure/temperature during this run set ---
    sql_query = "select * from RESULTS WHERE CTIME>=(SELECT CFIRST FROM webdcs WHERE DCS=%d) AND CTIME<=(SELECT CLAST FROM webdcs WHERE DCS=%d)-200 AND HARDWARE='BMP'" % (webdcs, webdcs)
    curs.execute(sql_query)
    v = curs.fetchall()
    bmp = []
    for x in v:
        bmp.append([x[3], json.loads(x[4].decode('latin-1').encode("utf-8"))])
    #print(bmp)
    PB = 0
    TB = 0
    NB = 0
    for x in bmp:
        PB = PB + x[1]['pressure']
        TB = TB + x[1]['temperature']
        NB = NB + 1
    if (NB > 0):
        PB = PB / NB
        TB = TB / NB + 273.15  # Celsius -> Kelvin
    fout.write("Pressure: %d %5.2f %5.2f \n" % (NB, PB, TB))
    jdict['BMP'] = {"P": PB, "T": TB}
    #val=raw_input()

    # --- SY1527 HV-supply records during this run set ---
    sql_query = "select * from RESULTS WHERE CTIME>=(SELECT CFIRST FROM webdcs WHERE DCS=%d) AND CTIME<=(SELECT CLAST FROM webdcs WHERE DCS=%d)-200 AND HARDWARE='SY1527'" % (webdcs, webdcs)
    curs.execute(sql_query)
    v = curs.fetchall()
    a = []
    for x in v:
        a.append([x[3], json.loads(x[4].decode('latin-1').encode("utf-8"))])
    #print(a)
    #val=raw_input()
    # HV channel indices differ per chamber
    if (chamber == 1):
        chan = [4, 5]
    else:
        chan = [1, 2]
    idx = 0
    #tgr=[]
    jdict['HV'] = {}
    for ch in chan:
        x_t = []
        y_vs = []
        z_vm = []
        w_im = []
        g_g = []
        chname = ""
        first = 0
        for x in a:
            # keep only stable (not ramping) records above 5500 V
            if (x[1]['channels'][ch]['rampup'] == 0):
                continue
            if (float(x[1]['channels'][ch]['vset']) < 5500.0):
                continue
            #print(float(x[1]['channels'][ch]['vset']))
            if (first == 0):
                first = x[0]
            x_t.append(x[0] - first)
            y_vs.append(x[1]['channels'][ch]['vset'])
            z_vm.append(x[1]['channels'][ch]['vout'])
            w_im.append(x[1]['channels'][ch]['iout'])
            g_g.append(x[1]['channels'][ch]['iout'] / x[1]['channels'][ch]['vout'])
            #print(ch,x)
            chname = x[1]['channels'][ch]['name']
        if (len(x_t) < 1):
            continue
        # central-difference derivative of vset vs time, used to split plateaus
        dy_vs = []
        if (len(y_vs) > 1):
            for i in range(0, len(y_vs)):
                if (i == 0 and i != len(y_vs) - 1):
                    dy_vs.append(y_vs[i + 1] - y_vs[i])
                    continue
                if (i == len(y_vs) - 1 and i != 0):
                    dy_vs.append(y_vs[i] - y_vs[i - 1])
                    continue
                dy_vs.append((y_vs[i + 1] - y_vs[i - 1]) / 2.)
        else:
            dy_vs.append(0)
        # average vmon/imon on each plateau (where |d vset| <= 10 V)
        vset = []
        vmon = []
        veff = []
        imon = []
        nval = 0
        vmon_sum = 0
        vset_sum = 0
        imon_sum = 0
        for i in range(0, len(y_vs)):
            if (abs(dy_vs[i]) > 10):
                # plateau boundary: flush the running averages
                if (nval > 0):
                    vmon.append(vmon_sum / nval)
                    vset.append(vset_sum / nval)
                    imon.append(imon_sum / nval)
                    if (NB > 0):
                        veff.append(calV(vmon_sum / nval, PB, TB))
                    else:
                        veff.append(vmon_sum / nval)
                nval = 0
                vmon_sum = 0
                vset_sum = 0
                imon_sum = 0
                continue
            nval = nval + 1
            vmon_sum = vmon_sum + z_vm[i]
            vset_sum = vset_sum + y_vs[i]
            imon_sum = imon_sum + w_im[i]
        if (nval > 0):  # flush the last plateau
            vmon.append(vmon_sum / nval)
            vset.append(vset_sum / nval)
            imon.append(imon_sum / nval)
            if (NB > 0):
                veff.append(calV(vmon_sum / nval, PB, TB))
            else:
                veff.append(vmon_sum / nval)
        #print(chname,"VSET",vset)
        #print(chname,"VMON",vmon)
        #print(chname,"VEFF",veff)
        #print(chname,"IMON",imon)
        fout.write(chname + "\n")
        jdict['HV'][chname] = {}
        jdict['HV'][chname]['vset'] = vset
        jdict['HV'][chname]['vmon'] = vmon
        jdict['HV'][chname]['imon'] = imon
        jdict['HV'][chname]['veff'] = veff
        for ip in range(0, len(vmon)):
            fout.write("%5.2f %5.2f %5.2f %5.2f \n" % (vset[ip], vmon[ip], veff[ip], imon[ip]))
        #tgr.append(buildTGraph1('V set vs t %s' % chname,x_t,y_vs,'t(s)','V set (V)'))
        tgr.append(buildTGraph1('V Mon vs t %s' % chname, x_t, z_vm, 't(s)', 'V mon (V)'))
        tgr.append(buildTGraph1('I Mon vs t %s' % chname, x_t, w_im, 't(s)', 'I mon ([m]A)'))
        tgr.append(buildTGraph1('I Mon vs V eff %s' % chname, veff, imon, 'V eff(V)', 'I mon ([m]A)'))

    # Draw and save all auxiliary graphs.
    # NOTE(review): assumed to run once, after the channel loop -- confirm.
    icd = 3
    for x in tgr:
        c.cd(icd)
        icd = icd + 1
        x.Draw("AP")
        c.Update()
        c.SaveAs(dirout + "%s.png" % x.GetTitle().replace(" ", "_"))
    #val=raw_input()

    # resume: interpolate every quantity at the working point (AWP)
    chans = []
    if (chamber == 2):
        chans = ["COAX-BOT", "COAX-TOP"]
    else:
        chans = ["RETURN-BOT", "RETURN-TOP"]
    jdict['AWP'] = {}
    jdict['AWP']['hv'] = wp
    ihv = -1
    for i in range(0, len(jdict['runs']['hv'])- 1):
        if (wp > jdict['runs']['hv'][i] and wp <= jdict['runs']['hv'][i + 1]):
            ihv = i
            break
    if (ihv != -1):
        jdict['AWP']['plateau'] = jdict['fit']['Efficiency']
        jdict['AWP']['febrate'] = approx(wp, jdict['runs']['hv'][ihv], jdict['runs']['hv'][ihv + 1], jdict['runs']['febrate'][ihv], jdict['runs']['febrate'][ihv + 1])
        jdict['AWP']['effloss'] = approx(wp, jdict['runs']['hv'][ihv], jdict['runs']['hv'][ihv + 1], jdict['runs']['effloss'][ihv], jdict['runs']['effloss'][ihv + 1])
        jdict['AWP']['effcor'] = approx(wp, jdict['runs']['hv'][ihv], jdict['runs']['hv'][ihv + 1], jdict['runs']['effcor'][ihv], jdict['runs']['effcor'][ihv + 1])
        jdict['AWP']['csize'] = approx(wp, jdict['runs']['hv'][ihv], jdict['runs']['hv'][ihv + 1], jdict['runs']['csize'][ihv], jdict['runs']['csize'][ihv + 1])
        jdict['AWP']['nclus'] = approx(wp, jdict['runs']['hv'][ihv], jdict['runs']['hv'][ihv + 1], jdict['runs']['nclus'][ihv], jdict['runs']['nclus'][ihv + 1])
    # interpolate the monitored current at the working point per HV channel
    for x in chans:
        ihv = -1
        if (x in jdict['HV']):
            for i in range(0, len(jdict['HV'][x]['veff']) - 1):
                if (wp > jdict['HV'][x]['veff'][i] and wp <= jdict['HV'][x]['veff'][i + 1]):
                    ihv = i
                    break
        if (ihv != -1):
            jdict['AWP'][x] = approx(wp, jdict['HV'][x]['veff'][ihv], jdict['HV'][x]['veff'][ihv + 1], jdict['HV'][x]['imon'][ihv], jdict['HV'][x]['imon'][ihv + 1])
    # total current and charge per hit at the working point
    # (surf = chamber surface; presumably cm^2 -- TODO confirm)
    itot = 0
    surf = 0
    if (chamber == 1):
        surf = 13000
        if ('RETURN-TOP' in jdict['AWP']):
            itot = itot + jdict['AWP']['RETURN-TOP']
        if ('RETURN-BOT' in jdict['AWP']):
            itot = itot + jdict['AWP']['RETURN-BOT']
    if (chamber == 2):
        surf = 15000
        if ('COAX-TOP' in jdict['AWP']):
            itot = itot + jdict['AWP']['COAX-TOP']
        if ('COAX-BOT' in jdict['AWP']):
            itot = itot + jdict['AWP']['COAX-BOT']
    if ('febrate' in jdict['AWP']):
        if (jdict['AWP']['febrate'] != 0):
            jdict['AWP']['ITOT'] = itot
            jdict['AWP']['QSEEN'] = itot / jdict['AWP']['febrate'] / surf * 1E6
            fout.write("%d|%5.1f|%5.0f|%5.2f|%5.2f|%5.2f|%5.2f|%5.1f|%5.1f|%5.1f|%5.1f\n" % (webdcs, jdict['settings']['att'], jdict['AWP']['hv'], jdict['AWP']['plateau'], jdict['AWP']['effcor'], jdict['AWP']['effloss'], jdict['AWP']['plateau'] - jdict['AWP']['effloss'], jdict['AWP']['febrate'], jdict['AWP']['ITOT'], jdict['AWP']['QSEEN'], jdict['AWP']['csize']))
    with open(dirout + 'summary.json', 'w') as outfile:
        dd = json.dumps(jdict, sort_keys=True, indent=2, separators=(',', ': '))
        outfile.write(dd)
        outfile.close()
    fout.close()
    return jdict
mean.setConstant(False) gamma.setConstant(False) rPhifit = tot.fitTo(xdataPrompt, Range(phimin, phimax), RooFit.NumCPU(4)) # In[ ]: c = TCanvas("canvas", "canvas", 1200, 800) phiFrame = masskk.frame(Range(phimin, phimax), Normalization((nSig.getValV() + nBkg.getValV()))) xdataPrompt.plotOn(phiFrame) tot.plotOn(phiFrame) phiFrame.Draw() c.SaveAs("phiMassSPlot.png") c.SaveAs("phiMassSPlot.root") c.Clear() # In[ ]: cD = TCanvas("cD", "cD", 750, 600) cD.cd() splot = RooStats.SPlot("sPlot", "sPlot", xdataPrompt, tot, RooArgList(nSig, nBkg)) dstree = xdataPrompt.store().tree() # In[19]: shist = TH1F('shist', 'shist', 500, 4.0, 5.0) # In[20]:
def producePlots(filelist_handle,identifier): """ produce plots for filelist """ files = {} plots = [] failures = [] for line in filelist_handle.readlines(): array = line.split() if len(array) > 3 : name = str(array[0]) size = float(float(array[1])/1000000000000) events = int(array[2]) run = int(array[3]) lumi = int(array[4]) if array[-1] != 'GMT' : failures.append(line) else : thetime = int(time.mktime(time.strptime(' '.join(array[5:]),'%a, %d %b %Y %H:%M:%S %Z'))) if name in files.keys(): files[name]['lumi'].append(lumi) else : entry = {} entry['size'] = size entry['events'] = events entry['run'] = [run] entry['lumi'] = [lumi] entry['time'] = thetime files[name] = entry if debug == 1 : for name in files.keys(): print name,'size:',files[name]['size'],'events:',files[name]['events'],'runs:',','.join(map(str,files[name]['run'])),'lumis:',','.join(map(str,files[name]['lumi'])),'date:',files[name]['time'] # print failures if len(failures) > 0 : print "" print "failures:" for line in failures : print line # define time axis times = [ files[name]['time'] for name in files ] times.sort() first_day = times[0]-times[0]%86400 last_day = times[len(times)-1]+(86400-times[len(times)-1]%86400) days = int((last_day - first_day)/86400.) 
# determine x axis division if days%10 == 0: bigdiv = 10 smalldiv = days/10 elif days%9 == 0: bigdiv = 9 smalldiv = days/9 elif days%8 == 0: bigdiv = 8 smalldiv = days/8 elif days%7 == 0: bigdiv = 7 smalldiv = days/7 elif days%6 == 0: bigdiv = 6 smalldiv = days/6 elif days%5 == 0: bigdiv = 5 smalldiv = days/5 elif days%4 == 0: bigdiv = 4 smalldiv = days/4 elif days%3 == 0: bigdiv = 3 smalldiv = days/3 elif days%2 == 0: bigdiv = 2 smalldiv = days/2 else : bigdiv = 1 smalldiv = days gROOT.SetBatch(1) myStyle = TStyle("Default","Default"); myStyle.cd(); myStyle.SetFrameBorderMode(0); myStyle.SetCanvasBorderMode(0); myStyle.SetPadBorderMode(0); myStyle.SetPadColor(0); myStyle.SetCanvasColor(0); myStyle.SetTitleFillColor(0); myStyle.SetTitleBorderSize(1); myStyle.SetStatColor(0); myStyle.SetStatBorderSize(1); myStyle.SetOptStat(0); myStyle.SetTimeOffset(first_day+86400) c = TCanvas() c.SetLeftMargin(0.171) c.SetRightMargin(0.04) c.SetBottomMargin(0.093) c.SetTopMargin(0.08) c.SetGridx(1) c.SetGridy(1) # events histogram print 'Drawing histograms for average number of events per day' h_events = TH1D("NumberOfEvents" + identifier,"",days,0.,days*86400.) 
h_events.GetYaxis().SetTitle("Number of events [1/24h]") h_events.GetYaxis().SetTitleSize(0.08) h_events.GetYaxis().SetTitleColor(4) h_events.GetYaxis().SetTitleOffset(1.14) h_events.GetYaxis().SetLabelSize(0.07) h_events.GetYaxis().SetLabelOffset(0.01) h_events.GetXaxis().SetTimeDisplay(1) h_events.GetXaxis().SetTimeFormat("%m/%d") h_events.GetXaxis().SetLabelSize(0.05) h_events.GetXaxis().SetLabelOffset(0.035) h_events.GetXaxis().SetNdivisions(smalldiv * 100 + bigdiv,0) h_events.SetLineColor(4) h_events.SetLineWidth(3) total_events = 0 for name in files.keys(): total_events += files[name]['events'] h_events.Fill(files[name]['time']-first_day,files[name]['events']) h_events.SetMaximum(h_events.GetMaximum()*1.3) h_events.Draw() t_events = TPaveText(0.2,0.75,0.9,0.9,'brNDC') t_events.AddText('CMS 2010: %s' % identifier) t_events.AddText('Total number of events: %.2E' % (total_events)) t_events.AddText('Last updated on: %s' % (timestring)) t_events.SetFillColor(0) t_events.SetFillStyle(0) t_events.SetBorderSize(0) t_events.SetTextAlign(12) t_events.Draw() name = timefilestring + '_events' + identifier.replace('/','_') + '.png' plots.append(name) c.SaveAs(name) c.Clear() # rate histogram print 'Drawing histograms for average rate per day' h_rate = TH1D("Rate" + identifier,"",days,0.,days*86400.) h_rate.GetYaxis().SetTitle("Average Rate [Hz/24h]") h_rate.GetYaxis().SetTitleSize(0.08) h_rate.GetYaxis().SetTitleColor(4) h_rate.GetYaxis().SetTitleOffset(1.14) h_rate.GetYaxis().SetLabelSize(0.07) h_rate.GetYaxis().SetLabelOffset(0.01) h_rate.GetXaxis().SetTimeDisplay(1) h_rate.GetXaxis().SetTimeFormat("%m/%d") h_rate.GetXaxis().SetLabelSize(0.05) h_rate.GetXaxis().SetLabelOffset(0.035) h_rate.GetXaxis().SetNdivisions(smalldiv * 100 + bigdiv,0) h_rate.SetLineColor(4) h_rate.SetLineWidth(3) h_rate_max = 0. for bin in range(days): rate = h_events.GetBinContent(bin+1)/24./3600. 
if rate > h_rate_max : h_rate_max = rate h_rate.SetBinContent(bin+1,rate) h_rate.SetMaximum(h_rate_max*1.3) h_rate.Draw() t_rate = TPaveText(0.2,0.75,0.9,0.9,'brNDC') t_rate.AddText('CMS 2010: %s' % identifier) t_rate.AddText('Average rate: %.2f Hz' % (float(total_events)/float(days)/24./3600.)) t_rate.AddText('Last updated on: %s' % (timestring)) t_rate.SetFillColor(0) t_rate.SetFillStyle(0) t_rate.SetBorderSize(0) t_rate.SetTextAlign(12) t_rate.Draw() name = timefilestring + '_rate' + identifier.replace('/','_') + '.png' plots.append(name) c.SaveAs(name) c.Clear() # size histogram print 'Drawing histograms for average size per day' h_size = TH1D("Size"+identifier,"",days,0.,days*86400.) h_size.GetYaxis().SetTitle("Size [TB/24h]") h_size.GetYaxis().SetTitleSize(0.08) h_size.GetYaxis().SetTitleColor(4) h_size.GetYaxis().SetTitleOffset(1.12) h_size.GetYaxis().SetLabelSize(0.07) h_size.GetYaxis().SetLabelOffset(0.01) h_size.GetXaxis().SetTimeDisplay(1) h_size.GetXaxis().SetTimeFormat("%m/%d") h_size.GetXaxis().SetLabelSize(0.05) h_size.GetXaxis().SetLabelOffset(0.035) h_size.GetXaxis().SetNdivisions(smalldiv * 100 + bigdiv,0) h_size.SetLineColor(4) h_size.SetLineWidth(3) total_size = 0. for name in files.keys(): total_size += files[name]['size'] h_size.Fill(files[name]['time']-first_day,files[name]['size']) h_size.SetMaximum(h_size.GetMaximum()*1.3) h_size.Draw() t_size = TPaveText(0.2,0.75,0.9,0.9,'brNDC') t_size.AddText('CMS 2010: %s' % identifier) t_size.AddText('Total size: %.2f TB' % (total_size)) t_size.AddText('Last updated on: %s' % (timestring)) t_size.SetFillColor(0) t_size.SetFillStyle(0) t_size.SetBorderSize(0) t_size.SetTextAlign(12) t_size.Draw() #raw_input('Press Enter...') name = timefilestring + '_size' + identifier.replace('/','_') + '.png' plots.append(name) c.SaveAs(name) c.Clear() return plots