def drawStack(stack, legends, foundVariables, sampleType, subset='TestA', dataHist=None):
    """Draw each THStack in *stack*, optionally overlaying data, and save a PNG per variable.

    Keyword arguments:
    stack -- the stacks to be drawn (one per variable)
    legends -- the accompanying legends, indexed in step with stack
    foundVariables -- the variables in the stack that are being drawn
    sampleType -- signal or bkg (used in the output filename)
    subset -- acts as extra identifier in the filename (default 'TestA')
    dataHist -- mapping with key 'data' holding one data histogram per
                variable; None/empty means no data overlay (default None)
    """
    # BUGFIX: the default used to be a mutable {} shared across calls;
    # use None as the sentinel instead (truthiness checks are unchanged).
    if dataHist is None:
        dataHist = {}
    c2 = Canvas()
    c2.cd()
    # should create a ratio plot too!!! get the scaling right...
    for xcount, x in enumerate(stack):
        if dataHist:
            # Draw once so ROOT computes the stack maximum, then rescale
            # so both the stack and the data points fit in the frame.
            x.Draw()
            xmax = x.GetHistogram().GetMaximum()
            dmax = dataHist['data'][xcount].GetMaximum()
            x.SetMaximum(max(xmax, dmax) * 1.1)
            x.Draw('hist')
            dataHist['data'][xcount].Draw('same')
        else:
            x.Draw('hist')
        legends[xcount].Draw('same')
        c2.SaveAs(foundVariables[xcount] + str(sampleType) + str(subset) + 'Stack.png')
        c2.Write()
def makePage( histos, fileName, fileDescr, separateFiles, logscale):
    """ Prepares a canvas with one histogram per pad """
    # NOTE(review): an identically named makePage is defined again later in
    # this module; at import time the later definition shadows this one.
    import rootpy
    from rootpy.plotting import Hist, Canvas
    from ROOT import kBlue,gPad
    log = logging.getLogger('pyroplot')
    cans = {}  # canvases keyed by histogram name (or fileName/name for shared pads)
    log.info( "Drawing histograms .." )
    for idx, name in enumerate(sorted(histos.keys())):
        if separateFiles:
            # one canvas per histogram
            log.debug( "Creating new canvas with index %d."%(idx))
            c=Canvas( 600, 800)
            cans[name]=c
            markCanvas(c, fileName, 0.05, y = 0.009, size = 0.025, color = 2 )
        if not separateFiles and (idx)%6 == 0:
            log.debug( "Creating new canvas with index %d."%(idx/6))
            # start a new canvas (2x3 pads, six histograms per canvas)
            c=Canvas( 600, 800)
            cans[fileName+'/'+name]=c
            c.Divide(2,3)
            markCanvas(c, fileName, 0.05, y = 0.009, size = 0.025, color = 2 )
        # draw the histogram
        hist = histos[name]
        log.debug( "Drawing histogram #" + str(idx%6+1) +": " + hist.GetName() + " (" + hist.__class__.__name__ + ") in canvas #" + str(int(idx/6) ))
        hist.color = kBlue
        if not separateFiles:
            # select the pad for this histogram on the shared canvas
            c.cd(idx%6+1)
        if logscale:
            gPad.SetLogy()
        plotHistos(histos=[hist],text=name)
        if fileDescr:
            markPad(text=fileDescr, x=.14, y=.8, size=0.041, color = 2)
    return cans
def applyacc(wbin, q2bin, iedges, vedges, weights):
    """Apply acceptance corrections to the experimental cos(theta)-phi
    distribution for one (W, Q2) bin and display the intermediate
    histograms on a 2x2 canvas.

    Arguments:
    wbin, q2bin -- bin indices for W and Q2
    iedges -- bin-edge indices passed through to h2costphis
    vedges -- bin-edge values (currently unused here)
    weights -- (sideband weight, 3-sigma-cut correction) pair

    Returns the acceptance-corrected signal histogram.
    """
    # TODO: clone 2d hists to keep snapshots between steps of acceptance
    # corrections and zero-filling
    vals = ibins2vals(wbin, q2bin)
    (wval, wlo, whi) = (vals[0][0], vals[0][1], vals[0][2])
    (q2val, q2lo, q2hi) = (vals[1][0], vals[1][1], vals[1][2])
    h2s = (hexpsig, hexpsb, h2thr, h2acc) = h2costphis(wbin, q2bin, iedges)
    h2rec_acor = h2thr.Clone('h2r_acor')
    h2rec_acor.Multiply(h2acc)
    inth2r = h2rec_acor.Integral()  # integral of reconstructed simulated events
    h2rec_acor.Divide(h2acc)        # thrown events with acceptance holes
    h2thr.Add(h2rec_acor, -1)       # thrown events IN acceptance holes
    # Assign Poisson errors to the hole histogram.
    # BUGFIX: ROOT bin indices run 1..GetNbinsN() inclusive; the original
    # range(1, N) skipped the last bin on each axis.  Explicit loops
    # replace the side-effect-only list comprehension.
    # NOTE(review): bin contents should be non-negative here since the
    # subtraction removes only in-acceptance events -- confirm, else sqrt
    # would raise on a negative value.
    for i in range(1, h2thr.GetNbinsX() + 1):
        for j in range(1, h2thr.GetNbinsY() + 1):
            h2thr.SetBinError(i, j, sqrt(h2thr.GetBinContent(i, j)))
    hexpsb.Scale(weights[0])  # weightsb
    hexpsig.Add(hexpsb, -1)
    inth2e = hexpsig.Integral()/weights[1]  # integral of signal region with 3-sigma cut correction
    hexpsig.Divide(h2acc)
    h2thr.Scale(inth2e/inth2r)
    hexpsig.Add(h2thr)
    cmmp = Canvas(name='hcostphi_%d_%d' % (round(1000*wval), round(1000*q2val)),
                  title='Angular Distributions, W, Q2 = %.3f GeV,%.3f GeV2' % (wval, q2val))
    cmmp.Divide(2, 2)
    for ipad, h2 in enumerate(h2s):
        cmmp.cd(ipad+1)
        r.gPad.Update()
        h2.Draw('colz')
    wait()
    return hexpsig
def drawAllTrainStacks(signal, bkg, data, labelCodes, weightsPerSampleA, weightsPerSampleB,
                       corrWeightsA=DEFAULT, corrWeightsB=DEFAULT, subset='TrainA'):
    """Draw all train stacks for signal and bkg at once.

    Keyword arguments:
    signal -- the signal sample
    bkg -- the background sample
    data -- the data sample (currently unused here)
    labelCodes -- the label codes for the type of sample (W, Z for eg.)
    weightsPerSampleA/weightsPerSampleB -- the XS weights for subsets A and B
    corrWeightsA/corrWeightsB -- correction weights; computed from the
        samples when left at the DEFAULT sentinel
    subset -- extra identifier for the filename (default 'TrainA')
    """
    # BUGFIX: numpy.hstack takes a single tuple of arrays (as used elsewhere
    # in this module); the original passed two positional arguments.
    # NOTE(review): these call returnTestCorrectionWeights inside a Train
    # routine -- confirm that is intentional.
    if corrWeightsA is DEFAULT:
        corrWeightsA = hstack((signal.returnTestCorrectionWeights('A'),
                               bkg.returnTestCorrectionWeights('A')))
    if corrWeightsB is DEFAULT:
        corrWeightsB = hstack((signal.returnTestCorrectionWeights('B'),
                               bkg.returnTestCorrectionWeights('B')))
    # store all histograms in output.root
    for x in xrange(0, len(signal.train)):
        # first iteration handles subset A, all others subset B
        if x == 0:
            subset = 'A'
            weightsPerSample = weightsPerSampleA
            corrWeights = corrWeightsA
        else:
            subset = 'B'
            weightsPerSample = weightsPerSampleB
            corrWeights = corrWeightsB
        f = ropen('outputTrain'+str(subset)+'.root', 'recreate')
        c1 = Canvas()
        c1.cd()
        allStack = []
        legendAllStack = []
        # get sig histograms for this subset
        hist, histDictSigA, testAStack, legendSigStack = createHists(
            signal.returnTrainSample(subset), labelCodes, 'signal',
            signal.returnTrainSampleLabels(subset), weightsPerSample[0],
            signal.returnFoundVariables(), allStack, legendAllStack,
            corrWeights, str('Train'+subset), True)
        # get bkg histograms for this subset
        # how to fix legends????
        hist2, histDictBkgA, testAStackBkg, legendBkgStack = createHists(
            bkg.returnTrainSample(subset), labelCodes, 'bkg',
            bkg.returnTrainSampleLabels(subset), weightsPerSample[1],
            bkg.returnFoundVariables(), allStack, legendAllStack,
            corrWeights, str('Train'+subset), True)
        # overlay signal and background per variable and write to the file
        for hist2idx in xrange(0, len(hist)):
            legend = Legend(3)
            legend.AddEntry(hist[hist2idx], 'F')
            legend.AddEntry(hist2[hist2idx], 'F')
            hist[hist2idx].draw('hist')
            hist[hist2idx].Write()
            hist2[hist2idx].draw('histsame')
            hist2[hist2idx].Write()
            legend.Draw('same')
            c1.Write()
            #c1.SaveAs(signal.returnFoundVariables()[hist2idx]+".png")
            # (removed a redundant manual hist2idx += 1 -- the for loop
            # already rebinds the index each iteration)
        # draw histograms
        drawStack(testAStack, legendSigStack, signal.returnFoundVariables(), 'Sig', str('Train'+subset))
        drawStack(testAStackBkg, legendBkgStack, bkg.returnFoundVariables(), 'Bkg', str('Train'+subset))
        drawStack(allStack, legendAllStack, signal.returnFoundVariables(), 'All', str('Train' + subset))
        f.close()
def compare(name1, name2, j):
    """Open <name1>.root and <name2>.root and save an overlay comparison
    plot for decay channel *j* under plots/WeightComparison/.
    """
    c = Canvas()
    QCD = ROOT.TFile.Open(name1+".root")
    Wjet = ROOT.TFile.Open(name2+".root")
    try:
        c.Divide(2,1)
        c.cd(1)
        # NOTE(review): Draw() is called on the TFile objects themselves,
        # not on a graph/histogram retrieved from them -- this likely draws
        # nothing.  The intended object key is not visible here; confirm
        # and fetch the graphs explicitly (e.g. f.Get(...)).
        # NOTE(review): pad 2 of the divided canvas is never used.
        QCD.Draw("AP")
        Wjet.Draw("AP")
        c.SaveAs("plots/WeightComparison/WjetProbaDecayChannel_"+str(j)+"_WeightComparison.png")
    finally:
        # BUGFIX: the input files were never closed (resource leak).
        if QCD:
            QCD.Close()
        if Wjet:
            Wjet.Close()
def drawSigBkgDistrib(sample, classif, foundVariables):
    """For each variable, draw normalised class-0 vs class-1 distributions
    and save one <variable>SigBkgComparison.png per variable.

    sample -- event-by-variable array of values
    classif -- per-event class labels (0 or 1); sorted together with sample
    foundVariables -- variable names, indexed in step with sample columns
    """
    idx = 0  # NOTE(review): unused
    c1 = Canvas()
    c1.cd()
    histidx = 0
    histSig = []
    histBkg = []
    # sort events by class so the first numzero rows are class 0
    classif, sample = sc.sortMultiple(classif,sample)
    sample2 = transpose(sample)
    binc = bincount(classif)
    numzero = binc[0]  # number of class-0 events
    numone = binc[1]   # number of class-1 events (unused below)
    # histLimits: module-level dict of (low, high) axis limits per variable
    global histLimits
    for x in sample2:
        x = transpose(x)
        variableName = foundVariables[histidx]
        # NOTE(review): histSig is filled with x[:numzero] (class 0) but
        # titled 'Background', while histBkg gets class 1 titled 'Signal' --
        # the names/titles look swapped; confirm which class is signal.
        histSig.append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
        histSig[histidx].fill_array(x[:numzero])
        # normalise to unit area
        # NOTE(review): divides by integral() -- raises ZeroDivisionError if
        # the histogram is empty; confirm inputs are never empty.
        histSig[histidx].scale(1.0/histSig[histidx].integral())
        histSig[histidx].fillcolor='red'
        histSig[histidx].SetFillStyle(3004)
        histSig[histidx].linecolor='red'
        histSig[histidx].GetXaxis().SetTitle(foundVariables[histidx])
        histSig[histidx].GetYaxis().SetTitle('# Events Normalised to 1')
        histSig[histidx].SetTitle('Background')
        # histSig[histidx].fillstyle='solid'
        histSig[histidx].SetStats(0)
        histBkg.append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
        histBkg[histidx].fill_array(x[numzero:])
        histBkg[histidx].scale(1.0/histBkg[histidx].integral())
        histBkg[histidx].fillcolor='blue'
        histBkg[histidx].linecolor='blue'
        histBkg[histidx].SetFillStyle(3005)
        histBkg[histidx].GetXaxis().SetTitle(foundVariables[histidx])
        histBkg[histidx].GetYaxis().SetTitle('# Events Normalised to 1')
        histBkg[histidx].SetTitle('Signal')
        # histBkg[histidx].fillstyle='solid'
        histBkg[histidx].SetStats(0)
        leg = Legend(2)
        leg.AddEntry(histBkg[histidx],'F')
        leg.AddEntry(histSig[histidx],'F')
        histBkg[histidx].draw('hist')
        histSig[histidx].draw('histsame')
        leg.Draw('same')
        c1.SaveAs(variableName+"SigBkgComparison.png")
        histidx+=1
def makePage(histos, fileName, fileDescr, separateFiles, logscale):
    """ Prepares a canvas with one histogram per pad """
    import rootpy
    from rootpy.plotting import Hist, Canvas
    from ROOT import kBlue, gPad

    log = logging.getLogger('pyroplot')
    log.info("Drawing histograms ..")
    cans = {}
    canvas = None
    for i, name in enumerate(sorted(histos)):
        if separateFiles:
            # dedicated canvas for every histogram
            log.debug("Creating new canvas with index %d." % (i))
            canvas = Canvas(600, 800)
            cans[name] = canvas
            markCanvas(canvas, fileName, 0.05, y=0.009, size=0.025, color=2)
        elif i % 6 == 0:
            # start a new canvas
            log.debug("Creating new canvas with index %d." % (i / 6))
            canvas = Canvas(600, 800)
            cans[fileName + '/' + name] = canvas
            canvas.Divide(2, 3)
            markCanvas(canvas, fileName, 0.05, y=0.009, size=0.025, color=2)
        # draw the histogram
        hist = histos[name]
        log.debug("Drawing histogram #" + str(i % 6 + 1) + ": " + hist.GetName()
                  + " (" + hist.__class__.__name__ + ") in canvas #" + str(int(i / 6)))
        hist.color = kBlue
        if not separateFiles:
            canvas.cd(i % 6 + 1)
        if logscale:
            gPad.SetLogy()
        plotHistos(histos=[hist], text=name)
        if fileDescr:
            markPad(text=fileDescr, x=.14, y=.8, size=0.041, color=2)
    return cans
def significance_scan_2d(
        x,
        bins=(16, 2000., 6000., 10, 100., 300.),
        xtitle='#font[12]{H}_{#font[132]{T}} [GeV]',
        ytitle='#font[12]{E}_{#font[132]{T}}^{#font[132]{miss}} [GeV]',
        selection='',
        weight=''):
    """Scan a 2D cut grid in (x-axis, y-axis) and plot the discovery
    significance of the single configured signal over the total background.

    The significance is calculated using Cowan's analytic formula for
    discovery significance of a signal with background and uncertainty
    on the background.  See:
    https://www.pp.rhul.ac.uk/~cowan/stat/notes/medsigNote.pdf

    Arguments:
    x -- the 2D expression to project ('yvar:xvar' style)
    bins -- (nbinsx, xlo, xhi, nbinsy, ylo, yhi)
    xtitle/ytitle -- axis labels
    selection, weight -- passed through to ipyhep.tree.project

    Returns a dict of bookkeeping objects ('x', 'canvas', 'h_signif', ...).
    Relies on the module-level configuration globals declared below.
    """
    global data
    global bkgs
    global sigs
    global treename
    global datasearchpath
    global datadrivensearchpath
    global bkgsearchpath
    global sigsearchpath
    global lumi
    assert x
    assert len(bins) == 6
    assert bkgs
    assert sigs
    assert lumi
    assert bkgsearchpath
    assert sigsearchpath
    save = ['pdf', 'png']
    ## only use first sig
    assert len(sigs) == 1
    sig = sigs[0]
    ## save stuff to bookkeep and return
    stuff = dict()
    stuff['x'] = x
    ## get background histograms
    h_bkgs = list()
    n_bkgs = list()
    if bkgs:
        for bkg in bkgs:
            if isinstance(bkg, list):
                # a list entry means "sum these samples into one background"
                h_subtotal = None
                for dsid in bkg:
                    assert isinstance(dsid, str)
                    h_bkg = None
                    if dsid.isdigit():
                        ## mc backgrounds
                        sp = bkgsearchpath % int(dsid)
                        newx = '%s::%s::%s' % (sp, treename, x)
                        h_bkg = ipyhep.tree.project(newx, bins=bins, xtitle=xtitle, ytitle=ytitle, selection=selection, weight=weight)
                    else:
                        ## data-driven backgrounds
                        assert dsid == 'fakes' or dsid == 'efakes'
                        sp = datadrivensearchpath
                        newx = '%s::%s::%s' % (sp, treename, x)
                        h_bkg = ipyhep.tree.project(newx, bins=bins, xtitle=xtitle, ytitle=ytitle, selection=selection, weight=weight)
                    if h_bkg:
                        if h_subtotal:
                            h_subtotal.Add(h_bkg)
                        else:
                            h_subtotal = h_bkg.Clone()
                if h_subtotal:
                    h_bkgs.append(h_subtotal)
                    dsid = bkg[0]  ## lists of combined backgrounds use the first dsid
                    n_bkgs.append(dsid)
            else:
                dsid = bkg
                assert isinstance(dsid, str)
                h_bkg = None
                if dsid.isdigit():
                    ## mc backgrounds
                    sp = bkgsearchpath % int(dsid)
                    newx = '%s::%s::%s' % (sp, treename, x)
                    h_bkg = ipyhep.tree.project(newx, bins=bins, xtitle=xtitle, ytitle=ytitle, selection=selection, weight=weight)
                else:
                    ## data-driven backgrounds
                    assert dsid == 'fakes' or dsid == 'efakes'
                    sp = datadrivensearchpath
                    newx = '%s::%s::%s' % (sp, treename, x)
                    h_bkg = ipyhep.tree.project(newx, bins=bins, xtitle=xtitle, ytitle=ytitle, selection=selection, weight=weight)
                if h_bkg:
                    h_bkgs.append(h_bkg)
                    n_bkgs.append(dsid)
    assert h_bkgs
    ## get signal histograms
    h_sig = None
    if sig:
        dsid = sig
        sp = sigsearchpath % int(dsid)
        newx = '%s::%s::%s' % (sp, treename, x)
        h_sig = ipyhep.tree.project(newx, bins=bins, xtitle=xtitle, ytitle=ytitle, selection=selection, weight=weight)
    assert h_sig
    ## scale background histograms
    if h_bkgs:
        assert len(h_bkgs) == len(n_bkgs), '%s\n%s' % (h_bkgs, n_bkgs)
        for h, dsid in zip(h_bkgs, n_bkgs):
            sf = ipyhep.sampleops.get_sf(dsid)
            if dsid.isdigit():
                # MC samples are additionally scaled to the target luminosity
                sf *= lumi / __ntuple_lumi
            h.Scale(sf)
    ## scale signal histogram
    if h_sig:
        sf = ipyhep.sampleops.get_sf(sig)
        sf *= lumi / __ntuple_lumi
        h_sig.Scale(sf)
    ## set systematic errors on backgrounds
    if h_bkgs:
        assert len(h_bkgs) == len(n_bkgs), '%s\n%s' % (h_bkgs, n_bkgs)
        # HACK: systematics updated 2017-06-08 for the SUSY diphoton analysis
        syst_fracs = dict()
        syst_fracs['407013'] = 0.50  # #gamma#gamma
        syst_fracs['361039'] = 0.50  # #gammaj+jj
        syst_fracs['fakes'] = 0.50   # fakes
        syst_fracs['efakes'] = 0.20  # efakes
        syst_fracs['301890'] = 0.20  # W#gamma
        syst_fracs['301899'] = 0.20  # Z#gamma
        syst_fracs['407022'] = 0.27  # W#gamma#gamma
        syst_fracs['407025'] = 0.45  # Z#gamma#gamma
        syst_fracs['407028'] = 0.45  # Z#gamma#gamma
        for h, dsid in zip(h_bkgs, n_bkgs):
            nbinsx = h.GetNbinsX()
            nbinsy = h.GetNbinsY()
            # BUGFIX: ROOT bin indices run 1..GetNbinsN() inclusive; the
            # original xrange(nbins) visited the underflow bin 0 and
            # skipped the last real bin on each axis.
            for i_x in xrange(1, nbinsx + 1):
                for i_y in xrange(1, nbinsy + 1):
                    c = h.GetBinContent(i_x, i_y)
                    e = h.GetBinError(i_x, i_y)
                    syst = syst_fracs.get(dsid, 0.0)
                    assert syst
                    # add the flat systematic in quadrature with the stat error
                    h.SetBinError(i_x, i_y, math.sqrt(e * e + c * c * syst * syst))
    ## total background
    h_bkg_total = None
    if h_bkgs:
        for h_bkg in h_bkgs:
            if h_bkg_total:
                h_bkg_total.Add(h_bkg)
            else:
                h_bkg_total = h_bkg.Clone()
    assert h_bkg_total
    ## significance scan
    h_signif = Hist2D(*bins)
    h_signif.SetTitle(';%s;%s' % (xtitle, ytitle))
    nbinsx = h_signif.GetNbinsX()
    nbinsy = h_signif.GetNbinsY()
    for i_x in xrange(1, nbinsx + 1):
        for i_y in xrange(1, nbinsy + 1):
            sigma = ROOT.Double(0)
            # integrate from this bin to overflow in both directions
            # (cumulative cut s and b)
            s = h_sig.Integral(i_x, nbinsx + 1, i_y, nbinsy + 1)
            b = h_bkg_total.IntegralAndError(i_x, nbinsx + 1, i_y, nbinsy + 1, sigma)
            sigma = float(sigma)
            # if b < 0.10: ## HACK: force background to be at least 0.10 events with 100% uncert.
            #     b = 0.10
            #     sigma = 0.10
            if b < 0.20 and sigma < 0.20:
                sigma = 0.20
            za = _calculate_significance(s, b, sigma)
            xval = h_signif.GetXaxis().GetBinLowEdge(i_x)
            yval = h_signif.GetYaxis().GetBinLowEdge(i_y)
            h_signif.Fill(xval, yval, za)
    ## make canvas
    canvas = Canvas(800, 600)
    canvas.SetRightMargin(0.17)
    canvas.cd()
    stuff['canvas'] = canvas
    ## draw
    ROOT.gStyle.SetPaintTextFormat("3.1f")
    h_signif.Draw('COLZ')
    h_signif2 = h_signif.Clone()
    h_signif2.SetMarkerSize(1.2)
    h_signif2.SetMarkerColor(ipyhep.style.black)
    h_signif2.Draw('TEXT SAME')
    stuff['h_signif'] = h_signif
    stuff['h_signif2'] = h_signif2
    canvas.Update()
    ## save figures
    if save:
        ipyhep.file.save_figures(canvas, x, save)
    return stuff
def stack(x, *args, **kwargs):
    """Make a stacked data/MC comparison plot for expression *x*.

    Projects data, background and signal histograms via ipyhep.tree.project,
    scales MC to luminosity, stacks backgrounds, optionally blinds the data,
    and can add a ratio (or scale-factor) subplot via kwargs ('do_ratio',
    'sfratio', 'sfname', 'sffile', 'ratio_min', 'ratio_max', 'save', ...).

    Updates the module-level configuration globals from the corresponding
    kwargs and stores all products in the global `results` dict, which is
    also returned.
    """
    ## parse arguments
    _data = kwargs.pop('data', None)
    _bkgs = kwargs.pop('bkgs', None)
    _sigs = kwargs.pop('sigs', None)
    _treename = kwargs.pop('treename', None)
    _datasearchpath = kwargs.pop('datasearchpath', None)
    _datadrivensearchpath = kwargs.pop('datadrivensearchpath', None)
    _bkgsearchpath = kwargs.pop('bkgsearchpath', None)
    _sigsearchpath = kwargs.pop('sigsearchpath', None)
    _lumi = kwargs.pop('lumi', None)
    global data
    global bkgs
    global sigs
    global treename
    global datasearchpath
    global datadrivensearchpath
    global bkgsearchpath
    global sigsearchpath
    global lumi
    # kwargs override the module-level defaults (falsy values keep defaults)
    data = _data or data
    bkgs = _bkgs or bkgs
    sigs = _sigs or sigs
    treename = _treename or treename
    datasearchpath = _datasearchpath or datasearchpath
    datadrivensearchpath = _datadrivensearchpath or datadrivensearchpath
    bkgsearchpath = _bkgsearchpath or bkgsearchpath
    sigsearchpath = _sigsearchpath or sigsearchpath
    if _lumi:
        lumi = float(_lumi)
    xtitle = kwargs.pop('xtitle', '')
    ytitle = kwargs.pop('ytitle', '')
    logx = bool(kwargs.pop('logx', False))
    logy = bool(kwargs.pop('logy', False))
    blind = kwargs.pop('blind', None)
    has_blinded_data = False
    ## save stuff to bookkeep and return
    stuff = dict()
    stuff['x'] = x
    ## get data histogram
    h_data = None
    if data:
        sp = datasearchpath  # HACK: just data to True!
        newx = '%s::%s::%s' % (sp, treename, x)
        h_data = ipyhep.tree.project(newx, *args, **kwargs)
        if h_data:
            stuff['h_data'] = h_data
    ## blind the data?
    if h_data and not blind is None:
        if isinstance(blind, tuple):
            # blind a window (blind1, blind2): zero bins fully inside it
            blind1, blind2 = blind
            nbins = h_data.GetNbinsX()
            for i_bin in xrange(1, nbins + 2):  # skip underflow (but not overflow)
                xval1 = h_data.GetXaxis().GetBinLowEdge(i_bin)
                xval2 = h_data.GetXaxis().GetBinUpEdge(i_bin)
                if xval1 >= blind1 and xval2 <= blind2:
                    h_data.SetBinContent(i_bin, 0.0)
                    h_data.SetBinError(i_bin, 0.0)
                    has_blinded_data = True
        else:
            # blind everything above a threshold
            nbins = h_data.GetNbinsX()
            for i_bin in xrange(1, nbins + 2):  # skip underflow (but not overflow)
                xval = h_data.GetXaxis().GetBinLowEdge(i_bin)
                if xval >= blind:
                    h_data.SetBinContent(i_bin, 0.0)
                    h_data.SetBinError(i_bin, 0.0)
                    has_blinded_data = True
    ## get background histograms
    h_bkgs = list()
    n_bkgs = list()
    if bkgs:
        for bkg in bkgs:
            if isinstance(bkg, list):
                # a list entry means "sum these samples into one background"
                h_subtotal = None
                for dsid in bkg:
                    assert isinstance(dsid, str)
                    h_bkg = None
                    if dsid.isdigit():
                        ## mc backgrounds
                        sp = bkgsearchpath % int(dsid)
                        newx = '%s::%s::%s' % (sp, treename, x)
                        h_bkg = ipyhep.tree.project(newx, *args, **kwargs)
                    else:
                        ## data-driven backgrounds
                        assert dsid == 'fakes' or dsid == 'efakes'
                        sp = datadrivensearchpath % dsid
                        newx = '%s::%s::%s' % (sp, treename, x)
                        h_bkg = ipyhep.tree.project(newx, *args, **kwargs)
                    if h_bkg:
                        if h_subtotal:
                            h_subtotal.Add(h_bkg)
                        else:
                            h_subtotal = h_bkg.Clone()
                if h_subtotal:
                    h_bkgs.append(h_subtotal)
                    # lists of combined backgrounds use the first dsid
                    dsid = bkg[0]
                    n_bkgs.append(dsid)
            else:
                dsid = bkg
                assert isinstance(dsid, str)
                h_bkg = None
                if dsid.isdigit():
                    ## mc backgrounds
                    sp = bkgsearchpath % int(dsid)
                    newx = '%s::%s::%s' % (sp, treename, x)
                    h_bkg = ipyhep.tree.project(newx, *args, **kwargs)
                else:
                    ## data-driven backgrounds
                    assert dsid == 'fakes' or dsid == 'efakes'
                    sp = datadrivensearchpath % dsid
                    newx = '%s::%s::%s' % (sp, treename, x)
                    h_bkg = ipyhep.tree.project(newx, *args, **kwargs)
                if h_bkg:
                    h_bkgs.append(h_bkg)
                    n_bkgs.append(dsid)
    if h_bkgs:
        stuff['h_bkgs'] = h_bkgs
    ## get signal histograms
    h_sigs = list()
    n_sigs = list()
    if sigs:
        for dsid in sigs:
            sp = sigsearchpath % int(dsid)
            newx = '%s::%s::%s' % (sp, treename, x)
            h_sig = ipyhep.tree.project(newx, *args, **kwargs)
            if h_sig:
                h_sigs.append(h_sig)
                n_sigs.append(dsid)
    if h_sigs:
        stuff['h_sigs'] = h_sigs
        # NOTE(review): assert here is trivially true under the guard --
        # possibly leftover debug code; original nesting was ambiguous.
        assert h_sigs
    ## style data
    if h_data:
        h_data.title = 'Data'
        h_data.linecolor = ipyhep.style.black
        h_data.linewidth = 2
        h_data.markercolor = ipyhep.style.black
        h_data.markerstyle = 20
        h_data.markersize = 1.2
        h_data.fillstyle = ipyhep.style.fill_hollow
        h_data.drawstyle = 'PE'
        h_data.legendstyle = 'LP'
    ## scale and style background histograms
    if h_bkgs:
        assert len(h_bkgs) == len(n_bkgs), '%s\n%s' % (h_bkgs, n_bkgs)
        for h, dsid in zip(h_bkgs, n_bkgs):
            sf = ipyhep.sampleops.get_sf(dsid)
            if dsid.isdigit():
                # MC samples are additionally scaled to the target luminosity
                sf *= lumi / __ntuple_lumi
            h.Scale(sf)
            h.title = ipyhep.sampleops.get_label(dsid)
            h.linecolor = ipyhep.style.black
            h.linewidth = 1
            h.markercolor = ipyhep.sampleops.get_color(dsid)
            h.fillcolor = ipyhep.sampleops.get_color(dsid)
            h.fillstyle = ipyhep.style.fill_solid
            h.legendstyle = 'F'
    ## calculate stat error on total background
    h_bkg_total = None
    if h_bkgs:
        for h_bkg in h_bkgs:
            if h_bkg_total:
                h_bkg_total.Add(h_bkg)
            else:
                h_bkg_total = h_bkg.Clone()
    stuff['h_bkg_total'] = h_bkg_total
    ## style h_bkg_total
    if h_bkg_total:
        h_bkg_total.title = 'stat. uncert.'
        h_bkg_total.linecolor = ipyhep.style.black
        h_bkg_total.linewidth = 1
        h_bkg_total.markerstyle = 0
        h_bkg_total.fillcolor = ipyhep.style.dark_gray
        h_bkg_total.fillstyle = ipyhep.style.fill_lines
        h_bkg_total.drawstyle = 'E2'
        h_bkg_total.legendstyle = 'LF'
    ## scale and style signal histograms
    if h_sigs:
        assert len(h_sigs) == len(n_sigs)
        for h, dsid in zip(h_sigs, n_sigs):
            sf = ipyhep.sampleops.get_sf(dsid)
            sf *= lumi / __ntuple_lumi
            h.Scale(sf)
            h.title = ipyhep.sampleops.get_label(dsid)
            h.linecolor = ipyhep.sampleops.get_color(dsid)
            h.linewidth = 3
            h.fillstyle = ipyhep.style.fill_hollow
            h.markerstyle = 0
            h.drawstyle = 'HIST'
            h.legendstyle = 'L'
    ## build list of all_hists
    all_hists = list()
    main_hists = list()  # everything except signals (used for the main legend)
    if h_data:
        all_hists.append(h_data)
        main_hists.append(h_data)
    if h_bkgs:
        all_hists.extend(h_bkgs)
        main_hists.extend(h_bkgs)
    if h_bkg_total:
        all_hists.append(h_bkg_total)
        main_hists.append(h_bkg_total)
    if h_sigs:
        all_hists.extend(h_sigs)
    ## get statistics
    if all_hists:
        stats_list = list()
        for h in all_hists:
            stats_list.extend(get_stats(h))
        html = convert_table_to_html(convert_stats_to_table(stats_list))
        stuff['html'] = html
    ## renormalize for bin widths
    bins = kwargs.pop('bins', None)
    if bins and isinstance(bins, list):
        for h in all_hists:
            renormalize_for_bin_widths(h, bins)
    ## stack background histograms
    if h_bkgs:
        assert len(h_bkgs) == len(n_bkgs), '%s\n%s' % (h_bkgs, n_bkgs)
        # reverse so the largest background ends up on the bottom of the
        # stack, then restore the original order afterwards
        h_bkgs.reverse()
        n_bkgs.reverse()
        # NOTE(review): this local name shadows numpy's hstack used at
        # module level elsewhere in this file (inside this function only).
        hstack = HistStack()
        for h in h_bkgs:
            hstack.Add(h)
        hstack.title = 'stack sum'
        hstack.drawstyle = 'HIST'
        stuff['stack'] = hstack
        h_bkgs.reverse()
        n_bkgs.reverse()
    # ## convert data to TGraphAsymmErrors
    # g_data = None
    # if h_data:
    #     if __use_poissonize:
    #         g_data = poissonize.GetPoissonizedGraph(h_data)
    #     else:
    #         g_data = ROOT.TGraphAsymmErrors()
    #         i_g = 0
    #         nbins = h_data.GetNbinsX()
    #         for i_bin in xrange(1, nbins+1):  # skip underflow/overflow
    #             c = h_data.GetBinContent(i_bin)
    #             e = h_data.GetBinError(i_bin)
    #             if c != 0.0:
    #                 g_data.SetPoint(i_g, h_data.GetBinCenter(i_bin), c)
    #                 g_ratio.SetPointError(i_g,
    #                                       h_data.GetBinWidth(i_bin)/2.,
    #                                       h_data.GetBinWidth(i_bin)/2.,
    #                                       e,
    #                                       e)
    #                 i_g += 1
    ## build list of objects to draw
    objects = list()
    if h_bkgs:
        objects.append(stuff['stack'])
        objects.append(stuff['h_bkg_total'])
    if h_sigs:
        objects.extend(h_sigs)
    if h_data:
        objects.append(h_data)
    ## set xlimits and ylimits
    ypadding = 0.21
    logy_crop_value = 7e-3
    xmin, xmax, ymin, ymax = 0.0, 1.0, 0.0, 1.0
    if objects:
        xmin, xmax, ymin, ymax = get_limits(objects, logx=logx, logy=logy,
                                            ypadding=ypadding,
                                            logy_crop_value=logy_crop_value)
    if logy:
        ymin = 7e-3
    else:
        ymin = 0.0
    xlimits = (xmin, xmax)
    ylimits = (ymin, ymax)
    stuff['xlimits'] = xlimits
    stuff['ylimits'] = ylimits
    ## remove xtitle for do_ratio
    _xtitle = xtitle
    if h_data and h_bkg_total and kwargs.get('do_ratio'):
        _xtitle = ''
    ## make canvas
    canvas = Canvas(800, 600)
    stuff['canvas'] = canvas
    ## draw the objects
    if objects:
        canvas.cd()
        draw(objects, pad=canvas, xtitle=_xtitle, ytitle=ytitle,
             xlimits=xlimits, ylimits=ylimits)
    ## set log x/y, for some reason doesn't work before draw
    if logx or logy:
        if logx:
            canvas.SetLogx()
        if logy:
            canvas.SetLogy()
        canvas.Update()
    ## draw blind_line
    if has_blinded_data:
        if isinstance(blind, tuple):
            blind_list = list(blind)
        else:
            blind_list = [blind]
        blind_lines = list()
        for bl in blind_list:
            line_y1 = ymin
            line_y2 = ymax
            blind_line = ROOT.TLine(bl, line_y1, bl, line_y2)
            blind_line.SetLineColor(ROOT.kGray + 2)
            blind_line.SetLineStyle(7)
            blind_line.SetLineWidth(2)
            blind_line.Draw()
            blind_lines.append(blind_line)
        stuff['blind_lines'] = blind_lines
        canvas.Update()
    ## legend
    # place legends away from the bulk of the distribution
    lefty = True
    if h_bkg_total:
        lefty = is_left_sided(h_bkg_total)
    elif h_data:
        lefty = is_left_sided(h_data)
    elif h_sigs:
        lefty = is_left_sided(h_sigs[0])
    if main_hists:
        header = '%.1f fb^{-1}, 13 TeV' % (lumi / 1000.0)
        if lefty:
            legend = Legend(main_hists, pad=canvas, header=header, textsize=16,
                            topmargin=0.03, leftmargin=0.60, rightmargin=0.02,
                            entrysep=0.01, entryheight=0.04)
        else:
            legend = Legend(main_hists, pad=canvas, header=header, textsize=16,
                            topmargin=0.03, leftmargin=0.03, rightmargin=0.59,
                            entrysep=0.01, entryheight=0.04)
        legend.Draw()
        stuff['legend'] = legend
    if h_sigs:
        # header = 'ATLAS Internal'
        header = ''
        if lefty:
            legend2 = Legend(h_sigs, pad=canvas, header=header, textsize=16,
                             topmargin=0.03, leftmargin=0.37, rightmargin=0.23,
                             entrysep=0.01, entryheight=0.04)
        else:
            legend2 = Legend(h_sigs, pad=canvas, header=header, textsize=16,
                             topmargin=0.03, leftmargin=0.20, rightmargin=0.40,
                             entrysep=0.01, entryheight=0.04)
        legend2.Draw()
        stuff['legend2'] = legend2
    ## do_ratio
    if h_data and h_bkg_total and kwargs.get('do_ratio'):
        ## top canvas
        top_canvas = stuff.pop('canvas')
        stuff['top_canvas'] = top_canvas
        ## make SM/SM with error band: h_ratio_band
        i_sfratio = int(kwargs.get('sfratio', -1))
        if i_sfratio < 0:
            # ratio plot of Data/Model
            h_ratio_band = h_bkg_total.Clone()
            nbins = h_ratio_band.GetNbinsX()
            for i_bin in xrange(nbins + 2):
                h_ratio_band.SetBinContent(i_bin, 1.0)
                c = h_bkg_total.GetBinContent(i_bin)
                e = h_bkg_total.GetBinError(i_bin) / c if c > 0.0 else 0.0
                h_ratio_band.SetBinError(i_bin, e)
            stuff['h_ratio_band'] = h_ratio_band
        else:
            # ratio plot of Scale Factor for ith background
            hi = h_bkgs[i_sfratio]
            h_ratio_band = hi.Clone()
            nbins = h_ratio_band.GetNbinsX()
            for i_bin in xrange(nbins + 2):
                h_ratio_band.SetBinContent(i_bin, 1.0)
                c = hi.GetBinContent(i_bin)
                e = hi.GetBinError(i_bin) / c if c > 0.0 else 0.0
                h_ratio_band.SetBinError(i_bin, e)
            stuff['h_ratio_band'] = h_ratio_band
        ## make data/(SM) h_ratio
        if i_sfratio < 0:
            h_ratio = h_data.Clone()
            h_ratio.Divide(h_data, h_bkg_total, 1.0, 1.0)
            stuff['h_ratio'] = h_ratio
        else:
            ## SF1 = 1.0 + (data - MCtot) / MC1
            sfname = kwargs.get('sfname')
            sffile = kwargs.get('sffile')
            if not sfname:
                sfname = 'h_sf'
            hi = h_bkgs[i_sfratio]
            h_numer = h_data.Clone()
            h_numer.Add(h_bkg_total, -1.0)
            ## do the division
            h_ratio = h_data.Clone(sfname)
            h_ratio.Divide(h_numer, hi, 1.0, 1.0)
            ## add the 1.0
            nbins = h_ratio.GetNbinsX()
            for i_bin in xrange(nbins + 2):
                c = h_ratio.GetBinContent(i_bin)
                h_ratio.SetBinContent(i_bin, c + 1.0)
                h_ratio_band.SetBinContent(i_bin, c + 1.0)
            ## ignore bins with no data for SF
            for i_bin in xrange(nbins + 2):
                c = h_data.GetBinContent(i_bin)
                if c <= 0:
                    h_ratio.SetBinContent(i_bin, 0.0)
                    h_ratio.SetBinError(i_bin, 0.0)
                    h_ratio_band.SetBinError(i_bin, 0.0)
            stuff['h_ratio'] = h_ratio
            if sffile:
                f_out = ipyhep.file.write(h_ratio, sffile)
                # f_out.Close()
        ## convert ratio to a TGraphErrors so that Draw('E0')
        ## shows error bars for points off the pad
        g_ratio = ROOT.TGraphErrors()
        i_g = 0
        for i_bin in xrange(1, nbins + 1):  # skip underflow/overflow
            ratio_content = h_ratio.GetBinContent(i_bin)
            if ratio_content != 0.0:
                g_ratio.SetPoint(i_g, h_ratio.GetBinCenter(i_bin), ratio_content)
                g_ratio.SetPointError(i_g, h_ratio.GetBinWidth(i_bin) / 2.,
                                      h_ratio.GetBinError(i_bin))
                i_g += 1
            else:
                h_ratio.SetBinError(i_bin, 0.0)
        stuff['g_ratio'] = g_ratio
        ## style ratio
        h_ratio_band.title = 'bkg uncert.'
        if i_sfratio < 0:
            h_ratio_band.linecolor = ipyhep.style.yellow
        else:
            h_ratio_band.linecolor = ipyhep.style.light_gray
        h_ratio_band.linewidth = 0
        h_ratio_band.markerstyle = 0
        if i_sfratio < 0:
            h_ratio_band.fillcolor = ipyhep.style.yellow
        else:
            # NOTE(review): this else-branch sets linecolor where the
            # if-branch sets fillcolor -- likely a copy-paste slip; the SF
            # band would stay with the Clone()'d fill color. Confirm
            # whether fillcolor = light_gray was intended.
            h_ratio_band.linecolor = ipyhep.style.light_gray
        h_ratio_band.fillstyle = ipyhep.style.fill_solid
        h_ratio_band.drawstyle = 'E2'
        h_ratio_band.legendstyle = 'F'
        h_ratio.title = 'ratio'
        h_ratio.linecolor = ipyhep.style.black
        h_ratio.linewidth = 2
        h_ratio.markercolor = ipyhep.style.black
        h_ratio.markerstyle = 20
        h_ratio.markersize = 1.2
        h_ratio.fillstyle = ipyhep.style.fill_hollow
        h_ratio.drawstyle = 'PE'
        h_ratio.legendstyle = 'LP'
        ## bottom canvas
        bottom_canvas = Canvas(800, 600)
        bottom_canvas.cd()
        stuff['bottom_canvas'] = bottom_canvas
        ## set ratio ylimits
        ratio_min = kwargs.get('ratio_min', -0.2)
        ratio_max = kwargs.get('ratio_max', 2.2)
        ratio_ylimits = (ratio_min, ratio_max)
        ## draw ratio band
        if i_sfratio < 0:
            _ytitle = 'Data / Model'
        else:
            hi = h_bkgs[i_sfratio]
            _ytitle = 'SF(%s)' % hi.title
        draw([h_ratio_band], pad=bottom_canvas, xtitle=xtitle, ytitle=_ytitle,
             xlimits=xlimits, ylimits=ratio_ylimits)
        ## set log x/y, for some reason doesn't work before draw?
        if logx:
            bottom_canvas.SetLogx()
        bottom_canvas.Update()
        ### make horiz lines in ratio plot every 0.5:
        line_ys = [y / 10.0
                   for y in range(10 * int(round(ratio_min)),
                                  10 * int(round(ratio_max)) + 5, 5)]
        # x-range of the drawn frame comes from the top canvas
        line_x1 = canvas.GetUxmin()
        line_x2 = canvas.GetUxmax()
        line_xwidth = abs(line_x2 - line_x1)
        lines = []
        for line_y in line_ys:
            line = ROOT.TLine(line_x1 + 0.02 * line_xwidth, line_y,
                              line_x2 - 0.02 * line_xwidth, line_y)
            line.SetLineWidth(1)
            line.SetLineStyle(7)
            if line_y == 1.0:
                line.SetLineColor(ROOT.kGray + 2)
            else:
                line.SetLineColor(ROOT.kGray + 0)
            line.Draw()
            lines.append(line)
        stuff['lines'] = lines
        ## draw blind_line
        if has_blinded_data:
            if isinstance(blind, tuple):
                blind_list = list(blind)
            else:
                blind_list = [blind]
            blind_lines = list()
            for bl in blind_list:
                line_y1 = ymin
                line_y2 = ymax
                blind_line = ROOT.TLine(bl, line_y1, bl, line_y2)
                blind_line.SetLineColor(ROOT.kGray + 2)
                blind_line.SetLineStyle(7)
                blind_line.SetLineWid(2)
                blind_line.Draw()
                blind_lines.append(blind_line)
            stuff['blind_lines2'] = blind_lines
            canvas.Update()
        ## draw ratio
        g_ratio.Draw('PE0')
        # h_ratio.GetYaxis().SetRangeUser(ratio_min, ratio_max)
        # h_ratio.Draw('PE,SAME')
        ## shared canvas
        shared_canvas = Canvas(800, 800)
        shared_plot = plot_shared_axis(top_canvas, bottom_canvas,
                                       canvas=shared_canvas, split=0.35,
                                       axissep=0.01)
        stuff['canvas'] = shared_canvas
        canvas = shared_canvas
    ## save figures
    save = kwargs.get('save')
    if save is None:  # NOTE: save can be False to skip saving
        save = ['pdf', 'png']
    if save:
        ipyhep.file.save_figures(canvas, x, save)
    global results
    results = stuff
    return stuff
# --- flat script section: assemble subset-A feature/label arrays, set up
# --- ROOT/rootpy plotting state, and prepare subset-B input trees.
# Features: signal rows stacked on top of background rows.
x1A = vstack((sigTestA, bkgTestA))
# Labels: 1 for signal events, 0 for background events, aligned with x1A.
y1A = hstack((onesInt(len(sigTestA)), zerosInt(len(bkgTestA))))
y1A = transpose(y1A)
from rootpy.interactive import wait
from rootpy.plotting import Canvas, Hist, Hist2D, Hist3D, Legend
from rootpy.io import root_open as ropen, DoesNotExist
from rootpy.plotting import HistStack
import ROOT
# Batch mode: no GUI windows while drawing/saving canvases.
ROOT.gROOT.SetBatch(True)
f = ropen('output.root','recreate')
c1 = Canvas()
c1.cd()
# Per-process-type histogram containers (keys are sample labels).
histDictSigA = {'W':[],'Z':[],'WW':[],'ZZ':[],'st':[],'ttbar':[],'WZ':[],'WH125':[]}
histDictBkgA = {'W':[],'Z':[],'WW':[],'ZZ':[],'st':[],'ttbar':[],'WZ':[],'WH125':[]}
# Stack colours, indexed via colourDict below.
coloursForStack = ['blue', 'green', 'red', 'yellow', 'black', 'pink', 'magenta', 'cyan']
colourDict = {'W':0,'Z':1,'WW':2,'ZZ':3,'st':4,'ttbar':5,'WZ':6,'WH125':7}
lblcount = 0
# Transpose to variable-major layout for per-variable histogramming.
sigTestA = transpose(sigTestA)
bkgTestA = transpose(bkgTestA)
# Second-half ('B') subsets of the signal and background trees.
sigtemp1B = cutTree(sig,False,len(sig)/2,'B')
bkgtemp1B = cutTree(bkg,False,len(bkg)/2,'B')
def pvalue_plot(poi, pvalues, pad=None, xtitle='X', ytitle='P_{0}', linestyle=None, linecolor=None, yrange=None, verbose=False):
    """
    Draw a pvalue plot

    Parameters
    ----------
    poi : list
        List of POI values tested
    pvalues : list
        List of p-values or list of lists of p-values to overlay
        multiple p-value curves
    pad : Canvas or Pad, optional (default=None)
        Pad to draw onto. Create new pad if None.
    xtitle : str, optional (default='X')
        The x-axis label (POI name)
    ytitle : str, optional (default='P_{0}')
        The y-axis label
    linestyle : str or list, optional (default=None)
        Line style for the p-value graph or a list of linestyles for
        multiple p-value graphs.
    linecolor : str or list, optional (default=None)
        Line color for the p-value graph or a list of linestyles for
        multiple p-value graphs.
    yrange : tuple, optional (default=None)
        If given, (ymin, ymax) forced on the y axis; ymin also becomes the
        lower bound for drawing sigma level lines.
    verbose : bool, optional (default=False)
        If True, log the graph points.

    Returns
    -------
    pad : Canvas
        The pad.
    graphs : list of Graph
        The p-value graphs

    """
    if not pvalues:
        raise ValueError("pvalues is empty")
    if not poi:
        raise ValueError("poi is empty")
    # determine if pvalues is list or list of lists
    if not isinstance(pvalues[0], (list, tuple)):
        pvalues = [pvalues]
    # cycle() lets a short style/color list wrap around over many curves
    if linecolor is not None:
        if not isinstance(linecolor, list):
            linecolor = [linecolor]
        linecolor = cycle(linecolor)
    if linestyle is not None:
        if not isinstance(linestyle, list):
            linestyle = [linestyle]
        linestyle = cycle(linestyle)
    # preserve_current_canvas() restores ROOT's global current pad on exit
    with preserve_current_canvas():
        if pad is None:
            pad = Canvas()
        pad.cd()
        pad.SetLogy()
        # create the axis
        min_poi, max_poi = min(poi), max(poi)
        haxis = Hist(1000, min_poi, max_poi)
        xaxis = haxis.xaxis
        yaxis = haxis.yaxis
        xaxis.SetRangeUser(min_poi, max_poi)
        haxis.Draw('AXIS')
        min_pvalue = float('inf')
        graphs = []
        # one Graph per p-value curve; track the overall minimum p-value
        for ipv, pv in enumerate(pvalues):
            graph = Graph(len(poi), linestyle='dashed', drawstyle='L', linewidth=2)
            for idx, (point, pvalue) in enumerate(zip(poi, pv)):
                graph.SetPoint(idx, point, pvalue)
            if linestyle is not None:
                graph.linestyle = linestyle.next()  # Py2 iterator protocol
            if linecolor is not None:
                graph.linecolor = linecolor.next()
            graphs.append(graph)
            curr_min_pvalue = min(pv)
            if curr_min_pvalue < min_pvalue:
                min_pvalue = curr_min_pvalue
        if verbose:
            for graph in graphs:
                log.info(['{0:1.1f}'.format(xval) for xval in list(graph.x())])
                log.info(['{0:0.3f}'.format(yval) for yval in list(graph.y())])
        # automatically handles axis limits
        axes, bounds = draw(graphs, pad=pad, same=True, logy=True,
            xtitle=xtitle, ytitle=ytitle, xaxis=xaxis, yaxis=yaxis,
            ypadding=(0.2, 0.1), logy_crop_value=1E-300)
        if yrange is not None:
            xaxis, yaxis = axes
            yaxis.SetLimits(*yrange)
            yaxis.SetRangeUser(*yrange)
            min_pvalue = yrange[0]
        # draw sigma levels up to minimum of pvalues
        line = Line()
        line.SetLineStyle(2)
        line.SetLineColor(2)
        latex = ROOT.TLatex()
        latex.SetNDC(False)
        latex.SetTextSize(20)
        latex.SetTextColor(2)
        sigma = 0
        # one horizontal line + label per whole sigma, until below the plot range
        while True:
            pvalue = gaussian_cdf_c(sigma)
            if pvalue < min_pvalue:
                break
            # keepalive ties the drawn primitives to the pad so Python GC
            # does not delete them while the pad is alive
            keepalive(
                pad,
                latex.DrawLatex(max_poi, pvalue, " {0}#sigma".format(sigma)))
            keepalive(pad, line.DrawLine(min_poi, pvalue, max_poi, pvalue))
            sigma += 1
        pad.RedrawAxis()
        pad.Update()
    return pad, graphs
# Progress report: elapsed wall-clock seconds for this variable (Python 2 print).
print "[VARIABLE][" + variable + "]" + str(round(time.time() - t0,0)) + "seconds"

# With a ratio panel the canvas keeps its full height; without one it is
# shrunk by 20% since the lower pad is not needed.
canvas = Canvas(width=canvaswidth,height=int((1-(1-doRatio)*0.2)*canvasheight))
canvas.SetFrameBorderMode(0)

# Margin tuples are indexed with doRatio (False -> 0, True -> 1).
topmargins = (1.0 , 1.0 )
bottommargins = (0.0 , 0.4 )
leftmargins = (0.0 , 0.0 )
rightmargins = (0.0 , 0.0 )
top = topmargins[doRatio]
bottom = bottommargins[doRatio]
left = leftmargins[doRatio]
right = 1 - rightmargins[doRatio]

canvas.cd()
# Upper pad holding the histograms; y axis is log scale.
histpad = Pad(left,bottom,right, top,color="white",bordersize =5)
if not doRatio: histpad.SetBottomMargin(0.15)
histpad.SetFrameBorderMode(0)
histpad.Draw()
histpad.SetLogy()
histpad.cd()
histpad.SetFrameBorderSize(2)
histpad.SetFrameLineWidth(2);
canvas.cd()
#ratiopad = Pad(leftmargins[1],0.00,1 - rightmargins[1],bottommargins[1]-0.02)
# NOTE(review): fragment — the opening `try:` matching this `except` lives
# above this chunk; reproduced here as-is.
    stack.sum.Integral()
except:
    # Skip stacks that cannot be integrated (e.g. empty stacks).
    print "stack has no integral!"
    continue

if plotWithMPL:
    # Matplotlib layout: main axes (4/5 height) above a ratio axes (1/5),
    # sharing the x axis with no gap; hide the upper x tick labels.
    gs = mpl.gridspec.GridSpec(2,1,height_ratios=[4,1])
    gs.update(wspace=0.00, hspace=0.00)
    axes = plt.subplot(gs[0])
    axes_ratio = plt.subplot(gs[1], sharex=axes)
    plt.setp(axes.get_xticklabels(), visible=False)

if plotWithROOT:
    # ROOT layout: pad1 (top 70%) for the stack, pad2 (bottom 25%) for the ratio.
    c = Canvas(700,700)
    c.cd()
    pad1 = Pad( 0, 0.3, 1, 1.0)
    pad1.SetBottomMargin(0); # Upper and lower plot are joined
    pad1.SetGrid(); # Vertical grid
    pad1.Draw(); # Draw the upper pad: pad1
    c.cd()
    pad2 = Pad( 0, 0.05, 1, 0.3);
    pad2.SetTopMargin(0); # Upper and lower plot are joined
    pad2.SetBottomMargin(0.3); # Upper and lower plot are joined
    pad2.SetGrid(); # Vertical grid
    pad2.Draw(); # Draw the upper pad: pad1
    pad1.cd(); # pad1 becomes the current pad
    # Draw as a raw THStack so ROOT styling applies.
    rootstack = ROOT.THStack(stack)
    rootstack.Draw('HIST')
    rootstack.GetYaxis().SetTitle("Entries");
def drawAll( m, c, eff = True ) : can = Canvas( width=600, height=3000 ) pad = 2; can.Divide( 1, 6 ) if ( eff ) : can.cd(pad) pad+=1 yePi = f.tofEff.Get( "eff_Pi_"+m+"_"+c ) yeK = f.tofEff.Get( "eff_K_"+m+"_"+c ) yeP = f.tofEff.Get( "eff_P_"+m+"_"+c ) yePi.Draw() yePi.SetTitle( "Fit Yields; P [GeV]; dN/(P dP)" ) yePi.GetYaxis().SetRangeUser( 0.0, 1.7 ) yeK.SetMarkerColor( R.kRed ) yeK.Draw( "same") yeP.SetMarkerColor( R.kBlue ) yeP.Draw("same") ############################################ # zb mu can.cd(pad) pad+=1 R.gPad.SetLogy(0) piMu = f.Pi_zbMu.Get( "mu_Pi_"+ m +"_" + c ) kMu = f.K_zbMu.Get( "mu_K_"+ m +"_" + c ) pMu = f.P_zbMu.Get( "mu_P_"+ m +"_" + c ) pidMu = f.Pi_zbMu.Get( "deltamu_Pi_"+ m +"_" + c ).Clone( "pisdmu" ) kdMu = f.K_zbMu.Get( "deltamu_K_"+ m +"_" + c ).Clone( "ksdmu" ) pdMu = f.P_zbMu.Get( "deltamu_P_"+ m +"_" + c ).Clone( "psdmu" ) piMu.SetTitle( "<zb_{fit}> - <zb_{exp}>; P [GeV]" ) R.gPad.SetLogy(0) piMu.GetYaxis().SetRangeUser( -0.2, 0.2) piMu.Draw() kMu.Draw("same") kMu.SetMarkerColor( R.kRed ) pMu.Draw("same") pMu.SetMarkerColor( R.kBlue ) pidMu.Scale(10); pidMu.SetMarkerStyle( R.kOpenCircle ) pidMu.Draw("same") kdMu.Scale(10); kdMu.SetMarkerColor( R.kRed ) kdMu.SetMarkerStyle( R.kOpenCircle ) kdMu.Draw("same") pdMu.Scale(10); pdMu.SetMarkerColor( R.kBlue ) pdMu.SetMarkerStyle( R.kOpenCircle ) pdMu.Draw("same") ####################################################### #zd mu try : can.cd(pad) pad+=1 R.gPad.SetLogy(0) piMu = f.Pi_zdMu.Get( "mu_Pi_"+ m +"_" + c ) kMu = f.K_zdMu.Get( "mu_K_"+ m +"_" + c ) pMu = f.P_zdMu.Get( "mu_P_"+ m +"_" + c ) pidMu = f.Pi_zbMu.Get( "deltamu_Pi_"+ m +"_" + c ).Clone( "zdpisdmu" ) kdMu = f.K_zbMu.Get( "deltamu_K_"+ m +"_" + c ).Clone( "zdksdmu" ) pdMu = f.P_zbMu.Get( "deltamu_P_"+ m +"_" + c ).Clone( "zdpsdmu" ) piMu.SetTitle( "<zd_{fit}> - <zd_{exp}>" ) piMu.Draw() zdSig = 0.08 piMu.GetYaxis().SetRangeUser( -zdSig * 5.0, zdSig * 5.0) kMu.Draw("same") kMu.SetMarkerColor( R.kRed ) pMu.Draw("same") 
pMu.SetMarkerColor( R.kBlue ) pidMu.Scale(10); pidMu.SetMarkerStyle( R.kOpenCircle ) pidMu.Draw("same") kdMu.Scale(10); kdMu.SetMarkerColor( R.kRed ) kdMu.SetMarkerStyle( R.kOpenCircle ) kdMu.Draw("same") pdMu.Scale(10); pdMu.SetMarkerColor( R.kBlue ) pdMu.SetMarkerStyle( R.kOpenCircle ) pdMu.Draw("same") except : print "no" ########################################################## #zbSig try : can.cd(pad) pad+=1 R.gPad.SetLogy(0) piMu = f.Pi_zbSigma.Get( "sigma_Pi_"+ m +"_" + c ) kMu = f.K_zbSigma.Get( "sigma_K_"+ m +"_" + c ) pMu = f.P_zbSigma.Get( "sigma_P_"+ m +"_" + c ) piMu.SetTitle( "#sigma zb_{fit}" ) piMu.Draw() piMu.GetYaxis().SetRangeUser( 0.000, 0.055) kMu.Draw("same") kMu.SetMarkerColor( R.kRed ) pMu.Draw("same") pMu.SetMarkerColor( R.kBlue ) R.gPad.SetGrid(1, 1) except : print "no" ########################################################## #zdSig can.cd(pad) try : pad+=1 R.gPad.SetLogy(0) piMu = f.Pi_zdSigma.Get( "sigma_Pi_"+ m +"_" + c ) kMu = f.K_zdSigma.Get( "sigma_K_"+ m +"_" + c ) pMu = f.P_zdSigma.Get( "sigma_P_"+ m +"_" + c ) piMu.SetTitle( "#sigma zd_{fit}" ) piMu.Draw() piMu.GetYaxis().SetRangeUser( 0.0, 0.09) kMu.Draw("same") kMu.SetMarkerColor( R.kRed ) pMu.Draw("same") pMu.SetMarkerColor( R.kBlue ) R.gPad.SetGrid(1,1) except : print "no" ########################################################## #Yield can.cd(1) yPi = f.Pi_yield.Get( "yield_Pi_"+m+"_"+c ) yK = f.K_yield.Get( "yield_K_"+m+"_"+c ) yP = f.P_yield.Get( "yield_P_"+m+"_"+c ) yPi.Draw() yPi.SetTitle( "Fit Yields; P [GeV]; dN/(P dP)" ) yPi.GetYaxis().SetRangeUser( 1e-6, yPi.GetMaximum() *10 ) yK.SetMarkerColor( R.kRed ) yK.Draw( "same") yP.SetMarkerColor( R.kBlue ) yP.Draw("same") R.gPad.SetLogy(1) return can
# Template and data line/marker colours (ROOT colour indices).
h_t3.SetLineColor(6)
h_t4.SetLineColor(7)
h_data.SetLineColor(1)
h_data.SetMarkerColor(1)

# Common y range with 10% headroom.
# NOTE(review): h_t4 is excluded from the maximum search here even though it
# is drawn below and given the same range — confirm this is intentional.
ymax = getMax( [h_data, h_t1, h_t2, h_t3] )
ymax = ymax*1.1
h_data.GetYaxis().SetRangeUser(0,ymax)
h_t1.GetYaxis().SetRangeUser(0,ymax)
h_t2.GetYaxis().SetRangeUser(0,ymax)
h_t3.GetYaxis().SetRangeUser(0,ymax)
h_t4.GetYaxis().SetRangeUser(0,ymax)

# Left pad: data points with all four templates overlaid.
c = Canvas()
c.Divide(2)
c.cd(1)
h_data.Draw('PE')
h_t1.Draw('SAME HIST')
h_t2.Draw('SAME HIST')
h_t3.Draw('SAME HIST')
h_t4.Draw('SAME HIST')

# Only the enabled templates take part in the fit.
templates = { }
if useT1: templates['t1'] = h_t1
if useT2: templates['t2'] = h_t2
if useT3: templates['t3'] = h_t3
if useT4: templates['t4'] = h_t4
fit_data = FitData( h_data, templates, fit_boundaries = ( 0, h_data.nbins() ) )
# Composite test datasets: (name, samples) pairs built from data1/data2.
data3 = "normal+uniform", np.concatenate((data1[1], 10 * data2[1]))
data4 = "normal+normal", np.concatenate((data1[1], np.random.normal(2.5, 0.1, 100000)))
datas = (data0, data1, data2, data3, data4)
# Automatic binning recipes to benchmark; "manual1" is the fixed-50-bin baseline.
recipes = "manual1", "sturges", "sturges-doane", "scott", "sqrt", \
    "doane", "freedman-diaconis", "risk", "knuth"
objs = []
# One pad per (dataset, recipe) pair.
canvas = Canvas()
canvas.Divide(len(recipes), len(datas))
print '-' * 80
print '\t\t{:<20s}{:>10s} {:<6s}'.format('method', 'bins', 'time/s')
print '-' * 80
for id, (dataname, d) in enumerate(datas):
    print dataname
    for ir, r in enumerate(recipes):
        canvas.cd(id * len(recipes) + ir + 1)
        # Time each fill so the recipes can be compared for speed as well.
        timer = Timer()
        if r == "manual1":
            with timer:
                bins, h = FillHistogram(d, 50, np.min(d), np.max(d), drawstyle='hist')
        else:
            with timer:
                bins, h = FillHistogram(d, binning=r, drawstyle='hist')
        print '\t\t{:<20s}{:>10d} {:<6.2f}'.format(r, h.GetNbinsX(), timer.duration_in_seconds())
        h.Draw()
        h.GetYaxis().SetRangeUser(0, h.GetMaximum() * 1.2)
        # Label the pad with the recipe name and resulting bin count.
        l = ROOT.TLatex(0.15, 0.8, "%s: %d" % (r, h.GetNbinsX()))
        l.SetNDC()
        l.SetTextSize(0.1)
        l.Draw()
canvas.Update()
def createHists(sample, labelCodes, nameOfType, labelsForSample, weightsPerSample, foundVariables, allHistStack, allLegendStack, corrWeights, subset = 'TestA', createLog = False):
    """Create all of the histograms for each sample and save them to file.

    Keyword arguments:
    sample -- the sample from which the histograms are created
    labelCodes -- the codes for all of the labels (0 == W, for eg.)
    nameOfType -- signal or bkg (default bkg)
    labelsForSample -- the labels of all the entries in the sample
    weightsPerSample -- the XS weight
    foundVariables -- the variables in the order in which they were found
    allHistStack -- the histogram stack of all of the samples
    allLegendStack -- the legends for all samples and entries to go on the stack
    corrWeights -- per-variable correction weight applied when filling
    subset -- extra label for the output file (default A)
    createLog -- create a log for the output (default False)

    Returns (hist, histDict, histStack, legendStack); also appends to
    allHistStack/allLegendStack in place.

    """
    global histLimits
    #TODO: These should really be read in from a settings file
    #histDict = {'W':[],'Z':[],'WW':[],'ZZ':[],'st':[],'ttbar':[],'WZ':[],'WH125':[]}
    # One histogram list per process; each list gets one entry per variable.
    histDict = {'Wl':[],'Wcc':[],'Wc':[],'Wb':[],'Zb':[],'Z':[],'WW':[],'ZZ':[],'stop':[],'ttbar':[],'WZ':[],'WH125':[]}
    #coloursForStack = ['Green', 'Blue', 'Orange', 'Orange', 'Orange-2', 'Yellow', 'Pink', 'Red']
    coloursForStack = [3, 4, 800, 800, 795, 5, 6, 2]
    #colourDict = {'W':0,'Z':1,'WW':2,'ZZ':3,'st':4,'ttbar':5,'WZ':6,'WH125':7}
    # colourDict maps a process to its index in coloursForStack.
    colourDict = {'Wl':0,'Wcc':0,'Wc':0,'Wb':0,'Zb':1,'Z':1,'WW':2,'ZZ':3,'stop':4,'ttbar':5,'WZ':6,'WH125':7}
    if nameOfType == 'signal':
        fillcol = 'blue'
    else:
        fillcol = 'red'
    hist = []
    histidx = 0
    # NOTE(review): the log file is always written regardless of createLog.
    log = open(nameOfType+subset+'.log','w')
    c1 = Canvas()
    c1.cd()
    log.write('########################### '+ nameOfType +' ###########################\n')
    # One pass per variable; c is the array of values for that variable.
    for c in sample:
        variableName = foundVariables[histidx]
        #hist.append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
        # Shape histogram (all processes combined) for this variable.
        hist.append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
        hist[histidx].fill_array(c)
        # Normalise to unit area for shape comparison.
        hist[histidx].scale(1.0/hist[histidx].integral())
        hist[histidx].fillcolor=fillcol
        hist[histidx].linecolor=fillcol
        hist[histidx].GetXaxis().SetTitle(foundVariables[histidx])
        hist[histidx].GetYaxis().SetTitle('# Events Normalised to 1')
        hist[histidx].SetTitle(nameOfType)
        hist[histidx].fillstyle='solid'
        hist[histidx].SetStats(0)
        lblcount = 0
        # Create this variable's per-process histograms.
        for k in histDict.iterkeys():
            #histDict[k].append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
            histDict[k].append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
            #histDict[k][histidx].fillcolor = coloursForStack[int(colourDict[k])]
            histDict[k][histidx].SetFillColor(int(coloursForStack[int(colourDict[k])]))
            histDict[k][histidx].fillstyle = 'solid'
            histDict[k][histidx].SetOption('hist')
            histDict[k][histidx].SetTitle(str(k))# + str(foundVariables[histidx]))
            histDict[k][histidx].SetStats(0)
            histDict[k][histidx].GetXaxis().SetTitle(foundVariables[histidx])
            histDict[k][histidx].GetYaxis().SetTitle('# Events')
        # Route each event's value into the histogram of its labelled process.
        for i in c:
            lbl = labelCodes[int(labelsForSample[lblcount])]
            if lbl in histDict.keys():
                histDict[lbl][histidx].fill(i,corrWeights[histidx])
                #histDict[lbl][histidx].scale(corrWeights[histidx])
            lblcount += 1
        histidx+=1
    # create stacks and legends
    histStack = []
    legendStack = []
    # Only seed the shared stacks/legends on the first call (they are empty then).
    if not allHistStack:
        initStack = True
    else:
        initStack = False
    for st in foundVariables:
        if initStack == True:
            allHistStack.append(HistStack(st,st))
            allLegendStack.append(Legend(7))
        histStack.append(HistStack(st,st))
        legendStack.append(Legend(7))
    rwcount_outer = 0
    # Add each non-empty per-process histogram, XS-weighted, to the stacks.
    for rw in histDict.iterkeys():
        log.write(rw + ' length: '+str(len(histDict[rw]))+'\n')
        for rwcount in xrange(0,len(histDict[rw])):
            if histDict[rw][rwcount].GetEntries() > 0:
                if rw in weightsPerSample:
                    # In-place cross-section scaling before stacking.
                    histDict[rw][rwcount].scale(weightsPerSample[rw])
                # Clone so the stacks own independent copies.
                histStack[rwcount].Add(histDict[rw][rwcount].Clone())
                allHistStack[rwcount].Add(histDict[rw][rwcount].Clone())
                histStack[rwcount].Draw()
                allHistStack[rwcount].Draw()
                histDict[rw][rwcount].draw('hist')
                # Stack axes only exist after Draw(); copy titles over.
                histStack[rwcount].GetXaxis().SetTitle(histDict[rw][rwcount].GetXaxis().GetTitle())
                histStack[rwcount].GetYaxis().SetTitle('# Events')
                allHistStack[rwcount].GetXaxis().SetTitle(histDict[rw][rwcount].GetXaxis().GetTitle())
                allHistStack[rwcount].GetYaxis().SetTitle('# Events')
                legendStack[rwcount].AddEntry( histDict[rw][rwcount], 'F')
                allLegendStack[rwcount].AddEntry( histDict[rw][rwcount], 'F')
                #c1.SaveAs("histDict"+str(nameOfType)+str(subset)+str(rwcount)+".png")
                log.write(rw + '['+str(rwcount)+'] entries: ' + str(histDict[rw][rwcount].GetEntries())+'\n')
        rwcount_outer += 1
    log.close()
    return hist,histDict,histStack,legendStack
# Enable per-bin sum-of-weights errors before filling.
h1.Sumw2()
h1.SetLineColor(ROOT.kRed)
h2 = Hist(100, -5, 5, name="h2", title="Histogram 2", linecolor='blue')
h2.SetLineColor(ROOT.kBlue)
for ievt in xrange(10000):
    #some test histograms:
    #1. let 2 histograms screwed
    #h1.Fill(rand.Gaus(0.5, 0.8))
    #h2.Fill(rand.Gaus(0, 1))
    #2. long tail and short tail
    h1.Fill(rand.Gaus(0, 0.8))
    h2.Fill(rand.Gaus(0, 1))

# Pad 1: both distributions overlaid with a legend.
pad = c.cd(1)
h1.Draw('hist')
h2.Draw('hist same')
pad.SetTitle("")
leg = Legend(2, pad=pad, leftmargin=0.5, topmargin=0.11, rightmargin=0.05)
leg.SetFillColor(0)
leg.AddEntry(h1, h1.GetTitle(), "l")
leg.AddEntry(h2, h2.GetTitle(), "l")
leg.Draw()

# Pad 2: quantile-quantile comparison of the two histograms.
pad = c.cd(2)
gr = qqgraph(h1, h2)
# NOTE(review): fragment — the matching `if` branch selecting a smaller
# dataset tuple lives above this chunk.
else:
    datas = (data0, data1, data2, data3, data4)
# Automatic binning recipes to benchmark; "manual1" is the fixed-50-bin baseline.
recipes = (
    "manual1", "sturges", "sturges-doane", "scott", "sqrt",
    "doane", "freedman-diaconis", "risk", "knuth")
objs = []
# One pad per (dataset, recipe) pair, with tiny inter-pad margins.
canvas = Canvas()
canvas.Divide(len(recipes), len(datas), 1E-3, 1E-3)
print '-' * 57
print '\t\t{0:<20s}{1:>10s} {2:<6s}'.format('method', 'bins', 'time [s]')
print '-' * 57
for id, (dataname, d) in enumerate(datas):
    print dataname
    for ir, r in enumerate(recipes):
        canvas.cd(id * len(recipes) + ir + 1)
        # Time each fill so the recipes can be compared for speed as well.
        timer = Timer()
        if r == "manual1":
            with timer:
                bins, h = histogram(d, 50, np.min(d), np.max(d), drawstyle='hist')
        else:
            with timer:
                bins, h = histogram(d, binning=r, drawstyle='hist')
        print '\t\t{0:<20s}{1:>10d} {2:<6.2f}'.format(
            r, h.GetNbinsX(), timer.duration_in_seconds())
        h.Draw()
        h.GetYaxis().SetRangeUser(0, h.GetMaximum() * 1.2)
        # Label the pad with the recipe name and resulting bin count.
        l = ROOT.TLatex(0.15, 0.8, "{0}: {1:d}".format(r, h.GetNbinsX()))
        l.SetNDC()
        l.SetTextSize(0.1)
def Hist_comp_ratios (dec, bkg) :
    """Compare the trained class-probability ratio with data for one
    background class `bkg` and decay-channel code `dec`, and save the
    2x2 comparison canvas as a png.

    dec -- decay channel code (0.0 / 1.0 / 10.0 used below)
    bkg -- background class name, resolved via GetClassIndex/GetClassProbaPath
    """
    # weights = pickle.load( open( "weights_train.pck", "rb" ) )
    # probas = pickle.load( open( "class_proba.pck", "rb" ) )
    # mt_dec = pickle.load( open( "inputs_train.pck", "rb" ) )
    # classes = pickle.load( open( "targets_train.pck", "rb" ) )
    weights = pickle.load( open( "weights.pck", "rb" ) )
    probas = pickle.load( open( "class_probaWholeSample.pck", "rb" ) )
    mt_dec = pickle.load( open( "inputs.pck", "rb" ) )
    classes = pickle.load( open( "classes.pck", "rb" ) )
    #put mt_dec weights and the RELEVANT class probability together
    class_num = (GetClassIndex(bkg)-1)
    # Columns: 0 = mT, 1 = decay code, 2 = class, 3 = weight, 4 = class probability
    all_data = np.transpose(np.vstack((mt_dec[:,0],mt_dec[:,1], classes, weights, probas[:,class_num])))
    all_data = np.array(filter(lambda x: x[2] == class_num, all_data))
    #extract the relevant values for the specific decay channel
    Filter = np.array(filter(lambda x: x[1] == dec, all_data))
    MT = Filter[:,0]
    Weight = Filter[:,3]
    Prob_bkg = Filter[:,4]
    SumProbXweight = np.multiply(Prob_bkg,Weight)
    h1 = TH1D("h1","SumProbXweight_"+bkg+str(dec), 25 , 0.0 ,250.)
    # NOTE(review): each root_open(..., 'recreate') handle is discarded, so the
    # files are never explicitly closed.  Closing them here would NOT be a safe
    # fix: ROOT attaches h1/h2/... to the current file and would delete them on
    # Close(), breaking the later Draw calls.  Needs a restructure to fix.
    root_open("plots/ROOTfiles/H1_SumProbXweight_"+bkg+str(dec)+"Samptot.root", 'recreate')
    fill_hist(h1,MT, weights=SumProbXweight)
    h1.Write()
    h2 = TH1D("h2","SumWeight_"+bkg+str(dec), 25 , 0.0 ,250.)
    root_open("plots/ROOTfiles/H1_SumWeight_"+bkg+str(dec)+"Samptot.root", 'recreate')
    fill_hist(h2,MT,weights=Weight)
    h2.Write()
    # h3 = sum(prob*w)/sum(w): the per-bin average class probability.
    h3 = h1.Clone("h3")
    h3.Divide(h2)
    if (dec == 0.0) :
        # For dec == 0.0 also build the dec == 1.0 (with pi0) counterparts.
        Filter2 = np.array(filter(lambda x: x[1] == 1.0, all_data))
        MT2 = Filter2[:,0]
        Weight2 = Filter2[:,3]
        Prob_bkg2 = Filter2[:,4]
        SumProbXweight2 = np.multiply(Prob_bkg2,Weight2)
        h12 = TH1D("h12","SumProbXweight_"+bkg+str(1.0), 25 , 0.0 ,250.)
        root_open("plots/ROOTfiles/H1_SumProbXweight_"+bkg+str(1.0)+"Samptot.root", 'recreate')
        fill_hist(h12,MT2, weights=SumProbXweight2)
        h12.Write()
        h22 = TH1D("h22","SumWeight_"+bkg+str(1.0), 25 , 0.0 ,250.)
        root_open("plots/ROOTfiles/H1_SumWeight_"+bkg+str(1.0)+"Samptot.root", 'recreate')
        fill_hist(h22,MT2,weights=Weight2)
        h22.Write()
        h32 = h12.Clone("h32")
        h32.Divide(h22)
        h12.SetStats(0)
        h22.SetStats(0)
        h32.SetStats(0)
        h12.SetLineColor(2)
        h22.SetLineColor(2)
        h32.SetLineColor(0)
        h32.SetMarkerStyle(23)
        h32.SetMarkerColor(2)
        h32.SetMarkerSize(1.2)
    #create Canvas and save the plot as png
    c = Canvas()
    c.Divide(2,2)
    # Pad 1: sum(prob*w) distributions.
    c.cd(1)
    h1.SetStats(0)
    if (dec == 0.0) :
        # Use the larger of the two maxima for the common y range.
        if (h1.GetMaximum() > h12.GetMaximum()) :
            print "h1"
            h12.GetYaxis().SetRangeUser(0.,h1.GetMaximum())
        else :
            print "h12"
            h12.GetYaxis().SetRangeUser(0.,h12.GetMaximum())
        h12.Draw("HIST")
    h1.Draw("HIST SAME")
    # Pad 2: sum(w) distributions.
    c.cd(2)
    h2.SetStats(0)
    if (dec == 0.0) :
        if (h2.GetMaximum() > h22.GetMaximum()) :
            h22.GetYaxis().SetRangeUser(0.,h2.GetMaximum())
        else :
            h22.GetYaxis().SetRangeUser(0.,h22.GetMaximum())
        h22.Draw("HIST")
    h2.Draw("HIST SAME")
    # Pad 3: data class probability vs the training ratio h3 (and h32).
    c.cd(3)
    f1 = root_open(GetClassProbaPath(bkg))
    #get the 2 histograms for the 2 decay channels: 1 track & 3 tracks
    H1 = f1.Get("h_w_2d")
    if (dec == 10.0) :
        #3 tracks
        h_data = Hist(list(H1.xedges()))
        h_data[:] = H1[:,2]
    else :
        #1 track
        h_data = Hist(list(H1.xedges()))
        h_data[:] = H1[:,1]
    h_data.GetXaxis().SetRangeUser(0.,250.)
    h_data.GetYaxis().SetRangeUser(0.,1.)
    h_data.fillstyle = '/'
    h_data.fillcolor = (255,255,0) #yellow
    h_data.SetStats(0)
    h_data.Draw("HIST")
    # h3.SetFillColor(4) #blue
    # h3.SetFillStyle(3005)
    h3.SetLineColor(0)
    h3.SetMarkerStyle(21)
    h3.SetMarkerColor(4)
    h3.SetMarkerSize(1.2)
    h3.SetStats(0)
    h3.SetTitle(bkg+str(dec))
    h3.GetXaxis().SetTitle("m_{T}")
    h3.GetYaxis().SetTitle("Class probability")
    h3.Draw("HIST P SAME")
    if (dec == 0.0) :
        h32.Draw("HIST P SAME")
    c.Update()
    if (dec == 0.0) :
        legend = Legend(3, leftmargin=0.45, margin=0.3)
        legend.AddEntry(h3, "training, no #pi^{0}", style='P')
        legend.AddEntry(h32, "training, with #pi^{0}", style='P')
        legend.AddEntry(h_data, "data", style='F')
        legend.Draw()
    else :
        legend = Legend(2, leftmargin=0.45, margin=0.3)
        legend.AddEntry(h3, "training", style='P')
        legend.AddEntry(h_data, "data", style='F')
        legend.Draw()
    c.SaveAs("plots/H1_"+bkg+str(dec)+"_RatioCompSamptot.png")
def makeComparisionPage( histodicts , fileNames, fileDescr, separateFiles):
    """ Prepares a canvas comparing multiple histograms: plotting all in one
    pad and their ratios in the second.

    histodicts -- list of {name: histogram} dicts, one per file; the first
                  dict is the reference
    fileNames -- names of the compared files
    fileDescr -- extra description text for the files
    separateFiles -- if True, one 3x1 canvas per histogram; otherwise
                     histograms are grouped four per 3x4 canvas

    NOTE(review): this block is truncated here — the drawing of `hists` and
    `ratiohists` continues beyond this chunk.
    """
    import rootpy
    from rootpy.plotting import Hist, Canvas, Legend
    import ROOT
    from ROOT import gPad
    log = logging.getLogger('pyroplot')
    cans = {}
    # Fixed colour cycle: one colour per compared file.
    colors = [ROOT.kBlue, ROOT.kRed+1,ROOT.kViolet-1, ROOT.kOrange+7,ROOT.kGreen-7,ROOT.kOrange-6,
              ROOT.kPink-9,ROOT.kTeal-6,ROOT.kBlue+4,ROOT.kAzure+2]
    log.info( "Drawing histograms .." )
    # prepare set of histograms to compare to the reference on (the first)
    # loop over the reference set of histos (sorted by key):
    for hidx, refname in enumerate(sorted(histodicts[0].keys())):
        # prepare canvas
        if separateFiles:
            log.debug( "Creating new canvas with index %d."%(hidx))
            c=Canvas( 600, 270)
            cans[refname] = c
            c.Divide(3,1)
            c.cd(1)
        if not separateFiles and (hidx)%4 == 0:
            log.debug( "Creating new canvas with index %d."%(hidx/3))
            # start a new canvas
            c=Canvas( 600, 800)
            cans[refname] = c
            c.Divide(3,4)
        # prepare histograms for drawing
        log.debug( "Drawing histogram #" + str(hidx+1) +" (" + refname + ") on canvas #" + str(len(cans)) )
        hists = []
        ratiohists = []
        hiter = iter (histodicts)
        # special treatment for tprofile: prepare the reference projection for the ratio
        if histodicts[0][refname].__class__.__name__=="Profile":
            refProj = histodicts[0][refname].ProjectionX()
            refProj.SetName("reference_proj")
        for idx, h in enumerate(hiter):
            # make sure we have this histogram loaded:
            if not refname in h:
                continue
            # access the corresponding histogram of the other files at the same hidx as used for ref
            h[refname].color = colors[idx]
            h[refname].linestyle = idx
            hists.append (h[refname])
            # prepare ratio if this is not the first (i.e. reference) histogram
            if idx:
                # special treatment for TProfile:
                if h[refname].__class__.__name__=="Profile":
                    myratio = Hist(h[refname].nbins(), h[refname].lowerbound(), h[refname].upperbound()) #dummy hist
                    myratioproj = h[refname].ProjectionX()
                    myratioproj.SetName("cmp_hist_proj"+str(idx))
                    try:
                        myratio.divide(myratioproj,refProj)
                    except rootpy.ROOTError, e:
                        log.error("Calculation of ratio for histogram %s caused ROOTError exception ('%s')"%(h[refname].GetName(),e.msg))
                        break
                    myratio.color = colors[idx]
                else:
                    myratio = h[refname].clone() # make sure that the ratio has the right type
                    try:
                        myratio.Divide(h[refname], histodicts[0][refname]) # divide by reference hist
                    except rootpy.ROOTError, e:
                        log.error("Calculation of ratio for histogram %s caused ROOTError exception ('%s')"%(h[refname].GetName(),e.msg))
                        break
                myratio.yaxis.SetTitle("(h_{cmp} - h_{ref})/h_{ref}")
                myratio.SetTitle("ratio to reference")
                myratio.SetMaximum(2)
                myratio.SetMinimum(0)
                myratio.SetStats(0)
                ratiohists.append(myratio)
def createHistsData(sample, foundVariables, allHistStack, allLegendStack, subset = 'TestA', createLog = False):
    """Create histograms for data.

    Keyword arguments:
    sample -- the input data sample, iterated per variable (one array of
              values per found variable)
    foundVariables -- the variables in the sample, in order
    allHistStack -- the histogram stack for all other samples (accepted for
                    interface compatibility; not used here)
    allLegendStack -- the shared legends; a 'data' entry is appended per
                      variable
    subset -- an extra identifier for the filename (default TestA)
    createLog -- create a log (default False; note the log file is currently
                 always written)

    Returns (hist, histDict, histStack, legendStack).
    """
    histDict = {'data':[]}
    global histLimits
    fillcol = 'black'
    hist = []
    histidx = 0
    log = open('data'+str(subset)+'.log','w')
    c1 = Canvas()
    c1.cd()
    log.write('########################### DATA ###########################\n')
    # One pass per variable; c is the array of values for that variable.
    for c in sample:
        variableName = foundVariables[histidx]
        # Normalised shape histogram for this variable.
        hist.append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
        hist[histidx].fill_array(c)
        # Guard against an empty variable: dividing by a zero integral
        # previously raised ZeroDivisionError.
        integral = hist[histidx].integral()
        if integral != 0:
            hist[histidx].scale(1.0/integral)
        hist[histidx].fillcolor=fillcol
        hist[histidx].linecolor=fillcol
        hist[histidx].GetXaxis().SetTitle(foundVariables[histidx])
        hist[histidx].GetYaxis().SetTitle('# Events Normalised to 1')
        hist[histidx].SetTitle('data')
        hist[histidx].fillstyle='solid'
        hist[histidx].SetStats(0)
        # Un-normalised event-count histogram that goes onto the stacks.
        histDict['data'].append(Hist(50,int(histLimits[variableName][0]),int(histLimits[variableName][1])))
        histDict['data'][histidx].fillcolor=fillcol
        histDict['data'][histidx].linecolor=fillcol
        histDict['data'][histidx].SetOption('hist')
        histDict['data'][histidx].SetTitle('data')
        histDict['data'][histidx].GetXaxis().SetTitle(foundVariables[histidx])
        histDict['data'][histidx].GetYaxis().SetTitle("# Events")
        histDict['data'][histidx].SetStats(0)
        for i in c:
            histDict['data'][histidx].fill(i)
        histidx+=1
    # create stacks and legends (one per variable)
    histStack = []
    legendStack = []
    for st in foundVariables:
        histStack.append(HistStack(st,st))
        legendStack.append(Legend(7))
    for rw in histDict:
        log.write(rw + ' length: '+str(len(histDict[rw]))+'\n')
        for rwcount in xrange(0,len(histDict[rw])):
            if histDict[rw][rwcount].GetEntries() > 0:
                # Clone so the stack owns an independent copy.
                histStack[rwcount].Add(histDict[rw][rwcount].Clone())
                histStack[rwcount].Draw()
                # Stack axes only exist after Draw(); copy titles over.
                histStack[rwcount].GetXaxis().SetTitle(histDict[rw][rwcount].GetXaxis().GetTitle())
                histStack[rwcount].GetYaxis().SetTitle('# Events')
                histDict[rw][rwcount].draw('hist')
                legendStack[rwcount].AddEntry( histDict[rw][rwcount], 'F')
                allLegendStack[rwcount].AddEntry(histDict[rw][rwcount], 'F')
                #c1.SaveAs("histDictData"+str(subset)+str(rwcount)+".png")
                log.write(rw + '['+str(rwcount)+'] entries: ' + str(histDict[rw][rwcount].GetEntries())+'\n')
    log.close()
    return hist,histDict,histStack,legendStack
# Style the efficiency histogram and store it per (sig/bkg, variable, cut).
heff.markersize = 0.2
heff.SetMaximum(6)
heff.SetMinimum(0)
heff.SetStats(False)
hists[i].append(heff)

## Plots
nvars = len(variables)
ncuts = len(mva_cuts)

# ROOT
# Two pads side by side: pad 1 = signal, pad 2 = background.
canvas = Canvas(800, 400, 500, 10)
canvas.Divide(2, 1)
for i, var in enumerate(variables):
    for j in (0, 1): # sig, bkg
        canvas.cd(j+1)
        # Overlay one efficiency curve per MVA cut; the first owns the frame.
        for k, cut in enumerate(mva_cuts):
            opts = 'e1' if k == 0 else 'e1 same'
            hists[j][i*ncuts + k].Draw(opts)
        canvas.Modified()
        canvas.Update()
    if doprint:
        for typ in ['png']: #, 'pdf']:
            canvas.Print('{}_bkg_sig_eff_{}.{}'.format(var, n, typ))
del tmp, canvas # clean up

# Matplotlib
import matplotlib.pyplot as plt
plt.rc('font', family='Liberation Sans') # choose font
plt.rc('mathtext', default='regular') # use default font for math
def makeComparisionPage(histodicts, fileNames, fileDescr, separateFiles):
    """ Prepares a canvas comparing multiple histograms: plotting all in one
    pad and their ratios in the second.

    histodicts -- list of {name: histogram} dicts, one per file; the first
                  dict is the reference
    fileNames -- names of the compared files
    fileDescr -- extra description text for the files
    separateFiles -- if True, one 3x1 canvas per histogram; otherwise
                     histograms are grouped four per 3x4 canvas

    NOTE(review): this block is truncated here — the drawing of `hists` and
    `ratiohists` continues beyond this chunk.
    """
    import rootpy
    from rootpy.plotting import Hist, Canvas, Legend
    import ROOT
    from ROOT import gPad
    log = logging.getLogger('pyroplot')
    cans = {}
    # Fixed colour cycle: one colour per compared file.
    colors = [
        ROOT.kBlue, ROOT.kRed + 1, ROOT.kViolet - 1, ROOT.kOrange + 7,
        ROOT.kGreen - 7, ROOT.kOrange - 6, ROOT.kPink - 9, ROOT.kTeal - 6,
        ROOT.kBlue + 4, ROOT.kAzure + 2
    ]
    log.info("Drawing histograms ..")
    # prepare set of histograms to compare to the reference on (the first)
    # loop over the reference set of histos (sorted by key):
    for hidx, refname in enumerate(sorted(histodicts[0].keys())):
        # prepare canvas
        if separateFiles:
            log.debug("Creating new canvas with index %d." % (hidx))
            c = Canvas(600, 270)
            cans[refname] = c
            c.Divide(3, 1)
            c.cd(1)
        if not separateFiles and (hidx) % 4 == 0:
            log.debug("Creating new canvas with index %d." % (hidx / 3))
            # start a new canvas
            c = Canvas(600, 800)
            cans[refname] = c
            c.Divide(3, 4)
        # prepare histograms for drawing
        log.debug("Drawing histogram #" + str(hidx + 1) + " (" + refname +
                  ") on canvas #" + str(len(cans)))
        hists = []
        ratiohists = []
        hiter = iter(histodicts)
        # special treatment for tprofile: prepare the reference projection for the ratio
        if histodicts[0][refname].__class__.__name__ == "Profile":
            refProj = histodicts[0][refname].ProjectionX()
            refProj.SetName("reference_proj")
        for idx, h in enumerate(hiter):
            # make sure we have this histogram loaded:
            if not refname in h:
                continue
            # access the corresponding histogram of the other files at the same hidx as used for ref
            h[refname].color = colors[idx]
            h[refname].linestyle = idx
            hists.append(h[refname])
            # prepare ratio if this is not the first (i.e. reference) histogram
            if idx:
                # special treatment for TProfile:
                if h[refname].__class__.__name__ == "Profile":
                    myratio = Hist(h[refname].nbins(), h[refname].lowerbound(),
                                   h[refname].upperbound())  #dummy hist
                    myratioproj = h[refname].ProjectionX()
                    myratioproj.SetName("cmp_hist_proj" + str(idx))
                    try:
                        myratio.divide(myratioproj, refProj)
                    except rootpy.ROOTError, e:
                        log.error(
                            "Calculation of ratio for histogram %s caused ROOTError exception ('%s')"
                            % (h[refname].GetName(), e.msg))
                        break
                    myratio.color = colors[idx]
                else:
                    myratio = h[refname].clone(
                    )  # make sure that the ratio has the right type
                    try:
                        myratio.Divide(
                            h[refname],
                            histodicts[0][refname])  # divide by reference hist
                    except rootpy.ROOTError, e:
                        log.error(
                            "Calculation of ratio for histogram %s caused ROOTError exception ('%s')"
                            % (h[refname].GetName(), e.msg))
                        break
                myratio.yaxis.SetTitle("(h_{cmp} - h_{ref})/h_{ref}")
                myratio.SetTitle("ratio to reference")
                myratio.SetMaximum(2)
                myratio.SetMinimum(0)
                myratio.SetStats(0)
                ratiohists.append(myratio)
# NOTE(review): fragment — these are the trailing keyword arguments of an
# `h1 = Hist(100, -5, 5, name="h1", ...)` call that starts above this chunk.
          title="Histogram 1",
          linecolor='red', legendstyle='l')
h2 = Hist(100, -5, 5, name="h2", title="Histogram 2",
          linecolor='blue', legendstyle='l')

# Fill both with Gaussians of different widths.
for ievt in range(10000):
    h1.Fill(rand.Gaus(0, 0.8))
    h2.Fill(rand.Gaus(0, 1))

# Pad 1: overlay with a legend built directly from the histograms.
pad = c.cd(1)
h1.Draw('hist')
h2.Draw('hist same')
leg = Legend([h1, h2], pad=pad, leftmargin=0.5, topmargin=0.11,
             rightmargin=0.05, textsize=20)
leg.Draw()

# Pad 2: quantile-quantile comparison of the two histograms.
pad = c.cd(2)
gr = qqgraph(h1, h2)
# NOTE(review): fragment — the first two lines are the tail of a per-bin
# normalisation loop (dividing contents/errors by the bin width args.bw)
# whose opening lines are above this chunk.
        i, modules_hist[j].GetBinContent(i) / args.bw)
    modules_hist[j].SetBinError(i, modules_hist[j].GetBinError(i) / args.bw)

print ' - drawing ... '

# Find the global maximum over the 25 module histograms, then apply a
# common y range with 10% headroom so all pads share the same scale.
y_max = 0
for i in xrange(25):
    if modules_hist[i].GetMaximum() > y_max:
        y_max = modules_hist[i].GetMaximum()
for i in xrange(25):
    modules_hist[i].SetMaximum(y_max * 1.1)

# One canvas for the overall trigger rate ...
canvas_trigger = Canvas(1000, 800, name="canvas_trigger", title="rate of event trigger")
canvas_trigger.ToggleEventStatus()
canvas_trigger.cd()
trigger_hist.Draw('EH')

# ... and a 5x5 canvas for the individual module rates.
canvas_modules = Canvas(1500, 1000, name="canvas_modules", title="rate of 25 modules")
canvas_modules.ToggleEventStatus()
canvas_modules.Divide(5, 5)
for i in xrange(25):
    # Column-major pad index (i / 5 is Py2 integer division).
    canvas_modules.cd(5 * (i % 5) + i / 5 + 1)
    modules_hist[i].Draw('EH')

sci_trigger_r_obj.close_file()
# Keep the canvases alive until the user closes them.
wait(True)
class Plotter(object):
    """Produce stacked data/MC comparison plots, loose-not-tight control plots,
    shape comparisons and (optionally) combine datacards for one analysis channel.

    Relies on module-level names defined elsewhere in this file:
    ``variables``, ``groups``, ``togroup``, ``get_time_str``, ``Evaluator``,
    ``CMS_lumi``, ``draw``, plus rootpy's ``Hist``/``HistStack``/``Legend``/
    ``Canvas``/``Pad`` and ``ROOT``, ``np``, ``gc``, ``copy``, ``time``,
    ``makedirs``.
    """

    def __init__(self, channel, year, plot_dir, base_dir, post_fix,
                 selection_data, selection_mc, selection_tight,
                 pandas_selection, lumi, model, transformation, features,
                 process_signals, plot_signals, blinded,
                 datacards=[],  # NOTE(review): mutable default — shared across instances
                 mini_signals=False, do_ratio=True, mc_subtraction=True,
                 dir_suffix='', relaxed_mc_scaling=1., data_driven=True):
        """Store configuration and bind the year-specific sample getters.

        channel          -- full channel name, e.g. 'mem_os'; the part before '_'
                            is used to locate the samples
        selection_data/mc -- lists of cut strings, AND-ed together with ' & '
        selection_tight  -- tight-region cut; its negation defines loose-not-tight
        lumi             -- integrated luminosity used to scale MC (in /pb,
                            presumably, given the /1000. in the CMS label — confirm)
        model/transformation/features -- NN fake-rate evaluator inputs
        """
        self.channel = channel.split('_')[0]
        self.year = year
        self.full_channel = channel
        # plots go into <plot_dir>/<channel>/<suffix>_<timestamp>
        self.plt_dir = '/'.join(
            [plot_dir, channel, '_'.join([dir_suffix, get_time_str()])])
        self.base_dir = base_dir
        self.post_fix = post_fix
        self.selection_data = ' & '.join(selection_data)
        self.selection_mc = ' & '.join(selection_mc)
        self.selection_tight = selection_tight
        self.pandas_selection = pandas_selection
        self.lumi = lumi
        self.model = model
        self.transformation = transformation
        self.features = features
        self.process_signals = process_signals
        # only plot signals if they are processed at all
        self.plot_signals = plot_signals if self.process_signals else []
        self.blinded = blinded
        # loose-not-tight region is the logical complement of the tight cut
        self.selection_lnt = 'not (%s)' % self.selection_tight
        self.do_ratio = do_ratio
        self.mini_signals = mini_signals
        self.datacards = datacards
        self.mc_subtraction = mc_subtraction
        self.relaxed_mc_scaling = relaxed_mc_scaling
        self.data_driven = data_driven
        # pick the sample definitions matching the requested data-taking year
        if self.year == 2018:
            from plotter.samples.samples_2018 import get_data_samples, get_mc_samples, get_signal_samples
        if self.year == 2017:
            from plotter.samples.samples_2017 import get_data_samples, get_mc_samples, get_signal_samples
        if self.year == 2016:
            from plotter.samples.samples_2016 import get_data_samples, get_mc_samples, get_signal_samples
        # NOTE(review): NameError here if year is not one of 2016/2017/2018
        self.get_data_samples = get_data_samples
        self.get_mc_samples = get_mc_samples
        self.get_signal_samples = get_signal_samples

    def total_weight_calculator(self, df, weight_list, scalar_weights=[]):
        """Return the product of the per-event weight columns named in
        ``weight_list`` and the scalar factors in ``scalar_weights``
        as a numpy array aligned with ``df``.

        NOTE(review): mutable default argument; ``np.float`` is removed in
        NumPy >= 1.24 — would need ``float``/``np.float64`` there.
        """
        total_weight = df[weight_list[0]].to_numpy().astype(np.float)
        for iw in weight_list[1:]:
            total_weight *= df[iw].to_numpy().astype(np.float)
        for iw in scalar_weights:
            total_weight *= iw
        return total_weight

    def create_canvas(self, ratio=True):
        """Create ``self.canvas`` with ``self.main_pad`` and ``self.ratio_pad``.

        With ratio=True the canvas is split 75/25 vertically; otherwise the
        main pad fills the canvas and a dummy ratio pad is parked outside the
        visible area so later code can address it unconditionally.
        """
        if ratio:
            self.canvas = Canvas(width=700, height=700)
            self.canvas.Draw()
            self.canvas.cd()
            # upper 75%: main distribution
            self.main_pad = Pad(0., 0.25, 1., 1.)
            self.main_pad.Draw()
            self.canvas.cd()
            # lower 25%: obs/exp ratio
            self.ratio_pad = Pad(0., 0., 1., 0.25)
            self.ratio_pad.Draw()
            self.main_pad.SetTicks(True)
            self.main_pad.SetBottomMargin(0.)
            self.main_pad.SetLeftMargin(0.15)
            self.main_pad.SetRightMargin(0.15)
            self.ratio_pad.SetLeftMargin(0.15)
            self.ratio_pad.SetRightMargin(0.15)
            self.ratio_pad.SetTopMargin(0.)
            self.ratio_pad.SetGridy()
            self.ratio_pad.SetBottomMargin(0.3)
        else:
            self.canvas = Canvas(width=700, height=700)
            self.canvas.Draw()
            self.canvas.cd()
            self.main_pad = Pad(0., 0., 1., 1.)
            self.main_pad.Draw()
            self.canvas.cd()
            self.ratio_pad = Pad(-1., -1., -.9, -.9)
            self.ratio_pad.Draw()  # put it outside the canvas
            self.main_pad.SetTicks(True)
            self.main_pad.SetTopMargin(0.15)
            self.main_pad.SetBottomMargin(0.15)
            self.main_pad.SetLeftMargin(0.15)
            self.main_pad.SetRightMargin(0.15)

    def create_datacards(self, data, bkgs, signals, label,
                         protect_empty_bins=['nonprompt']):
        '''
        FIXME! For now this is specific to the data-driven case

        Writes datacard_<label>.root with the observed/bkg/signal shapes and
        one datacard_<label>_<signal>.txt per signal for use with combine.
        NOTE(review): mutable default argument ``protect_empty_bins``.
        '''
        # save a ROOT file with histograms, aka datacard
        datacard_dir = '/'.join([self.plt_dir, 'datacards'])
        makedirs(datacard_dir, exist_ok=True)
        outfile = ROOT.TFile.Open(
            '/'.join([datacard_dir, 'datacard_%s.root' % label]), 'recreate')
        outfile.cd()
        # data in tight
        data.name = 'data_obs'
        data.Write()
        # reads off a dictionary
        for bkg_name, bkg in bkgs.items():
            # strip the '#'-suffix used to keep ROOT names unique
            bkg.name = bkg_name.split('#')[0]
            bkg.drawstyle = 'HIST E'
            bkg.color = 'black'
            bkg.linewidth = 2
            # manual protection against empty bins, that would make combine crash
            if bkg_name in protect_empty_bins:
                for ibin in bkg.bins_range():
                    if bkg.GetBinContent(ibin) <= 0.:
                        bkg.SetBinContent(ibin, 1e-2)
                        bkg.SetBinError(ibin, np.sqrt(1e-2))
            bkg.Write()
        # signals
        for isig in signals:
            isig.name = isig.name.split('#')[0]
            isig.drawstyle = 'HIST E'
            isig.color = 'black'
            isig.Write()
            # print out the txt datacard
            with open(
                    '/'.join([
                        datacard_dir,
                        'datacard_%s_%s.txt' % (label, isig.name)
                    ]), 'w') as card:
                card.write('''
imax 1 number of bins
jmax * number of processes minus 1
kmax * number of nuisance parameters
--------------------------------------------------------------------------------------------------------------------------------------------
shapes * {cat} datacard_{cat}.root $PROCESS $PROCESS_$SYSTEMATIC
--------------------------------------------------------------------------------------------------------------------------------------------
bin {cat}
observation {obs:d}
--------------------------------------------------------------------------------------------------------------------------------------------
bin {cat} {cat} {cat}
process {signal_name} nonprompt prompt
process 0 1 2
rate {signal:.4f} {nonprompt:.4f} {prompt:.4f}
--------------------------------------------------------------------------------------------------------------------------------------------
lumi lnN 1.025 - -
norm_prompt_{ch}_{y}_{cat} lnN - - 1.15
norm_nonprompt_{ch}_{y}_{cat} lnN - 1.20 -
norm_sig_{ch}_{y}_{cat} lnN 1.2 - -
--------------------------------------------------------------------------------------------------------------------------------------------
{cat} autoMCStats 0 0 1
'''.format(
                    cat=label,
                    # observation is hidden (-1) while blinded
                    obs=int(data.integral()) if self.blinded == False else -1,
                    signal_name=isig.name,
                    signal=isig.integral(),
                    ch=self.full_channel,
                    y=self.year,
                    prompt=bkgs['prompt'].integral(),
                    nonprompt=bkgs['nonprompt'].integral(),
                ))
        outfile.Close()

    def plot(self):
        """Main driver: load samples, evaluate the fake rate, then for every
        entry in the module-level ``variables`` list draw the stacked
        distributions (lin/log), the loose-not-tight control region, the
        tight-vs-loose shape comparison, and optionally write datacards.
        """
        evaluator = Evaluator(self.model, self.transformation, self.features)
        # output directory tree: {lin,log}/{png,root} for the three plot families
        makedirs(self.plt_dir, exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lin']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'log']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lin', 'png']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lin', 'root']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'log', 'png']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'log', 'root']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lnt_region', 'lin']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lnt_region', 'log']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lnt_region', 'lin', 'png']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lnt_region', 'lin', 'root']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lnt_region', 'log', 'png']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'lnt_region', 'log', 'root']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'shapes', 'lin']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'shapes', 'log']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'shapes', 'lin', 'png']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'shapes', 'lin', 'root']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'shapes', 'log', 'png']), exist_ok=True)
        makedirs('/'.join([self.plt_dir, 'shapes', 'log', 'root']), exist_ok=True)

        # NN evaluator
        print('============> starting reading the trees')
        print('Plots will be stored in: ', self.plt_dir)
        now = time()
        signal = []
        if self.process_signals:
            signal = self.get_signal_samples(self.channel,
                                             self.base_dir,
                                             self.post_fix,
                                             self.selection_data,
                                             mini=self.mini_signals)
        else:
            signal = []
        data = self.get_data_samples(self.channel, self.base_dir,
                                     self.post_fix, self.selection_data)
        mc = self.get_mc_samples(self.channel, self.base_dir, self.post_fix,
                                 self.selection_mc)
        print('============> it took %.2f seconds' % (time() - now))

        # evaluate FR
        for isample in (mc + data):  #+signal):
            isample.df['fr'] = evaluator.evaluate(isample.df)
            # already corrected, ready to be applied in lnt-not-tight
            isample.df['fr_corr'] = isample.df['fr'] / (1. - isample.df['fr'])

        # apply an extra selection to the pandas dataframes
        if len(self.pandas_selection):
            for isample in (mc + data + signal):
                isample.df = isample.df.query(self.pandas_selection)

        # split the dataframe in tight and lnt-not-tight (called simply lnt for short)
        print('============> splitting dataframe in tight and loose not tight')
        for isample in (mc + data + signal):
            isample.df_tight = isample.df.query(self.selection_tight)
            if isample not in signal:
                isample.df_lnt = isample.df.query(self.selection_lnt)
            # free some mem
            del isample.df
            gc.collect()
        print('============> ... done')

        # sort depending on their position in the stack
        mc.sort(key=lambda x: x.position_in_stack)

        # now we plot
        self.create_canvas(self.do_ratio)

        for ivar in variables:
            variable, bins, label, xlabel, ylabel, extra_sel = ivar.var, ivar.bins, ivar.label, ivar.xlabel, ivar.ylabel, ivar.extra_selection
            print('plotting', label)

            ######################################################################################
            # plot MC stacks, in tight and lnt
            ######################################################################################
            stack_prompt = []
            stack_nonprompt = []
            stack_nonprompt_control = []

            for imc in mc:
                if extra_sel:
                    mc_df_tight = imc.df_tight.query(extra_sel)
                    mc_df_lnt = imc.df_lnt.query(extra_sel)
                else:
                    mc_df_tight = imc.df_tight
                    mc_df_lnt = imc.df_lnt

                # tight-region MC, scaled to lumi
                histo_tight = Hist(bins,
                                   title=imc.label,
                                   markersize=0,
                                   legendstyle='F',
                                   name=imc.datacard_name + '#' + label + '#t')
                weights = self.total_weight_calculator(
                    mc_df_tight, ['weight'] + imc.extra_signal_weights,
                    [self.lumi, imc.lumi_scaling])
                histo_tight.fill_array(mc_df_tight[variable],
                                       weights=weights * self.relaxed_mc_scaling)
                histo_tight.fillstyle = 'solid'
                histo_tight.fillcolor = 'steelblue' if self.data_driven else imc.colour
                histo_tight.linewidth = 0
                stack_prompt.append(histo_tight)

                # optionally remove the MC subtraction in loose-not-tight
                # may help if MC stats is terrible (and it often is)
                if self.data_driven:
                    if self.mc_subtraction:
                        # lnt MC entering with NEGATIVE weight (-1 factor):
                        # it is subtracted from the fake estimate
                        histo_lnt = Hist(bins,
                                         title=imc.label,
                                         markersize=0,
                                         legendstyle='F',
                                         name=imc.datacard_name + '#' + label + '#lnt')
                        weights = self.total_weight_calculator(
                            mc_df_lnt,
                            ['weight', 'fr_corr'] + imc.extra_signal_weights,
                            [-1., self.lumi, imc.lumi_scaling])
                        histo_lnt.fill_array(mc_df_lnt[variable],
                                             weights=weights * self.relaxed_mc_scaling)
                        histo_lnt.fillstyle = 'solid'
                        histo_lnt.fillcolor = 'skyblue' if self.data_driven else imc.colour
                        histo_lnt.linewidth = 0
                        stack_nonprompt.append(histo_lnt)

                    # unweighted lnt MC, for the control-region plot
                    histo_lnt_control = Hist(bins,
                                             title=imc.label,
                                             markersize=0,
                                             legendstyle='F',
                                             name=imc.datacard_name + '#' + label + '#lntcontrol')
                    weights_control = self.total_weight_calculator(
                        mc_df_lnt, ['weight'] + imc.extra_signal_weights,
                        [self.lumi, imc.lumi_scaling])
                    histo_lnt_control.fill_array(mc_df_lnt[variable],
                                                 weights=weights_control * self.relaxed_mc_scaling)
                    histo_lnt_control.fillstyle = 'solid'
                    histo_lnt_control.fillcolor = imc.colour
                    histo_lnt_control.linewidth = 0
                    # print(histo_lnt_control)
                    # print(histo_lnt_control.fillcolor)
                    # print(imc.name, imc.colour)
                    # print(histo_lnt_control.integral())
                    stack_nonprompt_control.append(histo_lnt_control)

            # merge different samples together (add the histograms)
            # prepare two temporary containers for the post-grouping histograms
            stack_prompt_tmp = []
            stack_nonprompt_tmp = []
            stack_nonprompt_control_tmp = []
            for ini, fin in [(stack_prompt, stack_prompt_tmp),
                             (stack_nonprompt, stack_nonprompt_tmp),
                             (stack_nonprompt_control, stack_nonprompt_control_tmp)]:
                # ``groups``/``togroup`` are module-level grouping definitions
                for k, v in groups.items():
                    grouped = []
                    for ihist in ini:
                        if ihist.name.split('#')[0] in v:
                            grouped.append(ihist)
                        elif ihist.name.split('#')[0] not in togroup:
                            fin.append(ihist)
                    if len(grouped):
                        group = sum(grouped)
                        group.title = k
                        group.name = '#'.join([k] + ihist.name.split('#')[1:])
                        group.fillstyle = grouped[0].fillstyle
                        group.fillcolor = grouped[0].fillcolor
                        group.linewidth = grouped[0].linewidth
                        fin.append(group)
            stack_prompt = stack_prompt_tmp
            stack_nonprompt = stack_nonprompt_tmp
            stack_nonprompt_control = stack_nonprompt_control_tmp

            ######################################################################################
            # plot the signals
            ######################################################################################
            all_signals = []
            signals_to_plot = []
            for isig in signal:
                # keep all signals for datacard variables, else only those flagged toplot
                if variable not in self.datacards:
                    if not isig.toplot:
                        continue
                if variable == 'fr' or variable == 'fr_corr':
                    continue
                if extra_sel:
                    isig_df_tight = isig.df_tight.query(extra_sel)
                else:
                    isig_df_tight = isig.df_tight
                histo_tight = Hist(
                    bins,
                    title=isig.label,
                    markersize=0,
                    legendstyle='L',
                    name=isig.datacard_name + '#' + label
                )  # the "#" thing is a trick to give hists unique name, else ROOT complains
                weights = self.total_weight_calculator(
                    isig_df_tight, ['weight'] + isig.extra_signal_weights,
                    [self.lumi, isig.lumi_scaling])
                histo_tight.fill_array(isig_df_tight[variable], weights=weights)
                histo_tight.color = isig.colour
                histo_tight.fillstyle = 'hollow'
                histo_tight.linewidth = 2
                histo_tight.linestyle = 'dashed'
                histo_tight.drawstyle = 'HIST'
                all_signals.append(histo_tight)
                if isig.toplot:
                    signals_to_plot.append(histo_tight)

            ######################################################################################
            # plot the data
            ######################################################################################
            data_prompt = []
            data_nonprompt = []
            data_nonprompt_control = []
            for idata in data:
                if extra_sel:
                    idata_df_tight = idata.df_tight.query(extra_sel)
                    idata_df_lnt = idata.df_lnt.query(extra_sel)
                else:
                    idata_df_tight = idata.df_tight
                    idata_df_lnt = idata.df_lnt
                histo_tight = Hist(bins,
                                   title=idata.label,
                                   markersize=1,
                                   legendstyle='LEP')
                histo_tight.fill_array(idata_df_tight[variable])
                data_prompt.append(histo_tight)
                if self.data_driven:
                    # lnt data weighted by fr/(1-fr): the fake estimate
                    histo_lnt = Hist(bins,
                                     title=idata.label,
                                     markersize=0,
                                     legendstyle='F')
                    histo_lnt.fill_array(idata_df_lnt[variable],
                                         weights=idata_df_lnt.fr_corr)
                    histo_lnt.fillstyle = 'solid'
                    histo_lnt.fillcolor = 'skyblue'
                    histo_lnt.linewidth = 0
                    # unweighted lnt data, for the control-region plot
                    histo_lnt_control = Hist(bins,
                                             title=idata.label,
                                             markersize=1,
                                             legendstyle='LEP')
                    histo_lnt_control.fill_array(idata_df_lnt[variable])
                    data_nonprompt.append(histo_lnt)
                    data_nonprompt_control.append(histo_lnt_control)

            if self.data_driven:
                # put the prompt backgrounds together
                all_exp_prompt = sum(stack_prompt)
                all_exp_prompt.title = 'prompt'
                # put the nonprompt backgrounds together
                all_exp_nonprompt = sum(stack_nonprompt + data_nonprompt)
                all_exp_nonprompt.fillstyle = 'solid'
                all_exp_nonprompt.fillcolor = 'skyblue'
                all_exp_nonprompt.linewidth = 0
                all_exp_nonprompt.title = 'nonprompt'
                # create the stacks
                stack = HistStack([all_exp_prompt, all_exp_nonprompt],
                                  drawstyle='HIST',
                                  title='')
                stack_control = HistStack(stack_nonprompt_control,
                                          drawstyle='HIST',
                                          title='')
            else:
                stack = HistStack(stack_prompt, drawstyle='HIST', title='')

            # stat uncertainty
            hist_error = stack.sum  #sum([all_exp_prompt, all_exp_nonprompt])
            hist_error.drawstyle = 'E2'
            hist_error.fillstyle = '/'
            hist_error.color = 'gray'
            hist_error.title = 'stat. unc.'
            hist_error.legendstyle = 'F'
            if self.data_driven:
                hist_error_control = stack_control.sum
                hist_error_control.drawstyle = 'E2'
                hist_error_control.fillstyle = '/'
                hist_error_control.color = 'gray'
                hist_error_control.title = 'stat. unc.'
                hist_error_control.legendstyle = 'F'

            # put the data together
            all_obs_prompt = sum(data_prompt)
            all_obs_prompt.title = 'observed'
            if self.data_driven:
                all_obs_nonprompt_control = sum(data_nonprompt_control)
                all_obs_nonprompt_control.title = 'observed'
                all_obs_nonprompt_control.drawstyle = 'EP'

            # prepare the legend
            print(signals_to_plot)
            for jj in signals_to_plot:
                print(jj.name, jj.integral())
            if len(signals_to_plot):
                legend = Legend([all_obs_prompt, stack, hist_error],
                                pad=self.main_pad,
                                leftmargin=0.,
                                rightmargin=0.,
                                topmargin=0.,
                                textfont=42,
                                textsize=0.025,
                                entrysep=0.01,
                                entryheight=0.04)
                legend_signals = Legend(signals_to_plot,
                                        pad=self.main_pad,
                                        leftmargin=0.,
                                        rightmargin=0.,
                                        topmargin=0.,
                                        textfont=42,
                                        textsize=0.025,
                                        entrysep=0.01,
                                        entryheight=0.04)
                legend_signals.SetBorderSize(0)
                legend_signals.x1 = 0.42
                legend_signals.y1 = 0.74
                legend_signals.x2 = 0.88
                legend_signals.y2 = 0.90
                legend_signals.SetFillColor(0)
                legend.SetBorderSize(0)
                legend.x1 = 0.2
                legend.y1 = 0.74
                legend.x2 = 0.45
                legend.y2 = 0.90
                legend.SetFillColor(0)
            else:
                legend = Legend([all_obs_prompt, stack, hist_error],
                                pad=self.main_pad,
                                leftmargin=0.,
                                rightmargin=0.,
                                topmargin=0.,
                                textfont=42,
                                textsize=0.03,
                                entrysep=0.01,
                                entryheight=0.04)
                legend.SetBorderSize(0)
                legend.x1 = 0.55
                legend.y1 = 0.74
                legend.x2 = 0.88
                legend.y2 = 0.90
                legend.SetFillColor(0)

            # plot with ROOT, linear and log scale
            for islogy in [False, True]:
                things_to_plot = [stack, hist_error]
                if not self.blinded:
                    things_to_plot.append(all_obs_prompt)
                # plot signals, as an option
                if self.plot_signals:
                    things_to_plot += signals_to_plot

                # set the y axis range
                # FIXME! setting it by hand to each object as it doesn't work if passed to draw
                if islogy:
                    yaxis_max = 40. * max(
                        [ithing.max() for ithing in things_to_plot])
                else:
                    yaxis_max = 1.65 * max(
                        [ithing.max() for ithing in things_to_plot])
                if islogy:
                    yaxis_min = 0.01
                else:
                    yaxis_min = 0.
                for ithing in things_to_plot:
                    ithing.SetMaximum(yaxis_max)
                draw(things_to_plot,
                     xtitle=xlabel,
                     ytitle=ylabel,
                     pad=self.main_pad,
                     logy=islogy)

                # expectation uncertainty in the ratio pad
                ratio_exp_error = Hist(bins)
                ratio_data = Hist(bins)
                for ibin in hist_error.bins_range():
                    # guard against division by zero in empty expectation bins
                    ratio_exp_error.set_bin_content(ibin, 1.)
                    ratio_exp_error.set_bin_error(
                        ibin,
                        hist_error.get_bin_error(ibin) /
                        hist_error.get_bin_content(ibin)
                        if hist_error.get_bin_content(ibin) != 0. else 0.)
                    ratio_data.set_bin_content(
                        ibin,
                        all_obs_prompt.get_bin_content(ibin) /
                        hist_error.get_bin_content(ibin)
                        if hist_error.get_bin_content(ibin) != 0. else 0.)
                    ratio_data.set_bin_error(
                        ibin,
                        all_obs_prompt.get_bin_error(ibin) /
                        hist_error.get_bin_content(ibin)
                        if hist_error.get_bin_content(ibin) != 0. else 0.)
                ratio_data.drawstyle = 'EP'
                ratio_data.title = ''
                ratio_exp_error.drawstyle = 'E2'
                ratio_exp_error.markersize = 0
                ratio_exp_error.title = ''
                ratio_exp_error.fillstyle = '/'
                ratio_exp_error.color = 'gray'
                for ithing in [ratio_data, ratio_exp_error]:
                    ithing.xaxis.set_label_size(
                        ithing.xaxis.get_label_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    ithing.yaxis.set_label_size(
                        ithing.yaxis.get_label_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    ithing.xaxis.set_title_size(
                        ithing.xaxis.get_title_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    ithing.yaxis.set_title_size(
                        ithing.yaxis.get_title_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    ithing.yaxis.set_ndivisions(405)
                    ithing.yaxis.set_title_offset(0.4)
                things_to_plot = [ratio_exp_error]
                if not self.blinded:
                    things_to_plot.append(ratio_data)
                draw(things_to_plot,
                     xtitle=xlabel,
                     ytitle='obs/exp',
                     pad=self.ratio_pad,
                     logy=False,
                     ylimits=(0.5, 1.5))

                # reference line at ratio == 1
                line = ROOT.TLine(min(bins), 1., max(bins), 1.)
                line.SetLineColor(ROOT.kBlack)
                line.SetLineWidth(1)
                self.ratio_pad.cd()
                line.Draw('same')

                # chi2_score_text = '\chi^{2}/NDF = %.1f' %(all_obs_prompt.Chi2Test(hist_error, 'UW CHI2/NDF'))
                chi2_score_text = 'p-value = %.2f' % (all_obs_prompt.Chi2Test(
                    hist_error, 'UW'))
                chi2_score = ROOT.TLatex(0.7, 0.81, chi2_score_text)
                chi2_score.SetTextFont(43)
                chi2_score.SetTextSize(15)
                chi2_score.SetNDC()
                chi2_score.Draw('same')

                self.canvas.cd()
                # FIXME! add SS and OS channels
                if self.full_channel == 'mmm':
                    channel = '\mu\mu\mu'
                elif self.full_channel == 'eee':
                    channel = 'eee'
                elif self.full_channel == 'mem_os':
                    channel = '\mu^{\pm}\mu^{\mp}e'
                elif self.full_channel == 'mem_ss':
                    channel = '\mu^{\pm}\mu^{\pm}e'
                elif self.full_channel == 'eem_os':
                    channel = 'e^{\pm}e^{\mp}\mu'
                elif self.full_channel == 'eem_ss':
                    channel = 'e^{\pm}e^{\pm}\mu'
                else:
                    assert False, 'ERROR: Channel not valid.'
                finalstate = ROOT.TLatex(0.68, 0.68, channel)
                finalstate.SetTextFont(43)
                finalstate.SetTextSize(25)
                finalstate.SetNDC()
                finalstate.Draw('same')

                self.canvas.cd()
                # remove old legend
                for iprim in self.canvas.primitives:
                    if isinstance(iprim, Legend):
                        self.canvas.primitives.remove(iprim)
                legend.Draw('same')
                if self.plot_signals:
                    legend_signals.Draw('same')
                CMS_lumi(self.main_pad,
                         4,
                         0,
                         lumi_13TeV="%d, L = %.1f fb^{-1}" %
                         (self.year, self.lumi / 1000.))
                self.canvas.Modified()
                self.canvas.Update()
                for iformat in ['pdf', 'png', 'root']:
                    self.canvas.SaveAs('/'.join([
                        self.plt_dir, 'log' if islogy else 'lin',
                        iformat if iformat != 'pdf' else '',
                        '%s%s.%s' % (label, '_log' if islogy else '_lin', iformat)
                    ]))

                # plot distributions in loose not tight
                # check MC contamination there
                if self.data_driven and variable not in ['fr', 'fr_corr']:
                    things_to_plot = [
                        stack_control, hist_error_control,
                        all_obs_nonprompt_control
                    ]
                    # set the y axis range
                    # FIXME! setting it by hand to each object as it doesn't work if passed to draw
                    if islogy:
                        yaxis_max = 40. * max(
                            [ithing.max() for ithing in things_to_plot])
                    else:
                        yaxis_max = 1.65 * max(
                            [ithing.max() for ithing in things_to_plot])
                    if islogy:
                        yaxis_min = 0.01
                    else:
                        yaxis_min = 0.
                    for ithing in things_to_plot:
                        ithing.SetMaximum(yaxis_max)
                        ithing.SetMinimum(yaxis_min)
                    draw(things_to_plot,
                         xtitle=xlabel,
                         ytitle=ylabel,
                         pad=self.main_pad,
                         logy=islogy,
                         ylimits=(yaxis_min, yaxis_max))
                    new_legend = Legend(
                        stack_control.hists +
                        [hist_error_control, all_obs_nonprompt_control],
                        pad=self.main_pad,
                        leftmargin=0.,
                        rightmargin=0.,
                        topmargin=0.,
                        textfont=42,
                        textsize=0.03,
                        entrysep=0.01,
                        entryheight=0.04)
                    new_legend.SetBorderSize(0)
                    new_legend.x1 = 0.55
                    new_legend.y1 = 0.71
                    new_legend.x2 = 0.88
                    new_legend.y2 = 0.90
                    new_legend.SetFillColor(0)

                    # divide MC to subtract by data
                    stack_nonprompt_control_scaled_list = []
                    for ihist in stack_control.hists:
                        new_hist = copy(ihist)
                        for ibin in new_hist.bins_range():
                            # nan_to_num maps 0/0 to 0
                            new_hist.SetBinContent(
                                ibin,
                                np.nan_to_num(
                                    np.divide(
                                        new_hist.GetBinContent(ibin),
                                        all_obs_nonprompt_control.
                                        GetBinContent(ibin))))
                            new_hist.SetBinError(
                                ibin,
                                np.nan_to_num(
                                    np.divide(
                                        new_hist.GetBinError(ibin),
                                        all_obs_nonprompt_control.
                                        GetBinContent(ibin))))
                        stack_nonprompt_control_scaled_list.append(new_hist)
                    stack_control_scaled = HistStack(
                        stack_nonprompt_control_scaled_list,
                        drawstyle='HIST',
                        title='')
                    stack_control_scaled_err = stack_control_scaled.sum
                    stack_control_scaled_err.drawstyle = 'E2'
                    stack_control_scaled_err.fillstyle = '/'
                    stack_control_scaled_err.color = 'gray'
                    stack_control_scaled_err.title = 'stat. unc.'
                    stack_control_scaled_err.legendstyle = 'F'
                    draw([stack_control_scaled, stack_control_scaled_err],
                         xtitle=xlabel,
                         ytitle='MC/data',
                         pad=self.ratio_pad,
                         logy=False)
                    stack_control_scaled.xaxis.set_label_size(
                        stack_control_scaled.xaxis.get_label_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    stack_control_scaled.yaxis.set_label_size(
                        stack_control_scaled.yaxis.get_label_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    stack_control_scaled.xaxis.set_title_size(
                        stack_control_scaled.xaxis.get_title_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    stack_control_scaled.yaxis.set_title_size(
                        stack_control_scaled.yaxis.get_title_size() * 3.
                    )  # the scale should match that of the main/ratio pad size ratio
                    stack_control_scaled.yaxis.set_ndivisions(405)
                    stack_control_scaled.yaxis.set_title_offset(0.4)
                    stack_control_scaled.SetMinimum(0.)
                    stack_control_scaled.SetMaximum(1.5)
                    CMS_lumi(self.main_pad,
                             4,
                             0,
                             lumi_13TeV="%d, L = %.1f fb^{-1}" %
                             (self.year, self.lumi / 1000.))
                    self.canvas.cd()
                    # remove old legend
                    for iprim in self.canvas.primitives:
                        if isinstance(iprim, Legend):
                            self.canvas.primitives.remove(iprim)
                    # draw new legend
                    new_legend.Draw('same')
                    self.canvas.Modified()
                    self.canvas.Update()
                    for iformat in ['pdf', 'png', 'root']:
                        self.canvas.SaveAs('/'.join([
                            self.plt_dir, 'lnt_region',
                            'log' if islogy else 'lin',
                            iformat if iformat != 'pdf' else '',
                            '%s%s.%s' %
                            (label, '_log' if islogy else '_lin', iformat)
                        ]))

                    # compare shapes in tight and loose not tight
                    # data in tight
                    all_obs_prompt_norm = copy(all_obs_prompt)
                    if all_obs_prompt_norm.integral() != 0:
                        all_obs_prompt_norm.Scale(
                            np.nan_to_num(
                                np.divide(1., all_obs_prompt_norm.integral())))
                    #import pdb; pdb.set_trace()
                    all_obs_prompt_norm.drawstyle = 'hist e'
                    all_obs_prompt_norm.linecolor = 'black'
                    all_obs_prompt_norm.markersize = 0
                    all_obs_prompt_norm.legendstyle = 'LE'
                    all_obs_prompt_norm.title = ''
                    all_obs_prompt_norm.label = 'data - tight'

                    # data MC subtracted in loose
                    all_obs_prompt_mc_sub_norm = copy(all_obs_prompt)
                    all_obs_prompt_mc_sub_norm.add(all_exp_prompt, -1)
                    all_obs_prompt_mc_sub_norm.Scale(
                        np.nan_to_num(
                            np.divide(1.,
                                      all_obs_prompt_mc_sub_norm.integral())))
                    all_obs_prompt_mc_sub_norm.drawstyle = 'hist e'
                    all_obs_prompt_mc_sub_norm.linecolor = 'green'
                    all_obs_prompt_mc_sub_norm.markersize = 0
                    all_obs_prompt_mc_sub_norm.legendstyle = 'LE'
                    all_obs_prompt_mc_sub_norm.title = ''
                    all_obs_prompt_mc_sub_norm.label = '(data-MC) - tight'

                    # data in loose
                    all_obs_nonprompt_control_norm = copy(
                        all_obs_nonprompt_control)
                    all_obs_nonprompt_control_norm.Scale(
                        np.nan_to_num(
                            np.divide(
                                1.,
                                all_obs_nonprompt_control_norm.integral())))
                    all_obs_nonprompt_control_norm.drawstyle = 'hist e'
                    all_obs_nonprompt_control_norm.linecolor = 'red'
                    all_obs_nonprompt_control_norm.markersize = 0
                    all_obs_nonprompt_control_norm.legendstyle = 'LE'
                    all_obs_nonprompt_control_norm.title = ''
                    all_obs_nonprompt_control_norm.label = 'data - l-n-t'

                    # data MC subtracted in loose
                    all_obs_nonprompt_control_mc_sub_norm = copy(
                        all_obs_nonprompt_control)
                    all_obs_nonprompt_control_mc_sub_norm.add(
                        stack_control.sum, -1)
                    all_obs_nonprompt_control_mc_sub_norm.Scale(
                        np.nan_to_num(
                            np.divide(
                                1.,
                                all_obs_nonprompt_control_mc_sub_norm.integral(
                                ))))
                    all_obs_nonprompt_control_mc_sub_norm.drawstyle = 'hist e'
                    all_obs_nonprompt_control_mc_sub_norm.linecolor = 'blue'
                    all_obs_nonprompt_control_mc_sub_norm.markersize = 0
                    all_obs_nonprompt_control_mc_sub_norm.legendstyle = 'LE'
                    all_obs_nonprompt_control_mc_sub_norm.title = ''
                    all_obs_nonprompt_control_mc_sub_norm.label = '(data-MC) - l-n-t'

                    things_to_plot = [
                        all_obs_prompt_norm,
                        all_obs_prompt_mc_sub_norm,
                        all_obs_nonprompt_control_norm,
                        all_obs_nonprompt_control_mc_sub_norm,
                    ]
                    yaxis_max = max([ii.GetMaximum() for ii in things_to_plot])
                    draw(things_to_plot,
                         xtitle=xlabel,
                         ytitle=ylabel,
                         pad=self.main_pad,
                         logy=islogy,
                         ylimits=(yaxis_min, 1.55 * yaxis_max))

                    self.canvas.cd()
                    # remove old legend
                    for iprim in self.canvas.primitives:
                        if isinstance(iprim, Legend):
                            self.canvas.primitives.remove(iprim)
                    shape_legend = Legend([],
                                          pad=self.main_pad,
                                          leftmargin=0.,
                                          rightmargin=0.,
                                          topmargin=0.,
                                          textfont=42,
                                          textsize=0.03,
                                          entrysep=0.01,
                                          entryheight=0.04)
                    shape_legend.AddEntry(all_obs_prompt_norm,
                                          all_obs_prompt_norm.label,
                                          all_obs_prompt_norm.legendstyle)
                    shape_legend.AddEntry(
                        all_obs_prompt_mc_sub_norm,
                        all_obs_prompt_mc_sub_norm.label,
                        all_obs_prompt_mc_sub_norm.legendstyle)
                    shape_legend.AddEntry(
                        all_obs_nonprompt_control_norm,
                        all_obs_nonprompt_control_norm.label,
                        all_obs_nonprompt_control_norm.legendstyle)
                    shape_legend.AddEntry(
                        all_obs_nonprompt_control_mc_sub_norm,
                        all_obs_nonprompt_control_mc_sub_norm.label,
                        all_obs_nonprompt_control_mc_sub_norm.legendstyle)
                    shape_legend.SetBorderSize(0)
                    shape_legend.x1 = 0.50
                    shape_legend.y1 = 0.71
                    shape_legend.x2 = 0.88
                    shape_legend.y2 = 0.90
                    shape_legend.SetFillColor(0)
                    shape_legend.Draw('same')

                    # plot ratios
                    all_obs_prompt_norm_ratio = copy(all_obs_prompt_norm)
                    all_obs_prompt_mc_sub_norm_ratio = copy(
                        all_obs_prompt_mc_sub_norm)
                    all_obs_nonprompt_control_norm_ratio = copy(
                        all_obs_nonprompt_control_norm)
                    all_obs_nonprompt_control_mc_sub_norm_ratio = copy(
                        all_obs_nonprompt_control_mc_sub_norm)
                    all_obs_prompt_norm_ratio.Divide(
                        all_obs_prompt_mc_sub_norm_ratio)
                    all_obs_nonprompt_control_norm_ratio.Divide(
                        all_obs_prompt_mc_sub_norm_ratio)
                    all_obs_nonprompt_control_mc_sub_norm_ratio.Divide(
                        all_obs_prompt_mc_sub_norm_ratio)
                    things_to_plot_ratio = [
                        all_obs_prompt_norm_ratio,
                        all_obs_nonprompt_control_norm_ratio,
                        all_obs_nonprompt_control_mc_sub_norm_ratio,
                    ]
                    for ithing in things_to_plot_ratio:
                        ithing.xaxis.set_label_size(
                            ithing.xaxis.get_label_size() * 3.
                        )  # the scale should match that of the main/ratio pad size ratio
                        ithing.yaxis.set_label_size(
                            ithing.yaxis.get_label_size() * 3.
                        )  # the scale should match that of the main/ratio pad size ratio
                        ithing.xaxis.set_title_size(
                            ithing.xaxis.get_title_size() * 3.
                        )  # the scale should match that of the main/ratio pad size ratio
                        ithing.yaxis.set_title_size(
                            ithing.yaxis.get_title_size() * 3.
                        )  # the scale should match that of the main/ratio pad size ratio
                        ithing.yaxis.set_ndivisions(405)
                        ithing.yaxis.set_title_offset(0.4)
                        ithing.SetMinimum(0.)
                        ithing.SetMaximum(2.)
                    draw(things_to_plot_ratio,
                         xtitle=xlabel,
                         ytitle='1/(data-MC)_{tight}',
                         pad=self.ratio_pad,
                         logy=False,
                         ylimits=(0., 2.))
                    self.ratio_pad.cd()
                    line.Draw('same')
                    CMS_lumi(self.main_pad,
                             4,
                             0,
                             lumi_13TeV="%d, L = %.1f fb^{-1}" %
                             (self.year, self.lumi / 1000.))
                    self.canvas.Modified()
                    self.canvas.Update()
                    for iformat in ['pdf', 'png', 'root']:
                        self.canvas.SaveAs('/'.join([
                            self.plt_dir, 'shapes',
                            'log' if islogy else 'lin',
                            iformat if iformat != 'pdf' else '',
                            '%s%s.%s' %
                            (label, '_log' if islogy else '_lin', iformat)
                        ]))

                # save only the datacards you want, don't flood everything
                # NOTE(review): this sits inside the islogy loop, so the
                # datacard is written twice (lin and log pass) — confirm intended
                if len(self.datacards) and label not in self.datacards:
                    continue
                # FIXME! allow it to save datacards even for non data driven bkgs
                if self.data_driven:
                    self.create_datacards(data=all_obs_prompt,
                                          bkgs={
                                              'prompt': all_exp_prompt,
                                              'nonprompt': all_exp_nonprompt
                                          },
                                          signals=all_signals,
                                          label=label)
h_t3.SetLineColor(6) h_t4.SetLineColor(7) h_data.SetLineColor(1) h_data.SetMarkerColor(1) ymax = getMax([h_data, h_t1, h_t2, h_t3]) ymax = ymax * 1.1 h_data.GetYaxis().SetRangeUser(0, ymax) h_t1.GetYaxis().SetRangeUser(0, ymax) h_t2.GetYaxis().SetRangeUser(0, ymax) h_t3.GetYaxis().SetRangeUser(0, ymax) h_t4.GetYaxis().SetRangeUser(0, ymax) c = Canvas() c.Divide(2) c.cd(1) h_data.Draw('PE') h_t1.Draw('SAME HIST') h_t2.Draw('SAME HIST') h_t3.Draw('SAME HIST') h_t4.Draw('SAME HIST') templates = {} if useT1: templates['t1'] = h_t1 if useT2: templates['t2'] = h_t2 if useT3: templates['t3'] = h_t3 if useT4: templates['t4'] = h_t4 fit_data = FitData(h_data, templates, fit_boundaries=(0, h_data.nbins())) fit_collection = FitDataCollection()
legend.AddEntry(zz, 'ZZ', 'lf') legend.AddEntry(fakes, 'Non-prompt', "lf") whtt_file = io.open('../vhtt_shapes.root') def get_total(histo): return whtt_file.Get('mmt_mumu_final_MuTauMass/%s' % histo) + \ whtt_file.Get('emt_emu_final_SubleadingMass/%s' % histo) hWZ = get_total('WZ') hZZ = get_total('ZZ') hZJets = get_total('fakes') hData = get_total('data_obs') hSignal = get_total('VH120') + get_total('VH120WW') hHWW = hSignal canvas.cd() hHWW = hHWW*5.0 hZZ.decorate(zz) hWZ.decorate(wz) hZJets.decorate(fakes) hData.decorate(data) hHWW.decorate(signal) hHWW.SetLineWidth(2) for hist in [hZZ, hZJets, hWZ]: hist.format = 'hist' for hist in [hZZ, hZJets, hData]: pass
try: stack.sum.Integral() except: print "stack has no integral!" continue if plotWithMPL: gs = mpl.gridspec.GridSpec(2, 1, height_ratios=[4, 1]) gs.update(wspace=0.00, hspace=0.00) axes = plt.subplot(gs[0]) axes_ratio = plt.subplot(gs[1], sharex=axes) plt.setp(axes.get_xticklabels(), visible=False) if plotWithROOT: c = Canvas(700, 700) c.cd() pad1 = Pad(0, 0.3, 1, 1.0) pad1.SetBottomMargin(0) # Upper and lower plot are joined pad1.SetGrid() # Vertical grid pad1.Draw() # Draw the upper pad: pad1 c.cd() pad2 = Pad(0, 0.05, 1, 0.3) pad2.SetTopMargin(0) # Upper and lower plot are joined pad2.SetBottomMargin(0.3) # Upper and lower plot are joined pad2.SetGrid() # Vertical grid
def pvalue_plot(poi, pvalues, pad=None, xtitle='X', ytitle='P_{0}',
                linestyle=None, linecolor=None, yrange=None, verbose=False):
    """
    Plot one or more p-value curves against the parameter of interest.

    Parameters
    ----------
    poi : list
        POI values that were tested.
    pvalues : list
        A single list of p-values, or a list of such lists to overlay
        several p-value curves.
    pad : Canvas or Pad, optional (default=None)
        Pad to draw onto; a new Canvas is created when None.
    xtitle : str, optional (default='X')
        The x-axis label (POI name).
    ytitle : str, optional (default='P_{0}')
        The y-axis label.
    linestyle : str or list, optional (default=None)
        Line style for the curve, or a list of styles cycled over the
        curves.
    linecolor : str or list, optional (default=None)
        Line color for the curve, or a list of colors cycled over the
        curves.
    yrange : tuple, optional (default=None)
        Explicit (low, high) y-axis range.
    verbose : bool, optional (default=False)
        Log the (x, y) points of every curve.

    Returns
    -------
    pad : Canvas
        The pad drawn onto.
    graphs : list of Graph
        The p-value graphs.
    """
    if not pvalues:
        raise ValueError("pvalues is empty")
    if not poi:
        raise ValueError("poi is empty")
    # a single curve is promoted to a one-element list of curves
    if not isinstance(pvalues[0], (list, tuple)):
        pvalues = [pvalues]
    # turn style/color options into endless iterators over the curves
    if linecolor is not None:
        if not isinstance(linecolor, list):
            linecolor = [linecolor]
        linecolor = cycle(linecolor)
    if linestyle is not None:
        if not isinstance(linestyle, list):
            linestyle = [linestyle]
        linestyle = cycle(linestyle)

    with preserve_current_canvas():
        if pad is None:
            pad = Canvas()
        pad.cd()
        pad.SetLogy()

        # dummy histogram that only supplies the axes
        min_poi, max_poi = min(poi), max(poi)
        axis_hist = Hist(1000, min_poi, max_poi)
        xaxis = axis_hist.xaxis
        yaxis = axis_hist.yaxis
        xaxis.SetRangeUser(min_poi, max_poi)
        axis_hist.Draw('AXIS')

        lowest_pvalue = float('inf')
        graphs = []
        for curve_idx, curve in enumerate(pvalues):
            curve_graph = Graph(len(poi), linestyle='dashed',
                                drawstyle='L', linewidth=2)
            for point_idx, (point, pvalue) in enumerate(zip(poi, curve)):
                curve_graph.SetPoint(point_idx, point, pvalue)
            if linestyle is not None:
                curve_graph.linestyle = linestyle.next()
            if linecolor is not None:
                curve_graph.linecolor = linecolor.next()
            graphs.append(curve_graph)
            # track the smallest p-value across all curves
            lowest_pvalue = min(lowest_pvalue, min(curve))

        if verbose:
            for curve_graph in graphs:
                log.info(['{0:1.1f}'.format(xval)
                          for xval in list(curve_graph.x())])
                log.info(['{0:0.3f}'.format(yval)
                          for yval in list(curve_graph.y())])

        # draw() takes care of the axis limits
        axes, bounds = draw(graphs, pad=pad, same=True, logy=True,
                            xtitle=xtitle, ytitle=ytitle,
                            xaxis=xaxis, yaxis=yaxis,
                            ypadding=(0.2, 0.1), logy_crop_value=1E-300)

        if yrange is not None:
            xaxis, yaxis = axes
            yaxis.SetLimits(*yrange)
            yaxis.SetRangeUser(*yrange)
            lowest_pvalue = yrange[0]

        # overlay horizontal N-sigma lines down to the smallest p-value
        sigma_line = Line()
        sigma_line.SetLineStyle(2)
        sigma_line.SetLineColor(2)
        sigma_label = ROOT.TLatex()
        sigma_label.SetNDC(False)
        sigma_label.SetTextSize(20)
        sigma_label.SetTextColor(2)
        sigma = 0
        while True:
            pvalue = gaussian_cdf_c(sigma)
            if pvalue < lowest_pvalue:
                break
            keepalive(pad, sigma_label.DrawLatex(
                max_poi, pvalue, " {0}#sigma".format(sigma)))
            keepalive(pad, sigma_line.DrawLine(
                min_poi, pvalue, max_poi, pvalue))
            sigma += 1

        pad.RedrawAxis()
        pad.Update()
    return pad, graphs
def ln_likelihood_full_matching(self, w_value, alpha, zeta, beta, gamma,
                                delta, kappa, eta, lamb, g1_value,
                                extraction_efficiency, gas_gain_value,
                                gas_gain_width, spe_res, s1_acc_par0,
                                s1_acc_par1, s2_acc_par0, s2_acc_par1,
                                scale_par, d_gpu_local_info, draw_fit=False):
    """Compute the total log-likelihood of the given model parameters.

    The result is the sum of (a) prior log-likelihoods for the
    detector-response and Lindhard-model parameters, and (b) a matching
    log-likelihood obtained by running a GPU Monte Carlo that fills an
    (S1, S2) histogram and comparing it to the measured data histogram
    via ``c_log_likelihood``.

    Parameters
    ----------
    w_value, alpha, zeta, beta, gamma, delta, kappa, eta, lamb :
        Physics-model parameters; each receives its own prior below.
    g1_value, extraction_efficiency, gas_gain_value, gas_gain_width,
    spe_res, s1_acc_par0, s1_acc_par1, s2_acc_par0, s2_acc_par1 :
        Detector-response / acceptance parameters.
    scale_par :
        Multiplicative normalization applied to the MC histogram.
    d_gpu_local_info : dict
        Carries the GPU kernel handle ('function_to_call'), RNG states,
        device arrays for energies and bin edges, and 'num_repetitions'.
    draw_fit : bool, optional (default=False)
        When True, draw data/MC comparison plots and save them under
        './temp_results/'.

    Returns
    -------
    float
        Total log-likelihood; -inf when the priors are not finite, the
        MC histogram is empty, or the matching likelihood is NaN.
    """
    # -----------------------------------------------
    # -----------------------------------------------
    # determine prior likelihood and variables
    # -----------------------------------------------
    # -----------------------------------------------

    prior_ln_likelihood = 0
    matching_ln_likelihood = 0

    # get w-value prior likelihood
    prior_ln_likelihood += self.get_ln_prior_w_value(w_value)

    # priors of detector variables
    prior_ln_likelihood += self.get_ln_prior_g1(g1_value)
    prior_ln_likelihood += self.get_ln_prior_extraction_efficiency(
        extraction_efficiency)
    prior_ln_likelihood += self.get_ln_prior_gas_gain_value(gas_gain_value)
    prior_ln_likelihood += self.get_ln_prior_gas_gain_width(gas_gain_width)
    prior_ln_likelihood += self.get_ln_prior_spe_res(spe_res)
    prior_ln_likelihood += self.get_ln_prior_s1_acc_pars(
        s1_acc_par0, s1_acc_par1)
    prior_ln_likelihood += self.get_ln_prior_s2_acc_pars(
        s2_acc_par0, s2_acc_par1)

    # get priors from lindhard parameters
    prior_ln_likelihood += self.get_ln_prior_par_greater_than_zero(alpha)
    prior_ln_likelihood += self.get_ln_prior_par_greater_than_zero(beta)
    prior_ln_likelihood += self.get_ln_prior_par_greater_than_zero(gamma)
    prior_ln_likelihood += self.get_ln_prior_par_greater_than_zero(kappa)
    prior_ln_likelihood += self.get_ln_prior_par_greater_than_zero(eta)
    prior_ln_likelihood += self.get_ln_prior_par_greater_than_zero(lamb)

    prior_ln_likelihood += self.get_ln_prior_zeta(zeta)
    prior_ln_likelihood += self.get_ln_prior_delta(delta)

    # if prior is -inf then don't bother with MC
    # (still fall through when draw_fit is set so the plots get made)
    #print 'removed prior infinity catch temporarily'
    if not np.isfinite(prior_ln_likelihood) and not draw_fit:
        return -np.inf

    # -----------------------------------------------
    # -----------------------------------------------
    # run MC
    # -----------------------------------------------
    # -----------------------------------------------

    # cast every kernel argument to the fixed-width dtypes the GPU expects
    num_trials = np.asarray(self.num_mc_events, dtype=np.int32)
    num_repetitions = np.asarray(d_gpu_local_info['num_repetitions'],
                                 dtype=np.int32)
    mean_field = np.asarray(self.d_data_parameters['mean_field'],
                            dtype=np.float32)

    w_value = np.asarray(w_value, dtype=np.float32)
    alpha = np.asarray(alpha, dtype=np.float32)
    zeta = np.asarray(zeta, dtype=np.float32)
    beta = np.asarray(beta, dtype=np.float32)
    gamma = np.asarray(gamma, dtype=np.float32)
    delta = np.asarray(delta, dtype=np.float32)
    kappa = np.asarray(kappa, dtype=np.float32)
    eta = np.asarray(eta, dtype=np.float32)
    lamb = np.asarray(lamb, dtype=np.float32)

    g1_value = np.asarray(g1_value, dtype=np.float32)
    extraction_efficiency = np.asarray(extraction_efficiency,
                                       dtype=np.float32)
    gas_gain_value = np.asarray(gas_gain_value, dtype=np.float32)
    gas_gain_width = np.asarray(gas_gain_width, dtype=np.float32)
    spe_res = np.asarray(spe_res, dtype=np.float32)

    s1_acc_par0 = np.asarray(s1_acc_par0, dtype=np.float32)
    s1_acc_par1 = np.asarray(s1_acc_par1, dtype=np.float32)
    s2_acc_par0 = np.asarray(s2_acc_par0, dtype=np.float32)
    s2_acc_par1 = np.asarray(s2_acc_par1, dtype=np.float32)

    # for histogram binning
    num_bins_s1 = np.asarray(self.s1_settings[0], dtype=np.int32)
    num_bins_s2 = np.asarray(self.s2_settings[0], dtype=np.int32)

    # flat output histogram that the kernel fills in place (drv.InOut)
    a_hist_2d = np.zeros(self.s1_settings[0] * self.s2_settings[0],
                         dtype=np.int32)

    #print d_gpu_local_info['d_gpu_energy'][degree_setting]

    # argument tuple must match the kernel signature's order exactly
    l_gpu_args = (d_gpu_local_info['rng_states'], drv.In(num_trials),
                  drv.In(num_repetitions), drv.In(mean_field),
                  d_gpu_local_info['gpu_energies'], drv.In(w_value),
                  drv.In(alpha), drv.In(zeta), drv.In(beta),
                  drv.In(gamma), drv.In(delta), drv.In(kappa),
                  drv.In(eta), drv.In(lamb), drv.In(g1_value),
                  drv.In(extraction_efficiency), drv.In(gas_gain_value),
                  drv.In(gas_gain_width), drv.In(spe_res),
                  drv.In(s1_acc_par0), drv.In(s1_acc_par1),
                  drv.In(s2_acc_par0), drv.In(s2_acc_par1),
                  drv.In(num_bins_s1),
                  d_gpu_local_info['gpu_bin_edges_s1'],
                  drv.In(num_bins_s2),
                  d_gpu_local_info['gpu_bin_edges_s2'],
                  drv.InOut(a_hist_2d))

    #print '\n\n\nBefore call...'
    #print d_gpu_local_info
    d_gpu_local_info['function_to_call'](*l_gpu_args, **self.d_gpu_scale)
    #print 'After call...\n\n\n'

    # unflatten to a 2D histogram; transpose puts S1 on the first axis
    a_s1_s2_mc = np.reshape(a_hist_2d,
                            (self.s2_settings[0], self.s1_settings[0])).T
    #print list(a_s1_s2_mc)

    sum_mc = np.sum(a_s1_s2_mc, dtype=np.float32)
    if sum_mc == 0:
        # empty MC histogram: the parameters cannot describe the data
        #print 'sum mc == 0'
        return -np.inf

    # this forces our scale to be close to 1 (difference comes from acceptance)
    # NOTE(review): normalization divides by self.num_repetitions while the
    # kernel was given d_gpu_local_info['num_repetitions'] — confirm the two
    # are kept in sync by the caller.
    a_s1_s2_mc = np.multiply(
        a_s1_s2_mc, float(scale_par) * len(self.a_data_s1) /
        float(self.num_mc_events * self.num_repetitions))

    #'ml'
    if draw_fit:
        # NOTE(review): 'degree_setting' is not a parameter and is never
        # assigned in this method, so this branch would raise NameError as
        # written — presumably self.l_degree_settings_in_use[0] was meant
        # (cf. the commented-out print below); confirm before enabling.
        # matplotlib: 2D data (top) vs MC (bottom) comparison
        f, (ax1, ax2) = plt.subplots(2, sharex=True, sharey=True)
        ax1.set_xlabel('S1 [PE]')
        ax1.set_ylabel('log(S2/S1)')
        ax2.set_xlabel('S1 [PE]')
        ax2.set_ylabel('log(S2/S1)')

        # rot90+flipud reorient the histogram array for pcolormesh
        s1_s2_data_plot = np.rot90(self.d_coincidence_data_information[
            self.l_cathode_settings_in_use[0]][degree_setting]
            ['a_log_s2_s1'])
        s1_s2_data_plot = np.flipud(s1_s2_data_plot)
        ax1.pcolormesh(self.a_s1_bin_edges, self.a_log_bin_edges,
                       s1_s2_data_plot)

        s1_s2_mc_plot = np.rot90(a_s1_s2_mc)
        s1_s2_mc_plot = np.flipud(s1_s2_mc_plot)
        #print self.l_s1_settings
        #print self.l_log_settings
        #print self.d_coincidence_data_information[self.l_cathode_settings_in_use[0]][self.l_degree_settings_in_use[0]]['a_log_s2_s1'].shape
        #print a_s1_s2_mc.shape
        #print s1_s2_data_plot.shape
        #print s1_s2_mc_plot.shape
        ax2.pcolormesh(self.a_s1_bin_edges, self.a_log_bin_edges,
                       s1_s2_mc_plot)
        #plt.colorbar()

        # ROOT: 1D projections onto S1 (left pad) and log(S2/S1) (right pad)
        c1 = Canvas(1400, 400)
        c1.Divide(2)

        h_s1_data = Hist(*self.l_s1_settings, name='hS1_draw_data')
        root_numpy.array2hist(
            self.d_coincidence_data_information[
                self.l_cathode_settings_in_use[0]][degree_setting]
            ['a_log_s2_s1'].sum(axis=1), h_s1_data)

        hS1MC = Hist(*self.l_s1_settings, name='hS1_draw_mc')
        root_numpy.array2hist(a_s1_s2_mc.sum(axis=1), hS1MC)

        # scale MC to the data integral for visual comparison
        s1_scale_factor = h_s1_data.Integral() / float(hS1MC.Integral())

        g_s1_data = neriX_analysis.convert_hist_to_graph_with_poisson_errors(
            h_s1_data)
        g_s1_mc = neriX_analysis.convert_hist_to_graph_with_poisson_errors(
            hS1MC, scale=s1_scale_factor)
        g_s1_mc.SetFillColor(root.kBlue)
        g_s1_mc.SetMarkerColor(root.kBlue)
        g_s1_mc.SetLineColor(root.kBlue)
        g_s1_mc.SetFillStyle(3005)

        g_s1_data.SetTitle('S1 Comparison')
        g_s1_data.GetXaxis().SetTitle('S1 [PE]')
        g_s1_data.GetYaxis().SetTitle('Counts')
        g_s1_data.SetLineColor(root.kRed)
        g_s1_data.SetMarkerSize(0)
        g_s1_data.GetXaxis().SetRangeUser(self.l_s1_settings[1],
                                          self.l_s1_settings[2])
        g_s1_data.GetYaxis().SetRangeUser(
            0, 1.2 * max(h_s1_data.GetMaximum(), hS1MC.GetMaximum()))

        c1.cd(1)
        g_s1_data.Draw('ap')
        g_s1_mc.Draw('same')
        g_s1_mc_band = g_s1_mc.Clone()
        g_s1_mc_band.Draw('3 same')

        h_s2_data = Hist(*self.l_log_settings, name='h_s2_draw_data')
        root_numpy.array2hist(
            self.d_coincidence_data_information[
                self.l_cathode_settings_in_use[0]][degree_setting]
            ['a_log_s2_s1'].sum(axis=0), h_s2_data)

        h_s2_mc = Hist(*self.l_log_settings, name='h_s2_draw_mc')
        root_numpy.array2hist(a_s1_s2_mc.sum(axis=0), h_s2_mc)

        s2_scale_factor = h_s2_data.Integral() / float(h_s2_mc.Integral())

        g_s2_data = neriX_analysis.convert_hist_to_graph_with_poisson_errors(
            h_s2_data)
        g_s2_mc = neriX_analysis.convert_hist_to_graph_with_poisson_errors(
            h_s2_mc, scale=s2_scale_factor)
        g_s2_mc.SetFillColor(root.kBlue)
        g_s2_mc.SetMarkerColor(root.kBlue)
        g_s2_mc.SetLineColor(root.kBlue)
        g_s2_mc.SetFillStyle(3005)

        g_s2_data.SetTitle('Log(S2/S1) Comparison')
        g_s2_data.GetXaxis().SetTitle('Log(S2/S1)')
        g_s2_data.GetYaxis().SetTitle('Counts')
        g_s2_data.SetLineColor(root.kRed)
        g_s2_data.SetMarkerSize(0)
        g_s2_data.GetXaxis().SetRangeUser(self.l_log_settings[1],
                                          self.l_log_settings[2])
        g_s2_data.GetYaxis().SetRangeUser(
            0, 1.2 * max(h_s2_data.GetMaximum(), h_s2_mc.GetMaximum()))

        c1.cd(2)
        g_s2_data.Draw('ap')
        g_s2_mc.Draw('same')
        g_s2_mc_band = g_s2_mc.Clone()
        g_s2_mc_band.Draw('3 same')

        c1.Update()

        neriX_analysis.save_plot(['temp_results'], c1,
                                 '%d_deg_1d_hists' % (degree_setting),
                                 batch_mode=True)
        f.savefig('./temp_results/%d_deg_2d_hist.png' % (degree_setting))

    # -----------------------------------------------
    # compare data and MC histograms bin-by-bin
    # -----------------------------------------------
    flat_s1_s2_data = np.asarray(self.a_data_hist_s1_s2.flatten(),
                                 dtype=np.float32)
    flat_s1_s2_mc = np.asarray(a_s1_s2_mc.flatten(), dtype=np.float32)
    logLikelihoodMatching = c_log_likelihood(
        flat_s1_s2_data, flat_s1_s2_mc, len(flat_s1_s2_data),
        int(self.num_mc_events * self.num_repetitions), 0.95)
    #print prior_ln_likelihood
    #print logLikelihoodMatching
    #print max(flat_s1_s2_data)
    #print max(flat_s1_s2_mc)
    #print '\n\n'

    if np.isnan(logLikelihoodMatching):
        return -np.inf
    else:
        matching_ln_likelihood += logLikelihoodMatching

    # optional tempering of the likelihood (e.g. for sampler tuning)
    if self.b_suppress_likelihood:
        matching_ln_likelihood /= self.ll_suppression_factor

    total_ln_likelihood = prior_ln_likelihood + matching_ln_likelihood
    #print total_ln_likelihood

    if np.isnan(total_ln_likelihood):
        return -np.inf
    else:
        return total_ln_likelihood