def do_data_mc_plot(dirname, histname, output_filename, **plot_kwargs):
    data_file = cu.open_root_file(os.path.join(dirname, qgc.JETHT_ZB_FILENAME))
    qcd_file = cu.open_root_file(os.path.join(dirname, qgc.QCD_FILENAME))
    qcd_py_file = cu.open_root_file(os.path.join(dirname, qgc.QCD_PYTHIA_ONLY_FILENAME))
    qcd_hpp_file = cu.open_root_file(os.path.join(dirname, qgc.QCD_HERWIG_FILENAME))
    data_hist = cu.get_from_tfile(data_file, histname)
    qcd_hist = cu.get_from_tfile(qcd_file, histname)
    qcd_py_hist = cu.get_from_tfile(qcd_py_file, histname)
    qcd_hpp_hist = cu.get_from_tfile(qcd_hpp_file, histname)
    conts = [
        Contribution(data_hist, label="Data",
                     line_color=ROOT.kBlack, marker_size=0, marker_color=ROOT.kBlack),
        Contribution(qcd_hist, label="QCD MG+PYTHIA8 MC",
                     line_color=qgc.QCD_COLOUR, subplot=data_hist,
                     marker_size=0, marker_color=qgc.QCD_COLOUR),
        Contribution(qcd_py_hist, label="QCD PYTHIA8 MC",
                     line_color=qgc.QCD_COLOURS[2], subplot=data_hist,
                     marker_size=0, marker_color=qgc.QCD_COLOURS[2]),
        # Contribution(qcd_hpp_hist, label="QCD HERWIG++ MC",
        #              line_color=qgc.HERWIGPP_QCD_COLOUR, subplot=data_hist,
        #              marker_size=0, marker_color=qgc.HERWIGPP_QCD_COLOUR),
    ]
    plot = Plot(conts, what='hist',
                ytitle="N",
                xtitle="p_{T}^{Leading jet} [GeV]",
                subplot_type="ratio",
                subplot_title="Simulation / data",
                ylim=[1E3, None],
                lumi=cu.get_lumi_str(do_dijet=True, do_zpj=False),
                **plot_kwargs)
    plot.y_padding_max_log = 500
    plot.legend.SetX1(0.55)
    plot.legend.SetX2(0.98)
    plot.legend.SetY1(0.7)
    # plot.legend.SetY2(0.88)
    plot.plot("NOSTACK HIST E")
    plot.set_logx(do_more_labels=True, do_exponent=False)
    plot.set_logy(do_more_labels=False)
    plot.save(output_filename)
def do_mc_pt_comparison_plot(dirname_label_pairs, output_filename, qcd_filename, **plot_kwargs):
    # qcd_files = [cu.open_root_file(os.path.join(dl[0], qgc.QCD_FILENAME)) for dl in dirname_label_pairs]
    qcd_files = [cu.open_root_file(os.path.join(dl[0], qgc.QCD_PYTHIA_ONLY_FILENAME))
                 for dl in dirname_label_pairs]
    histname = "Dijet_tighter/pt_jet1"
    qcd_hists = [cu.get_from_tfile(qf, histname) for qf in qcd_files]
    N = len(dirname_label_pairs)
    conts = [Contribution(qcd_hists[i],
                          label=lab,
                          marker_color=cu.get_colour_seq(i, N),
                          line_color=cu.get_colour_seq(i, N),
                          line_style=(i % 3) + 1,
                          line_width=2,
                          rebin_hist=1,
                          subplot=qcd_hists[0] if i != 0 else None)
             for i, (d, lab) in enumerate(dirname_label_pairs)]
    plot = Plot(conts, what='hist',
                ytitle="N",
                subplot_limits=(0.5, 1.5),
                subplot_type="ratio",
                subplot_title="* / %s" % (dirname_label_pairs[0][1]),
                **plot_kwargs)
    plot.y_padding_max_log = 500
    plot.legend.SetY1(0.7)
    plot.plot("NOSTACK HIST E")
    plot.set_logx(do_more_labels=False)
    plot.set_logy(do_more_labels=False)
    plot.save(output_filename)
def do_weight_vs_var_plot_per_pt(histname, input_filename, output_filename):
    ROOT.gStyle.SetPalette(palette_2D)
    tf = cu.open_root_file(input_filename)
    h3d = cu.get_from_tfile(tf, histname)
    if h3d.GetEntries() == 0:
        return
    canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600)
    canv.SetTicks(1, 1)
    canv.SetLeftMargin(0.1)
    canv.SetRightMargin(0.15)
    canv.SetLogz()
    weight_str = "(unweighted)" if "unweighted" in histname else "(weighted)"
    for ibin in range(1, h3d.GetNbinsY()+1):
        h3d.GetYaxis().SetRange(ibin, ibin+1)
        h2d = h3d.Project3D("xz")
        if h2d.GetEntries() == 0:
            continue
        pt_low = h3d.GetYaxis().GetBinLowEdge(ibin)
        pt_high = h3d.GetYaxis().GetBinLowEdge(ibin+1)
        jet_str = pt_genjet_str if "_vs_pt_genjet_vs_" in histname else pt_str
        h2d.SetTitle("%g < %s < %g GeV %s" % (pt_low, jet_str, pt_high, weight_str))
        h2d.GetXaxis().SetTitle(get_var_str(histname))
        h2d.Draw("COLZ")
        jet_app = "_genjet" if "_vs_pt_genjet_vs" in histname else ""
        this_output_filename = output_filename.replace(".pdf", "_pt%s%gto%g.pdf" % (jet_app, pt_low, pt_high))
        canv.SaveAs(this_output_filename)
        canv.Clear()
    tf.Close()
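# Minimal sketch of the slice-then-project pattern used above, on a toy TH3
# (histogram name, axis ranges and fills are made up for illustration only):
# restrict the Y (pt) axis to one bin range, then let Project3D collapse the
# remaining two axes into a TH2.
import ROOT

h3_slice_demo = ROOT.TH3D("h3_slice_demo", ";weight;p_{T};var", 10, 0, 10, 5, 0, 500, 20, 0, 2)
rnd = ROOT.gRandom
for _ in range(5000):
    h3_slice_demo.Fill(rnd.Exp(1.), rnd.Uniform(0., 500.), rnd.Uniform(0., 2.))
ibin_demo = 2
h3_slice_demo.GetYaxis().SetRange(ibin_demo, ibin_demo + 1)  # select one pt slice, as in the loop above
h2_slice_demo = h3_slice_demo.Project3D("xz")                # project the remaining axes
print(h2_slice_demo.GetEntries())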
def do_response_plots(in_file, plot_dir, do_these=None):
    tfile = cu.open_root_file(in_file)
    for full_var_name, xlabel, log_var, rebin in do_these:
        mydir, myvar = full_var_name.split("/")
        # reco vs gen
        do_response_plot(tfile.Get(mydir),
                         plot_dir=plot_dir,
                         var_name=myvar + "_response",
                         xlabel=xlabel,
                         log_var=log_var,
                         rebinx=rebin,
                         rebiny=rebin,
                         do_migration_summary_plots=False,
                         do_resolution_plots=False,
                         save_response_hists=False)
        rebiny = 10 if "multiplicity" in myvar.lower() else 5
        # relative response (reco/gen) on y axis
        do_response_plot(tfile.Get(mydir),
                         plot_dir=plot_dir,
                         var_name=myvar + "_rel_response",
                         xlabel=xlabel,
                         log_var=log_var,
                         rebinx=rebin,
                         rebiny=rebiny,
                         do_migration_summary_plots=False,
                         do_resolution_plots=False,
                         save_response_hists=False)
def make_plot_eta_binned(input_filename, output_filename, title=''):
    f = cu.open_root_file(input_filename)
    tree = cu.get_from_file(f, 'valid')
    hists = []
    eta_bins = binning.eta_bins
    # eta_bins = [0, 3, 5]
    for i, (eta_min, eta_max) in enumerate(binning.pairwise(eta_bins)):
        hname = "h_%g_%g" % (eta_min, eta_max)
        h = ROOT.TH1D(hname, title + " PU15 - 25;response;p.d.f", 30, 0, 3)
        tree.Draw("rsp>>%s" % hname,
                  "%g < TMath::Abs(eta) && TMath::Abs(eta) < %g && numPUVertices<25 && numPUVertices >15" % (eta_min, eta_max))
        h.SetLineColor(binning.eta_bin_colors[i])
        h.SetLineWidth(2)
        h.Scale(1. / h.Integral())
        hists.append(h)
    canv = ROOT.TCanvas("c", "", 600, 600)
    canv.SetTicks(1, 1)
    hstack = ROOT.THStack("hst", title + " PU15 - 25;response;p.d.f")
    leg = ROOT.TLegend(0.6, 0.6, 0.88, 0.88)
    for i, h in enumerate(hists):
        hstack.Add(h)
        leg.AddEntry(h, '%g < |#eta| < %g' % (eta_bins[i], eta_bins[i + 1]), 'L')
    hstack.Draw("NOSTACK HIST")
    leg.Draw()
    canv.SaveAs(output_filename)
def make_plots(filenames, oDir, hist_names, title):
    c = ROOT.TCanvas("", "", 800, 600)
    c.SetTicks(1, 1)
    files = [cu.open_root_file(f.filename) for f in filenames]
    for hname in hist_names:
        # print hname
        hists = [cu.get_from_file(f, hname).Clone() for f in files]
        leg = ROOT.TLegend(0.6, 0.6, 0.85, 0.85)
        for i, h in enumerate(hists):
            norm_hist(h)
            h.Rebin(2)
            h.SetTitle('%s: %s' % (title, hname))
            h.SetLineColor(filenames[i].color)
            if i == 0:
                h.Draw("HISTE")
            else:
                h.Draw("HISTE SAME")
            leg.AddEntry(h, filenames[i].label, "L")
        leg.Draw()
        outname = os.path.join(oDir, hname + '.pdf')
        cu.check_dir_exists_create(os.path.dirname(outname))
        c.SaveAs(outname)
def do_dijet_gen_distributions(root_dir):
    """Do plots comparing different inputs in dijet region"""
    root_files = [qgc.QCD_FILENAME,
                  qgc.QCD_PYTHIA_ONLY_FILENAME,
                  qgc.QCD_HERWIG_FILENAME][:]
    root_files = [cu.open_root_file(os.path.join(root_dir, r)) for r in root_files]
    directories = [cu.get_from_tfile(rf, "Dijet_tighter") for rf in root_files[:]]
    mc_col = qgc.QCD_COLOUR
    mc_col2 = qgc.QCD_COLOURS[2]
    mc_col3 = qgc.QCD_COLOURS[3]
    msize = 1
    lw = 2
    csd = [
        {"label": "QCD MC [MG+PY8]", "line_color": mc_col, "fill_color": mc_col,
         "marker_color": mc_col, "marker_style": 22, "fill_style": 0,
         "marker_size": msize, 'line_width': lw},
        {"label": "QCD MC [PY8]", "line_color": mc_col2, "fill_color": mc_col2,
         "marker_color": mc_col2, "marker_style": 21, "fill_style": 0,
         "marker_size": msize, 'line_width': lw},
        {"label": "QCD MC [H++]", "line_color": mc_col3, "fill_color": mc_col3,
         "marker_color": mc_col3, "marker_style": 23, "fill_style": 0,
         "marker_size": msize, 'line_width': lw},
    ]
    jet_config_str = qgc.extract_jet_config(root_dir)

    # Compare shapes
    do_all_1D_plots_in_dir(directories=directories,
                           output_dir=os.path.join(root_dir, "Dijet_gen_kin_comparison_normalised"),
                           components_styles_dicts=csd,
                           jet_config_str=jet_config_str,
                           normalise_hists=True)
def do_jet_pt_rel_error_with_var_cuts(histname, cuts, input_filename, output_filename): ROOT.gStyle.SetPalette(palette_1D) tf = cu.open_root_file(input_filename) h3d = cu.get_from_tfile(tf, histname) if h3d.GetEntries() == 0: return pt_hists = [] for cut in cuts: max_bin = h3d.GetZaxis().FindFixBin(cut) # print("cut:", cut, "bin:", max_bin) h = h3d.ProjectionY("pt_var_lt_%g" % cut, 0, -1, 0, max_bin, "e") h2 = h.Clone() h2.Rebin(2) if h.GetEntries() > 0: h3 = qgp.hist_divide_bin_width(h2) # convert bin contents to bin error/bin contents for ibin in range(1, h2.GetNbinsX()+1): if h3.GetBinContent(ibin) == 0: continue h3.SetBinContent(ibin, h3.GetBinError(ibin) / h3.GetBinContent(ibin)) h3.SetBinError(ibin, 0) pt_hists.append(h3) line_styles = [1, 2, 3] n_line_styles = len(line_styles) conts = [Contribution(h, label=" < %g" % cut, line_color=cu.get_colour_seq(ind, len(cuts)), line_style=line_styles[ind % n_line_styles], line_width=2, marker_color=cu.get_colour_seq(ind, len(cuts)), subplot=pt_hists[-1]) for ind, (h, cut) in enumerate(zip(pt_hists, cuts))] jet_str = pt_genjet_str if "_vs_pt_genjet_vs_" in histname else pt_str weight_str = "(unweighted)" if "unweighted" in histname else "(weighted)" ratio_lims = (0.98, 1.02) if "unweighted" in histname else None plot = Plot(conts, what='hist', title='%s for cuts on %s %s' % (jet_str, get_var_str(histname), weight_str), xtitle=None, ytitle='Relative error', # xlim=None, ylim=None, legend=True, subplot_type='ratio', subplot_title='* / var < %g' % cuts[-1], subplot_limits=ratio_lims, has_data=False) plot.y_padding_max_log = 200 plot.subplot_maximum_ceil = 2 plot.subplot_maximum_floor = 1.02 plot.subplot_minimum_ceil = 0.98 plot.legend.SetY1(0.7) plot.legend.SetY2(0.89) plot.legend.SetX1(0.78) plot.legend.SetX2(0.88) plot.plot("NOSTACK HISTE", "NOSTACK HIST") plot.set_logx(True, do_more_labels=True) plot.set_logy(True, do_more_labels=False) plot.save(output_filename)
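# Standalone illustration of the "replace bin content with relative error" conversion
# performed inside do_jet_pt_rel_error_with_var_cuts, on a toy 1D histogram
# (name, binning and filling are arbitrary and only for demonstration).
import ROOT

h_relerr_demo = ROOT.TH1D("h_relerr_demo", ";x;relative error", 20, -5, 5)
h_relerr_demo.FillRandom("gaus", 5000)
for ibin_demo in range(1, h_relerr_demo.GetNbinsX() + 1):
    content = h_relerr_demo.GetBinContent(ibin_demo)
    if content == 0:
        continue
    h_relerr_demo.SetBinContent(ibin_demo, h_relerr_demo.GetBinError(ibin_demo) / content)
    h_relerr_demo.SetBinError(ibin_demo, 0)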
def plot_corr_results(in_name): """Puts correction plots from ROOT file in one pdf. Parameters ---------- in_name : str Name of ROOT file to process (output from runCalibration.py) """ print "Opening", in_name in_stem = os.path.basename(in_name).replace(".root", "") input_file = cu.open_root_file(in_name) # Setup output directory & filenames odir = os.path.join(os.path.dirname(os.path.abspath(in_name)), in_stem) cu.check_dir_exists_create(odir) out_name = os.path.join(odir, in_stem + ".pdf") out_stem = out_name.replace(".pdf", "") print "Writing to", out_name # Start beamer file - make main tex file # Use template - change title, subtitle, include file frontpage_title = "Correction value plots, binned by $|\eta|$" sub = in_stem.replace("output_", "").replace("_", "\_").replace("_ak", r"\\_ak") subtitle = "{\\tt " + sub + "}" main_file = out_stem + ".tex" slides_file = out_stem + "_slides.tex" make_main_tex_file(frontpage_title, subtitle, AUTHOR, main_file, slides_file) # Now make the slides file to be included in main file with open(slides_file, "w") as slides: titles = [] plotnames = [] etaBins = binning.eta_bins for i, (eta_min, eta_max) in enumerate(binning.pairwise(etaBins)): plotname = "l1corr_eta_%g_%g" % (eta_min, eta_max) bin_title = "%g < |\eta^{L1}| < %g" % (eta_min, eta_max) xtitle = "<p_{T}^{L1}> [GeV]" ytitle = "Correction = 1/<p_{T}^{L1}/p_{T}^{Ref}>" output_plots = [os.path.join(odir, plotname + ext) for ext in ['.tex', '.pdf']] if plot_to_file(input_file, plotname, output_plots, xtitle=xtitle, ytitle=ytitle, title="", drawfit=True, extend_fit=True): titles.append("$%s$" % bin_title) plotnames.append(os.path.join(odir, plotname + ".tex")) # When we have 4 plots, or reached the end, write to a slide if (((i + 1) % 4 == 0) and (i != 0)) or (i == len(etaBins) - 2): print "Writing slide" slidetitle = "Correction value" slides.write(bst.make_slide(bst.four_plot_slide, titles, plotnames, slidetitle)) titles = [] plotnames = [] compile_pdf(main_file, out_name, odir, 1)
def do_jet_pt_vs_genht_plot(dirname, output_filename, title=""):
    """2D heat map of genHT vs jet pt"""
    canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600)
    qcd_file = cu.open_root_file(os.path.join(dirname, qgc.QCD_FILENAME))
    histname = "Dijet_tighter/pt_jet_vs_genHT"
    canv.SetRightMargin(0.15)
    h = cu.get_from_tfile(qcd_file, histname)
    h.SetTitle(title + ";p_{T}^{Leading jet} [GeV]; H_{T}^{Gen} [GeV]")
    h.Draw("COLZ")
    h.GetXaxis().SetRangeUser(0, 200)
    h.GetYaxis().SetRangeUser(0, 200)
    canv.SaveAs(output_filename)
def grab_obj(file_name, obj_name):
    """Get object named obj_name from ROOT file file_name"""
    # TODO: checks!
    input_file = cu.open_root_file(file_name)
    obj = cu.get_from_tfile(input_file, obj_name)
    # print("Getting", obj_name, "from", file_name)
    if isinstance(obj, (ROOT.TH1, ROOT.TGraph)):
        obj.SetDirectory(0)  # Ownership kludge
        input_file.Close()
        return obj.Clone(ROOT.TUUID().AsString())
    else:
        return obj
def make_plots(input_filename, output_dir):
    input_file = cu.open_root_file(input_filename)
    forbidden = ['SFrame', 'cf_metfilters_raw', 'cf_metfilters']
    directories = [d for d in get_list_of_obj(input_file) if d not in forbidden]
    print(directories)
    for d in directories:
        region, pt_edges, eta_edges = extract_pt_eta_from_name(d)
        title = "%s, %s < p_{T} < %s GeV, %s < |#eta| < %s" % (region, *pt_edges, *eta_edges)
        print_plots(input_file.Get(d), os.path.join(output_dir, d), title)
def get_obj(self):
    """Get object for this contribution."""
    input_file = cu.open_root_file(self.file_name)
    self.obj = cu.get_from_file(input_file, self.obj_name)
    self.obj.SetLineWidth(self.line_width)
    self.obj.SetLineColor(self.line_color)
    self.obj.SetLineStyle(self.line_style)
    self.obj.SetMarkerSize(self.marker_size)
    self.obj.SetMarkerColor(self.marker_color)
    self.obj.SetMarkerStyle(self.marker_style)
    input_file.Close()
    return self.obj
def do_genht_plot(dirname, output_filename, **plot_kwargs):
    qcd_file = cu.open_root_file(os.path.join(dirname, qgc.QCD_FILENAME))
    histname = "Dijet_gen/gen_ht"
    qcd_hist = cu.get_from_tfile(qcd_file, histname)
    conts = [Contribution(qcd_hist, label="QCD MC", line_color=ROOT.kRed)]
    plot = Plot(conts, what='hist', ytitle="N", **plot_kwargs)
    plot.y_padding_max_log = 500
    plot.legend.SetY1(0.7)
    plot.plot("NOSTACK HIST E")
    plot.set_logx(do_more_labels=False)
    plot.set_logy(do_more_labels=False)
    plot.save(output_filename)
def grab_obj(file_name, obj_name):
    """Get object named obj_name from ROOT file file_name"""
    # TODO: checks!
    input_file = cu.open_root_file(file_name)
    obj = cu.get_from_tfile(input_file, obj_name)
    # print("Getting", obj_name, "from", file_name)
    if isinstance(obj, (ROOT.TH1, ROOT.TGraph, ROOT.TH2)):
        # THIS ORDER IS VERY IMPORTANT TO AVOID MEMORY LEAKS
        new_obj = obj.Clone(ROOT.TUUID().AsString())
        new_obj.SetDirectory(0)
        input_file.Close()
        return new_obj
    else:
        return obj
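# A minimal, self-contained check of the clone-then-detach ordering grab_obj relies on:
# write a toy histogram to a scratch file, read it back, and use it after the file has
# been closed. The file and histogram names are made up, and this assumes the current
# directory is writable; it is only a sketch, not part of the analysis workflow.
import ROOT

_f_demo = ROOT.TFile("_grab_obj_demo.root", "RECREATE")
_h_demo = ROOT.TH1D("h_demo", "", 10, 0, 1)
_h_demo.Write()
_f_demo.Close()

h_read = grab_obj("_grab_obj_demo.root", "h_demo")
print(h_read.GetName(), h_read.GetNbinsX())  # still usable: cloned & detached before Close()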
def get_obj(self):
    """Get object for this contribution."""
    input_file = cu.open_root_file(self.file_name)
    self.obj = cu.get_from_file(input_file, self.obj_name)
    self.obj.SetLineWidth(self.line_width)
    self.obj.SetLineColor(self.line_color)
    self.obj.SetLineStyle(self.line_style)
    self.obj.SetMarkerSize(self.marker_size)
    self.obj.SetMarkerColor(self.marker_color)
    self.obj.SetMarkerStyle(self.marker_style)
    if isinstance(self.obj, ROOT.TH1):
        self.obj.SetDirectory(0)
    input_file.Close()
    return self.obj
def do_plot(entries, output_file, hist_name=None, xlim=None, ylim=None, rebin=2, is_data=True, is_ak8=False): components = [] do_unweighted = any(["unweighted" in e.get('hist_name', hist_name) for e in entries]) for ent in entries: if 'tfile' not in ent: ent['tfile'] = cu.open_root_file(ent['filename']) ent['hist'] = cu.get_from_tfile(ent['tfile'], ent.get('hist_name', hist_name)) if not do_unweighted and 'scale' in ent: ent['hist'].Scale(ent.get('scale', 1)) components.append( Contribution(ent['hist'], fill_color=ent['color'], line_color=ent['color'], marker_color=ent['color'], marker_size=0, line_width=2, label=ent['label'], rebin_hist=rebin ) ) # print stats print(ent['hist_name'], ent['label'], ent['hist'].Integral()) title = 'AK8 PUPPI' if is_ak8 else 'AK4 PUPPI' plot = Plot(components, what='hist', has_data=is_data, title=title, xlim=xlim, ylim=ylim, xtitle="p_{T}^{jet 1} [GeV]", ytitle="Unweighted N" if do_unweighted else 'N') # plot.y_padding_min_log = 10 if 'unweighted' in hist_name else 10 plot.default_canvas_size = (700, 600) plot.legend.SetNColumns(2) plot.legend.SetX1(0.55) plot.legend.SetY1(0.7) plot.legend.SetY2(0.88) plot.plot("HISTE") plot.set_logx() plot.set_logy(do_more_labels=False) plot.save(output_file) # do non-stacked version stem, ext = os.path.splitext(output_file) plot.plot("HISTE NOSTACK") plot.set_logx() plot.set_logy(do_more_labels=False) plot.save(stem+"_nostack" + ext)
def get_functions_graphs_params_rootfile(root_filename):
    """Get function parameters from ROOT file

    Gets object based on name in runCalibration.py

    Parameters
    ----------
    root_filename : str
        Name of ROOT file to get things from

    Returns
    -------
    all_fits : list[TF1]
        Collection of TF1 objects, one per line of file ( = 1 eta bin)
    all_fit_params : list[list[float]]
        Collection of fit parameters, one per line of file ( = 1 eta bin)
    all_graphs : list[TGraphErrors]
        Collection of correction graphs, one per eta bin
    """
    print 'Reading functions from ROOT file'
    in_file = cu.open_root_file(root_filename)
    all_fit_params = []
    all_fits = []
    all_graphs = []
    # Get all the fit functions from file and their corresponding graphs
    etaBins = binning.eta_bins
    for i, (eta_min, eta_max) in enumerate(izip(etaBins[:-1], etaBins[1:])):
        print "Eta bin:", eta_min, "-", eta_max
        # get the fitted TF1
        try:
            fit_func = cu.get_from_file(in_file, "fitfcneta_%g_%g" % (eta_min, eta_max))
            fit_params = [fit_func.GetParameter(par) for par in range(fit_func.GetNumberFreeParameters())]
            print "Fit fn evaluated at 5 GeV:", fit_func.Eval(5)
        except IOError:
            print "No fit func"
            fit_func = None
            fit_params = []
        all_fits.append(fit_func)
        all_fit_params.append(fit_params)
        # print "Fit parameters:", fit_params
        # get the corresponding fit graph
        fit_graph = cu.get_from_file(in_file, generate_eta_graph_name(eta_min, eta_max))
        all_graphs.append(fit_graph)
    in_file.Close()
    return all_fits, all_fit_params, all_graphs
def process_file(filename, eta_bins=binning.eta_bins_forward): """Process a ROOT file with graphs, print a mean & mean histogram for each. Parameters ---------- filename : str Name of ROOT file to process (from runCalibration.py) eta_bins : list[[float, float]] Eta bin edges. """ f = cu.open_root_file(filename) for eta_min, eta_max in binning.pairwise(eta_bins): gr = cu.get_from_file(f, generate_eta_graph_name(eta_min, eta_max)) if not gr: raise RuntimeError("Can't get graph") xarr, yarr = cu.get_xy(gr) xarr, yarr = np.array(xarr), np.array( yarr) # use numpy array for easy slicing # Loop over all possible subgraphs, and calculate a mean for each end = len(yarr) means = [] while end > 0: start = 0 while start < end: means.append(yarr[start:end].mean()) start += 1 end -= 1 # Jackknife means jack_means = [np.delete(yarr, i).mean() for i in range(len(yarr))] # Do plotting & peak finding in both ROOT and MPL...not sure which is better? # peak = plot_find_peak_mpl(means, eta_min, eta_max, os.path.dirname(os.path.realpath(filename))) peak = plot_find_peak_root(means, eta_min, eta_max, os.path.dirname(os.path.realpath(filename))) jackpeak = plot_jacknife_root( jack_means, eta_min, eta_max, os.path.dirname(os.path.realpath(filename))) print 'Eta bin:', eta_min, '-', eta_max print peak print 'jackknife mean:' print np.array(jack_means).mean() f.Close()
def do_weight_vs_pt_plot(input_filename, output_filename):
    ROOT.gStyle.SetPalette(palette_2D)
    histname = "Weight_Presel/weight_vs_pt_vs_pt_jet_qScale_ratio"
    tf = cu.open_root_file(input_filename)
    h3d = cu.get_from_tfile(tf, histname)
    if h3d.GetEntries() == 0:
        return
    h2d = h3d.Project3D("xy")
    canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600)
    canv.SetTicks(1, 1)
    canv.SetLeftMargin(0.1)
    canv.SetRightMargin(0.15)
    canv.SetLogz()
    canv.SetLogx()
    h2d.Draw("COLZ")
    canv.SaveAs(output_filename)
    tf.Close()
def process_file(filename, eta_bins=binning.eta_bins_forward): """Process a ROOT file with graphs, print a mean & mean histogram for each. Parameters ---------- filename : str Name of ROOT file to process (from runCalibration.py) eta_bins : list[[float, float]] Eta bin edges. """ f = cu.open_root_file(filename) for eta_min, eta_max in binning.pairwise(eta_bins): gr = cu.get_from_file(f, generate_eta_graph_name(eta_min, eta_max)) if not gr: raise RuntimeError("Can't get graph") xarr, yarr = cu.get_xy(gr) xarr, yarr = np.array(xarr), np.array(yarr) # use numpy array for easy slicing # Loop over all possible subgraphs, and calculate a mean for each end = len(yarr) means = [] while end > 0: start = 0 while start < end: means.append(yarr[start:end].mean()) start += 1 end -= 1 # Jackknife means jack_means = [np.delete(yarr, i).mean() for i in range(len(yarr))] # Do plotting & peak finding in both ROOT and MPL...not sure which is better? # peak = plot_find_peak_mpl(means, eta_min, eta_max, os.path.dirname(os.path.realpath(filename))) peak = plot_find_peak_root(means, eta_min, eta_max, os.path.dirname(os.path.realpath(filename))) jackpeak = plot_jacknife_root(jack_means, eta_min, eta_max, os.path.dirname(os.path.realpath(filename))) print 'Eta bin:', eta_min, '-', eta_max print peak print 'jackknife mean:' print np.array(jack_means).mean() f.Close()
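# A standalone illustration (numpy only, toy numbers) of the two mean calculations that
# process_file performs on each graph's y-values: means of every contiguous sub-range,
# and leave-one-out (jackknife) means. The values below are made up.
import numpy as np

yarr_demo = np.array([0.95, 0.97, 1.00, 1.02, 0.99])

# means of every contiguous sub-range yarr[start:end], as in the nested while loops above
window_means = [yarr_demo[start:end].mean()
                for end in range(len(yarr_demo), 0, -1)
                for start in range(end)]

# jackknife means: drop one point at a time
jack_means_demo = [np.delete(yarr_demo, i).mean() for i in range(len(yarr_demo))]

print(len(window_means), np.mean(jack_means_demo))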
def do_pthat_comparison_plot(dirname_label_pairs, output_filename, **plot_kwargs): qcd_files = [ cu.open_root_file(os.path.join(dl[0], qgc.QCD_PYTHIA_ONLY_FILENAME)) for dl in dirname_label_pairs ] histname = "Dijet_gen/ptHat" qcd_hists = [cu.get_from_tfile(qf, histname) for qf in qcd_files] N = len(dirname_label_pairs) pthat_rebin = array('d', [ 15, 30, 50, 80, 120, 170, 300, 470, 600, 800, 1000, 1400, 1800, 2400, 3200, 5000 ]) nbins = len(pthat_rebin) - 1 qcd_hists = [ h.Rebin(nbins, cu.get_unique_str(), pthat_rebin) for h in qcd_hists ] conts = [ Contribution(qcd_hists[i], label=lab, marker_color=cu.get_colour_seq(i, N), line_color=cu.get_colour_seq(i, N), line_style=i + 1, line_width=2, subplot=qcd_hists[0] if i != 0 else None) for i, (d, lab) in enumerate(dirname_label_pairs) ] plot = Plot(conts, what='hist', ytitle="N", subplot_limits=(0.75, 1.25), subplot_type="ratio", subplot_title="* / %s" % (dirname_label_pairs[0][1]), **plot_kwargs) plot.y_padding_max_log = 500 plot.legend.SetY1(0.7) plot.plot("NOSTACK HIST E") plot.set_logx(do_more_labels=False) plot.set_logy(do_more_labels=False) plot.save(output_filename)
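# A small standalone sketch of TH1::Rebin onto variable-width bin edges, the same call
# pattern used for the ptHat spectrum above. The histogram, TF1 and bin edges here are
# toy values chosen only for illustration.
import ROOT
from array import array

h_rebin_demo = ROOT.TH1D("h_rebin_demo", ";p_{T};N", 100, 0, 1000)
f_fall_demo = ROOT.TF1("f_fall_demo", "expo", 0, 1000)
f_fall_demo.SetParameters(1., -0.005)  # arbitrary falling spectrum
h_rebin_demo.FillRandom("f_fall_demo", 10000)
edges_demo = array('d', [0, 50, 100, 200, 400, 1000])
h_rebin_var = h_rebin_demo.Rebin(len(edges_demo) - 1, "h_rebin_demo_var", edges_demo)  # returns a new TH1
print(h_rebin_var.GetNbinsX())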
def do_weight_vs_var_plot(histname, input_filename, output_filename):
    ROOT.gStyle.SetPalette(palette_2D)
    tf = cu.open_root_file(input_filename)
    h3d = cu.get_from_tfile(tf, histname)
    if h3d.GetEntries() == 0:
        return
    h2d = h3d.Project3D("xz")
    if "unweighted" in histname:
        h2d.SetTitle("Unweighted")
    else:
        h2d.SetTitle("Weighted")
    h2d.GetXaxis().SetTitle(get_var_str(histname))
    canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600)
    canv.SetTicks(1, 1)
    canv.SetLeftMargin(0.1)
    canv.SetRightMargin(0.15)
    canv.SetLogz()
    h2d.Draw("COLZ")
    canv.SaveAs(output_filename)
    tf.Close()
def make_htt_plots(input_filename, output_dir): """Make HTT plots for one input file. Parameters ---------- input_filename : str Name of pairs ROOT file. output_dir : str Name of output directory for plots. """ in_stem = os.path.splitext(os.path.basename(input_filename))[0] output_dir = os.path.join(output_dir, in_stem) if not os.path.isdir(output_dir): print 'Making output dir', output_dir os.makedirs(output_dir) f = cu.open_root_file(input_filename) tree = cu.get_from_file(f, "valid") common_cut = COMMON_CUT norm_cut = '1./nMatches' # normalisation, for event-level quantities, since we store it for each match in an event if common_cut != '': norm_cut += ' && %s' % common_cut do_htt_plots(tree, output_dir, norm_cut) do_mht_plots(tree, output_dir, norm_cut) # Do plots where y axis is some variable of interest do_dr_plots(tree, output_dir, common_cut) do_rsp_plots(tree, output_dir, common_cut) do_nvtx_plots(tree, output_dir, norm_cut) do_njets_plots(tree, output_dir, norm_cut) do_jet_pt_plots(tree, output_dir, common_cut) f.Close()
def do_genht_comparison_plot(dirname_label_pairs, output_filename, **plot_kwargs): """Like do_genht but for multiple samples""" qcd_files = [ cu.open_root_file(os.path.join(dl[0], qgc.QCD_FILENAME)) for dl in dirname_label_pairs ] histname = "Dijet_gen/gen_ht" qcd_hists = [cu.get_from_tfile(qf, histname) for qf in qcd_files] N = len(dirname_label_pairs) conts = [ Contribution(qcd_hists[i], label=lab, marker_color=cu.get_colour_seq(i, N), line_color=cu.get_colour_seq(i, N), line_style=i + 1, line_width=2, subplot=qcd_hists[0] if i != 0 else None) for i, (d, lab) in enumerate(dirname_label_pairs) ] plot = Plot( conts, what='hist', ytitle="N", # subplot_limits=(0.75, 1.25), subplot_type="ratio", subplot_title="* / %s" % (dirname_label_pairs[0][1]), ylim=[1E6, None], **plot_kwargs) plot.y_padding_max_log = 500 plot.legend.SetY1(0.7) plot.subplot_maximum_ceil = 5 plot.plot("NOSTACK HIST E") plot.set_logx(do_more_labels=False) plot.set_logy(do_more_labels=False) plot.save(output_filename)
def do_jet_pt_migration_plot(input_filename, directory, title, output_dir):
    """Do migration stats plot"""
    tfile = cu.open_root_file(input_filename)
    h2d = tfile.Get("%s/jet_pt_vs_genjet_pt" % directory)
    h2d_new = h2d
    h2d_renorm_y = cu.make_normalised_TH2(h2d_new, 'Y', recolour=False, do_errors=True)
    h2d_renorm_x = cu.make_normalised_TH2(h2d_new, 'X', recolour=False, do_errors=True)

    # Plot 2D response matrix
    plot_jet_pt_response_matrix(h2d_new, h2d_renorm_x, h2d_renorm_y, title, output_dir)

    # Do migration metrics
    xlabel = 'p_{T}^{jet} [GeV]'
    qgp.make_migration_summary_plot(h2d_renorm_x, h2d_renorm_y, xlabel,
                                    title=title,
                                    log_var=True,
                                    output_filename=os.path.join(output_dir, "jet_pt_migration_summary.pdf"),
                                    do_reco_updown2=False,
                                    do_gen_updown=False)
    tfile.Close()
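# Illustrative sketch only - this is NOT the actual cu.make_normalised_TH2 implementation -
# showing the kind of per-axis normalisation the migration plots above are built on:
# scale each x bin (column) of a TH2 so that the column sums to 1.
def normalise_columns_sketch(h2d):
    h = h2d.Clone(h2d.GetName() + "_normX_sketch")
    for ix in range(1, h.GetNbinsX() + 1):
        col_sum = sum(h.GetBinContent(ix, iy) for iy in range(1, h.GetNbinsY() + 1))
        if col_sum == 0:
            continue
        for iy in range(1, h.GetNbinsY() + 1):
            h.SetBinContent(ix, iy, h.GetBinContent(ix, iy) / col_sum)
    return h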
def main(in_args=sys.argv[1:]): print in_args parser = argparse.ArgumentParser(description=__doc__, formatter_class=cu.CustomFormatter) parser.add_argument("input", help="input ROOT filename") parser.add_argument("output", help="output ROOT filename") parser.add_argument("--incl", action="store_true", help="Do inclusive eta plots") parser.add_argument("--excl", action="store_true", help="Do exclusive eta plots") parser.add_argument("--central", action='store_true', help="Do central eta bins only (eta <= 3)") parser.add_argument("--forward", action='store_true', help="Do forward eta bins only (eta >= 3)") parser.add_argument("--etaInd", nargs="+", help="list of eta bin INDICES to run over - " "if unspecified will do all. " "This overrides --central/--forward. " "Handy for batch mode. " "IMPORTANT: MUST PUT AT VERY END") parser.add_argument("--maxPt", default=500, type=float, help="Maximum pT for L1 Jets") parser.add_argument("--PUmin", default=-99, type=float, help="Minimum number of PU vertices (refers to *actual* " "number of PU vertices in the event, not the centre " "of of the distribution)") parser.add_argument("--PUmax", default=999, type=float, help="Maximum number of PU vertices (refers to *actual* " "number of PU vertices in the event, not the centre " "of of the distribution)") args = parser.parse_args(args=in_args) inputf = cu.open_root_file(args.input, "READ") outputf = cu.open_root_file(args.output, "RECREATE") print "Reading from", args.input print "Writing to", args.output if not inputf or not outputf: raise Exception("Couldn't open input or output files") # Setup eta bins etaBins = binning.eta_bins[:] if args.etaInd: args.etaInd.append(int(args.etaInd[-1])+1) # need upper eta bin edge # check eta bins are ok etaBins = [etaBins[int(x)] for x in args.etaInd] elif args.central: etaBins = binning.eta_bins_central elif args.forward: etaBins = binning.eta_bins_forward print "Running over eta bins:", etaBins # Do plots for individual eta bins if args.excl: print "Doing individual eta bins" for i, (eta_min, eta_max) in enumerate(pairwise(etaBins)): # whether we're doing a central or forward bin (.1 is for rounding err) forward_bin = eta_max > 3.1 # setup pt bins, wider ones for forward region ptBins = binning.pt_bins_stage2_8 if not forward_bin else binning.pt_bins_stage2_8_wide # ptBins = binning.pt_bins_stage2 if not forward_bin else binning.pt_bins_stage2_hf plot_resolution(inputf, outputf, ptBins, eta_min, eta_max, args.maxPt, args.PUmin, args.PUmax) # Do plots for inclusive eta # Skip if doing exlcusive and only 2 bins, or if only 1 bin if args.incl and ((not args.excl and len(etaBins) >= 2) or (args.excl and len(etaBins)>2)): print "Doing inclusive eta" # ptBins = binning.pt_bins_stage2_hf if etaBins[0] > 2.9 else binning.pt_bins_stage2 ptBins = binning.pt_bins_stage2_8_wide if etaBins[0] > 2.9 else binning.pt_bins_stage2_8 plot_resolution(inputf, outputf, ptBins, etaBins[0], etaBins[-1], args.maxPt, args.PUmin, args.PUmax) if not args.incl and not args.excl: print "Not doing inclusive or exclusive - you must specify at least one!" return 1 inputf.Close() outputf.Close() return 0
def do_var_vs_pt_plot(histname, input_filename, output_filename): ROOT.gStyle.SetPalette(palette_2D) tf = cu.open_root_file(input_filename) h3d = cu.get_from_tfile(tf, histname) if h3d.GetEntries() == 0: return h2d = h3d.Project3D("zy") xlabel = h2d.GetXaxis().GetTitle() ylabel = h2d.GetYaxis().GetTitle() ylabel = get_var_str(histname) # find largest var value (ie row) that has a filled bin h2d_ndarray = cu.th2_to_ndarray(h2d)[0] xbins = np.array(cu.get_bin_edges(h2d, 'x')) ybins = np.array(cu.get_bin_edges(h2d, 'y')) # remove dodgy bins with 0 width cos I was an idiot and duplicated some bins n_deleted = 0 # weight bin # xax = h2d.GetXaxis() # for ix in range(1, h2d.GetNbinsX()+1): # if xax.GetBinWidth(ix) == 0: # h2d_ndarray = np.delete(h2d_ndarray, ix-1-n_deleted, axis=1) # xbins = np.delete(xbins, ix-1-n_deleted, axis=0) # n_deleted += 1 # print("Deleting bin", ix) # pt bin # n_deleted = 0 # yax = h2d.GetYaxis() # for iy in range(1, h2d.GetNbinsY()+1): # if yax.GetBinWidth(iy) == 0: # h2d_ndarray = np.delete(h2d_ndarray, iy-1-n_deleted, axis=0) # ybins = np.delete(ybins, iy-1-n_deleted, axis=0) # n_deleted += 1 # print("Deleting bin", iy) # nonzero returns (row #s)(col #s) of non-zero elements # we only want the largest row # max_filled_row_ind = int(np.nonzero(h2d_ndarray)[0].max()) h2d = cu.ndarray_to_th2(h2d_ndarray, binsx=xbins, binsy=ybins) if "unweighted" in histname: h2d.SetTitle("Unweighted;%s;%s" % (xlabel, ylabel)) else: h2d.SetTitle("Weighted;%s;%s" % (xlabel, ylabel)) h2d.GetYaxis().SetRange(1, max_filled_row_ind+2) # +1 as ROOT 1-indexed, +1 for padding h2d.GetYaxis().SetTitle(get_var_str(histname)) xmin = 15 if "pt_genjet_vs" in histname else 30 xmax = 300 canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600) canv.SetTicks(1, 1) canv.SetLeftMargin(0.12) canv.SetRightMargin(0.15) # canv.SetLogz() # canv.SetLogy() h2d_copy = h2d.Clone() # h2d_copy.Scale(1, "width") h2d_copy.Draw("COLZ") canv.SetLogx() h2d_copy.GetXaxis().SetMoreLogLabels() canv.SaveAs(output_filename) zoom_ymin, zoom_ymax = 0.1, 5 h2d_copy.SetAxisRange(zoom_ymin, zoom_ymax,"Y") h2d_copy.SetAxisRange(xmin, xmax, "X") canv.SaveAs(output_filename.replace(".pdf", "_zoomY.pdf")) canv.SetLogz() canv.SaveAs(output_filename.replace(".pdf", "_zoomY_logZ.pdf")) canv.SetLogz(False) # h2d.Scale(1, "width") h2d_normed = cu.make_normalised_TH2(h2d, norm_axis='x', recolour=True) h2d_normed.Draw("COLZ") h2d_normed.GetXaxis().SetMoreLogLabels() # h2d_normed.SetMinimum(1E-5) h2d_normed.SetAxisRange(xmin, xmax, "X") canv.SaveAs(output_filename.replace(".pdf", "_normX.pdf")) h2d_normed.SetAxisRange(zoom_ymin, zoom_ymax,"Y") canv.SaveAs(output_filename.replace(".pdf", "_normX_zoomY.pdf")) # Do cumulative plot per column (ie fraction of events passing cut < y) h2d_ndarray_cumsum = h2d_ndarray.cumsum(axis=0) nonzero_mask = h2d_ndarray_cumsum[-1] > 0 h2d_ndarray_cumsum[:, nonzero_mask] /= h2d_ndarray_cumsum[-1][nonzero_mask] # scale so total is 1 h2d_cumsum = cu.ndarray_to_th2(h2d_ndarray_cumsum, binsx=xbins, binsy=ybins) # Get max row ind max_filled_row_ind = int(h2d_ndarray_cumsum.argmax(axis=0).max()) h2d_cumsum.GetYaxis().SetRange(1, max_filled_row_ind+1) # +1 as ROOT 1-indexed # ROOT.gStyle.SetPalette(ROOT.kBird) ylabel = "Fraction of events with " + ylabel + " < y" if "unweighted" in histname: h2d_cumsum.SetTitle("Unweighted;%s;%s" % (xlabel, ylabel)) else: h2d_cumsum.SetTitle("Weighted;%s;%s" % (xlabel, ylabel)) canv.Clear() canv.SetLogz(False) h2d_cumsum.SetContour(20) h2d_cumsum.Draw("CONT1Z") 
h2d_cumsum.SetAxisRange(xmin, xmax, "X") canv.SetLogx() h2d_cumsum.GetXaxis().SetMoreLogLabels() canv.SaveAs(output_filename.replace(".pdf", "_cumulY.pdf")) h2d_cumsum.SetAxisRange(zoom_ymin, zoom_ymax,"Y") canv.SaveAs(output_filename.replace(".pdf", "_cumulY_zoomY.pdf")) canv.Clear() h2d_normed.Draw("COL") h2d_cumsum.Draw("CONT1Z SAME") h2d_cumsum.SetAxisRange(xmin, xmax, "X") canv.SetLogx() h2d_cumsum.GetXaxis().SetMoreLogLabels() canv.SaveAs(output_filename.replace(".pdf", "_cumulY_normX.pdf")) h2d_cumsum.SetAxisRange(zoom_ymin, zoom_ymax,"Y") canv.SaveAs(output_filename.replace(".pdf", "_cumulY_normX_zoomY.pdf")) tf.Close()
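# Numpy-only illustration of the per-column cumulative fraction computed in
# do_var_vs_pt_plot above. The toy array stands in for the TH2 contents:
# rows are var bins, columns are pt bins; each non-empty column ends at 1.
import numpy as np

arr_demo = np.array([[1., 2., 0., 4.],
                     [3., 0., 0., 1.],
                     [6., 8., 0., 5.]])
cumsum_demo = arr_demo.cumsum(axis=0)
nonzero_demo = cumsum_demo[-1] > 0
cumsum_demo[:, nonzero_demo] /= cumsum_demo[-1][nonzero_demo]
print(cumsum_demo)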
def do_cut_roc_per_pt(histname, input_filename, output_filename): """Plot fractional # unweighted vs fraction # weighted, for different cuts Not a true ROC, but kinda like one """ ROOT.gStyle.SetPalette(palette_1D) tf = cu.open_root_file(input_filename) h3d = cu.get_from_tfile(tf, histname) h3d_unweighted = cu.get_from_tfile(tf, histname+"_unweighted") if h3d.GetEntries() == 0: return canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600) canv.SetTicks(1, 1) canv.SetLeftMargin(0.12) canv.SetRightMargin(0.12) # canv.SetLogz() h2d = h3d.Project3D("zy") # var vs pt h2d_unweighted = h3d_unweighted.Project3D("zy") # var vs pt var_name = os.path.basename(histname).replace("weight_vs_pt_vs_", "").replace("weight_vs_pt_genjet_vs_", "") for ibin in range(3, h2d.GetNbinsX()+1): # iterate over pt bins if h2d.Integral(ibin, ibin+1, 0, -1) == 0: # data.append(None) continue pt_low = h3d.GetYaxis().GetBinLowEdge(ibin) pt_high = h3d.GetYaxis().GetBinLowEdge(ibin+1) data = [] data_unweighted = [] # Do integral, error for increasingly looser cuts # find maximum in this pt bin # yes I probably should collapse to a 1D hist and use GetMaximumBin max_val, max_bin = 0, 0 for icut in range(1, h2d.GetNbinsY()+1): val = h2d.GetBinContent(ibin, icut) if val > max_val: max_val = val max_bin = icut for icut in range(max_bin + 5, h2d.GetNbinsY()+2, 2): # for icut in range(2, h2d.GetNbinsY()+2): err = array('d', [0]) count = h2d.IntegralAndError(ibin, ibin+1, 1, icut-1, err) if count == 0: continue data.append([count, err[0], h2d.GetYaxis().GetBinLowEdge(icut)]) err = array('d', [0]) count = h2d_unweighted.IntegralAndError(ibin, ibin+1, 1, icut-1, err) data_unweighted.append([count, err[0], h2d.GetYaxis().GetBinLowEdge(icut)]) cuts = np.array([d[2] for d in data][1:]) # cuts = np.array([d[2] for d in data]) weighted_fractions = np.array([abs(d[0]-dd[0]) / dd[0] for d, dd in zip(data[:-1], data[1:])]) unweighted_fractions = np.array([abs(d[0]-dd[0]) / dd[0] for d, dd in zip(data_unweighted[:-1], data_unweighted[1:])]) non_zero_mask = (unweighted_fractions>0) & (weighted_fractions>0) non_zero_weighted = weighted_fractions[non_zero_mask] weight_min_pow = math.floor(math.log10(min(non_zero_weighted))) if len(non_zero_weighted) > 0 else -10 weight_max_pow = math.floor(math.log10(max(non_zero_weighted))) if len(non_zero_weighted) > 0 else 0 assert(weight_max_pow>=weight_min_pow) non_zero_unweighted = unweighted_fractions[non_zero_mask] unweight_min_pow = math.floor(math.log10(min(non_zero_unweighted))) if len(non_zero_unweighted) > 0 else -10 unweight_max_pow = math.floor(math.log10(max(non_zero_unweighted))) if len(non_zero_unweighted) > 0 else 0 assert(unweight_max_pow>=unweight_min_pow) mask = unweighted_fractions < 10**(unweight_min_pow+1) # last decade of unweighted drops mask &= weighted_fractions > 10**(weight_max_pow-1) # largest decades of weighted drops if np.sum(mask) == 0: continue # weighted_fractions = np.array([d[0] / data[-1][0] for d in data]) # unweighted_fractions = np.array([d[0] / data_unweighted[-1][0] for d in data_unweighted]) unweighted_useful = unweighted_fractions[mask & non_zero_mask] weighted_useful = weighted_fractions[mask & non_zero_mask] if "pt_jet_genHT_ratio" in histname and pt_low == 800: print("weight_min_pow:", weight_min_pow) print("weight_max_pow:", weight_max_pow) print("unweight_min_pow:", unweight_min_pow) print("unweight_max_pow:", unweight_max_pow) print("unweight_max_pow:", unweight_max_pow) print("weighted_useful:", weighted_useful) print("unweighted_useful:", unweighted_useful) 
gr_count = ROOT.TGraph(len(unweighted_useful), unweighted_useful, weighted_useful) gr_count.SetMarkerColor(ROOT.kRed) gr_count.SetMarkerSize(0) gr_count.SetMarkerStyle(21) gr_count.SetLineColor(ROOT.kRed) gr_count.SetTitle("%s, %g < p_{T} < %g GeV;Relative unweighted count;Relative weighted count" % (get_var_str(histname), pt_low, pt_high)) gr_count.SetTitle("%s, %g < p_{T} < %g GeV;Unweighted fractional drop;Weighted fractional drop" % (get_var_str(histname), pt_low, pt_high)) # add annotations of cuts latexs = [] for i, cut in enumerate(cuts[mask * non_zero_mask]): latex = ROOT.TLatex(gr_count.GetX()[i], gr_count.GetY()[i], " < %.2f" % cut) latex.SetTextSize(0.02) latex.SetTextColor(ROOT.kBlue) gr_count.GetListOfFunctions().Add(latex) latexs.append(latex) # canv.SetLogx(False) # canv.SetLogy(False) # ROOT.TGaxis.SetMaxDigits(2) # gr_count.Draw("ALP") # ROOT.TGaxis.SetMaxDigits(2) # unweighted_min = 0.9999 # # Calculate differences between points # unweighted_diffs = unweighted_fractions[1:] - unweighted_fractions[:-1] # weighted_diffs = weighted_fractions[1:] - weighted_fractions[:-1] # big_diff_inds = [] # for ind, (u, w) in enumerate(zip(unweighted_diffs, weighted_diffs)): # # look for big diff in weighted frac, small diff in unweighted, # # with a limit on the minimum size of unweighted frac # # (only trying to remove a few events) # if u > 0 and w / u > 100 and u < 0.005 and unweighted_fractions[ind] > unweighted_min: # big_diff_inds.append(ind) # if "pt_jet_genHT_ratio" in histname and pt_low == 186: # for u, w in zip(unweighted_diffs, weighted_diffs): # print(u, w) # print(big_diff_inds) # make graph of big diff points, add annotations of cuts # if len(big_diff_inds) > 0: # gr_big_diffs = ROOT.TGraph(len(big_diff_inds), array('d', [unweighted_fractions[i+1] for i in big_diff_inds]), array('d', [weighted_fractions[i+1] for i in big_diff_inds])) # gr_big_diffs.SetLineWidth(0) # gr_big_diffs.SetMarkerColor(ROOT.kBlue) # gr_big_diffs.SetMarkerStyle(25) # latexs = [] # for i, ind in enumerate(big_diff_inds[:]): # latex = ROOT.TLatex(gr_big_diffs.GetX()[i], gr_big_diffs.GetY()[i], " < %.2f" % cuts[ind+1]) # latex.SetTextSize(0.02) # latex.SetTextColor(ROOT.kBlue) # gr_big_diffs.GetListOfFunctions().Add(latex) # latexs.append(latex) # gr_big_diffs.Draw("*") # gr_count.GetXaxis().SetLimits(unweighted_min, 1) # find corresponding value for weighted to set axis range # weighted_min = 0 # for ind, u in enumerate(unweighted_fractions): # if u >= unweighted_min: # weighted_min = weighted_fractions[ind-1] # if ind == len(unweighted_fractions) - 1: # weighted_min = 0 # break # gr_count.GetHistogram().SetMinimum(weighted_min*1.1 - 0.1) # gr_count.GetHistogram().SetMaximum(1) # canv.SaveAs(output_filename.replace(".pdf", "_count_pt%gto%g.pdf" % (pt_low, pt_high))) # do a version zoomed out canv.Clear() gr_count.SetMarkerSize(0.5) gr_count.Draw("AP") # unweighted_min = 0. 
# gr_count.GetXaxis().SetLimits(unweighted_min, 1) # weighted_min = 0 # for ind, u in enumerate(unweighted_fractions): # if u >= unweighted_min: # weighted_min = weighted_fractions[ind-1] # if ind == len(unweighted_fractions) - 1: # weighted_min = 0 # break # gr_count.GetHistogram().SetMinimum(weighted_min*1.1 - 0.1) gr_count.GetXaxis().SetMoreLogLabels() gr_count.GetYaxis().SetMoreLogLabels() weight_min_pow = math.floor(math.log10(min(weighted_useful))) if len(weighted_useful) > 0 else -10 weight_max_pow = math.floor(math.log10(max(weighted_useful))) if len(weighted_useful) > 0 else 0 unweight_min_pow = math.floor(math.log10(min(unweighted_useful))) if len(unweighted_useful) > 0 else -10 unweight_max_pow = math.floor(math.log10(max(unweighted_useful))) if len(unweighted_useful) > 0 else 0 gr_count.GetHistogram().SetMinimum(10**weight_min_pow) gr_count.GetHistogram().SetMaximum(10**(weight_max_pow+1)) gr_count.GetXaxis().SetLimits(10**unweight_min_pow, 10**(unweight_max_pow+1)) canv.SetLogy() canv.SetLogx() canv.SaveAs(output_filename.replace(".pdf", "_count_pt%gto%g_all.pdf" % (pt_low, pt_high))) tf.Close()
def do_cut_scan_per_pt(histname, input_filename, output_filename): ROOT.gStyle.SetPalette(palette_1D) tf = cu.open_root_file(input_filename) h3d = cu.get_from_tfile(tf, histname) h3d_unweighted = cu.get_from_tfile(tf, histname+"_unweighted") if h3d.GetEntries() == 0: return canv = ROOT.TCanvas(cu.get_unique_str(), "", 800, 600) canv.SetTicks(1, 1) canv.SetLeftMargin(0.12) canv.SetRightMargin(0.12) # canv.SetLogz() h2d = h3d.Project3D("zy") # var vs pt h2d_unweighted = h3d_unweighted.Project3D("zy") # var vs pt var_name = os.path.basename(histname).replace("weight_vs_pt_vs_", "").replace("weight_vs_pt_genjet_vs_", "") for ibin in range(3, h2d.GetNbinsX()+1): # iterate over pt bins if h2d.Integral(ibin, ibin+1, 0, -1) == 0: # data.append(None) continue pt_low = h3d.GetYaxis().GetBinLowEdge(ibin) pt_high = h3d.GetYaxis().GetBinLowEdge(ibin+1) data = [] data_unweighted = [] # Do integral, error for increasingly looser cuts # find maximum in this pt bin # yes I probably should collapse to a 1D hist and use GetMaximumBin max_val, max_bin = 0, 0 for icut in range(1, h2d.GetNbinsY()+1): val = h2d.GetBinContent(ibin, icut) if val > max_val: max_val = val max_bin = icut for icut in range(max_bin + 5, h2d.GetNbinsY()+2): err = array('d', [0]) count = h2d.IntegralAndError(ibin, ibin+1, 1, icut-1, err) if count == 0: continue data.append([count, err[0], h2d.GetYaxis().GetBinLowEdge(icut)]) err = array('d', [0]) count = h2d_unweighted.IntegralAndError(ibin, ibin+1, 1, icut, err) data_unweighted.append([count, err[0], h2d.GetYaxis().GetBinLowEdge(icut)]) # Plot count, rel error vs cut value cuts = [d[2] for d in data] gr_count = ROOT.TGraph(len(data), array('d', cuts), array('d', [d[0] / data[-1][0] for d in data])) gr_count.SetMarkerColor(ROOT.kRed) gr_count.SetMarkerStyle(22) gr_count.SetLineColor(ROOT.kRed) gr_count.SetTitle("%g < p_{T} < %g GeV;%s cut (<);Count (relative to loosest cut)" % (pt_low, pt_high, get_var_str(histname))) gr_count_unweighted = ROOT.TGraph(len(data), array('d', cuts), array('d', [d[0] / data_unweighted[-1][0] for d in data_unweighted])) gr_count_unweighted.SetMarkerColor(ROOT.kBlack) gr_count_unweighted.SetMarkerStyle(23) gr_count_unweighted.SetLineColor(ROOT.kBlack) gr_count_unweighted.SetTitle("%g < p_{T} < %g GeV;%s cut (<);Count (relative to loosest cut)" % (pt_low, pt_high, get_var_str(histname))) leg = ROOT.TLegend(0.7, 0.5, 0.85, 0.65) leg.AddEntry(gr_count, "Weighted", "LP") leg.AddEntry(gr_count_unweighted, "Unweighted", "LP") # gr_rel_err = ROOT.TGraph(len(data), array('d', cuts), array('d', [(d[1] / d[0]) if d[0] != 0 else 0 for d in data ])) # gr_rel_err.SetMarkerColor(ROOT.kRed) # gr_rel_err.SetMarkerStyle(22) # gr_rel_err.SetLineColor(ROOT.kRed) # gr_rel_err.SetTitle("%g < p_{T} < %g GeV;%s cut (<);Rel. error" % (pt_low, pt_high, var_name)) canv.SetLogy(False) gr_count.Draw("ALP") gr_count_unweighted.Draw("LP") gr_count.Draw("LP") leg.Draw() canv.SaveAs(output_filename.replace(".pdf", "_count_pt%gto%g.pdf" % (pt_low, pt_high))) # canv.Clear() # gr_rel_err.Draw("ALP") # canv.SetLogy() # gr_rel_err.GetYaxis().SetMoreLogLabels() # canv.SaveAs(output_filename.replace(".pdf", "_rel_err_pt%gto%g.pdf" % (pt_low, pt_high))) tf.Close()
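# Minimal illustration of TH2::IntegralAndError as used in the cut scans above,
# on a toy 2D histogram (name, binning and fills are arbitrary).
import ROOT
from array import array

h2_int_demo = ROOT.TH2D("h2_int_demo", ";p_{T};var", 10, 0, 100, 5, 0, 5)
h2_int_demo.Sumw2()
h2_int_demo.Fill(30, 1.0)
h2_int_demo.Fill(30, 2.0, 3.0)  # weighted fill
err_demo = array('d', [0.])
count_demo = h2_int_demo.IntegralAndError(1, 10, 1, 5, err_demo)  # all x bins, all y bins
print(count_demo, err_demo[0])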
    # },
    # {
    #     "append": "_lowPt",
    #     "title": "30 < p_{T}^{Reco} < 100 GeV",
    # },
    {
        "append": "_midPt",
        "title": "100 < p_{T}^{Reco} < 250 GeV",
    },
    {
        "append": "_highPt",
        "title": "p_{T}^{Reco} > 250 GeV",
    },
]

input_tfile = cu.open_root_file(args.input)

for angle in qgc.COMMON_VARS[:]:  # only care about multiplicity
    for pt_region_dict in pt_regions[:]:
        var_dict = {
            "name": "%s/%s%s" % (source_plot_dir_name, angle.var, pt_region_dict['append']),
            "var_label": "%s (%s)" % (angle.name, angle.lambda_str),
            "title": "%s\n%s" % (region_label, pt_region_dict['title']),
        }
def do_jet_pt_with_var_cuts(histname, cuts, input_filename, output_filename): ROOT.gStyle.SetPalette(palette_1D) total = len(cuts) - 1 + .1 # slight offset to not hit the maximum or minimum # if len(cuts) <= 3: # ROOT.gStyle.SetPalette(ROOT.kCool) # num_colours = ROOT.TColor.GetPalette().fN - 1 # print('num_colours:', num_colours) # for index in range(len(cuts)): # print(num_colours, index, len(cuts), index / len(cuts), num_colours * index / total) # print(index, ROOT.TColor.GetColorPalette(int(num_colours * 1. * index / total))) tf = cu.open_root_file(input_filename) h3d = cu.get_from_tfile(tf, histname) if h3d.GetEntries() == 0: return pt_hists = [] for cut in cuts: max_bin = h3d.GetZaxis().FindFixBin(cut) # print("cut:", cut, "bin:", max_bin) h = h3d.ProjectionY("pt_var_lt_%g" % cut, 0, -1, 0, max_bin, "e") h2 = h.Clone() h2.Rebin(2) if h.GetEntries() > 0: h3 = qgp.hist_divide_bin_width(h2) pt_hists.append(h3) line_styles = [1, 2, 3] if len(cuts) <= 3: line_styles = [1] n_line_styles = len(line_styles) ref_ind = 0 conts = [Contribution(h, label=" < %g" % cut, line_color=cu.get_colour_seq(ind, total), line_style=line_styles[ind % n_line_styles], line_width=2, marker_color=cu.get_colour_seq(ind, total), subplot=pt_hists[ref_ind] if ind != ref_ind else None) for ind, (h, cut) in enumerate(zip(pt_hists, cuts))] jet_str = pt_genjet_str if "_vs_pt_genjet_vs_" in histname else pt_str weight_str = "(unweighted)" if "unweighted" in histname else "(weighted)" ratio_lims = (0.5, 2.5) ratio_lims = (0.5, 1.1) plot = Plot(conts, what='hist', title='%s for cuts on %s %s' % (jet_str, get_var_str(histname), weight_str), xtitle=None, ytitle='N', # xlim=None, ylim=None, legend=True, subplot_type='ratio', subplot_title='* / var < %g' % cuts[ref_ind], subplot_limits=ratio_lims, has_data=False) plot.y_padding_max_log = 200 plot.subplot_maximum_ceil = 4 plot.subplot_maximum_floor = 1.02 plot.subplot_minimum_ceil = 0.98 plot.legend.SetY1(0.7) plot.legend.SetY2(0.89) plot.legend.SetX1(0.78) plot.legend.SetX2(0.88) plot.plot("NOSTACK HISTE", "NOSTACK HIST") plot.set_logx(True, do_more_labels=True) plot.set_logy(True, do_more_labels=False) plot.save(output_filename)
def main(in_args=sys.argv[1:]): parser = argparse.ArgumentParser(description=__doc__, formatter_class=cu.CustomFormatter) parser.add_argument("input", help="input ROOT filename") parser.add_argument("output", help="output ROOT filename") parser.add_argument("--no-genjet-plots", action='store_false', help="Don't do genjet plots for each pt/eta bin") parser.add_argument("--no-correction-fit", action='store_false', help="Don't do fits for correction functions") parser.add_argument("--redo-correction-fit", action='store_true', help="Redo fits for correction functions") parser.add_argument("--inherit-params", action='store_true', help='Use previous eta bins function parameters as starting point. ' 'Helpful when fits not converging.') parser.add_argument("--burr", action='store_true', help='Do Burr type 3 fit for response histograms instead of Gaus') parser.add_argument("--gct", action='store_true', help="Load legacy GCT specifics e.g. fit defaults.") parser.add_argument("--stage1", action='store_true', help="Load stage 1 specifics e.g. fit defaults.") parser.add_argument("--stage2", action='store_true', help="Load stage 2 specifics e.g. fit defaults, pt bins.") parser.add_argument("--central", action='store_true', help="Do central eta bins only (eta <= 3)") parser.add_argument("--forward", action='store_true', help="Do forward eta bins only (eta >= 3)") parser.add_argument("--PUmin", type=float, default=-100, help="Minimum number of PU vertices (refers to *actual* " "number of PU vertices in the event, not the centre " "of of the Poisson distribution)") parser.add_argument("--PUmax", type=float, default=1200, help="Maximum number of PU vertices (refers to *actual* " "number of PU vertices in the event, not the centre " "of of the Poisson distribution)") parser.add_argument("--etaInd", nargs="+", help="list of eta bin INDICES to run over - " "if unspecified will do all. " "This overrides --central/--forward. " "Handy for batch mode. " "IMPORTANT: MUST PUT AT VERY END") args = parser.parse_args(args=in_args) print args if args.stage2: print "Running with Stage2 defaults" elif args.stage1: print "Running with Stage1 defaults" elif args.gct: print "Running with GCT defaults" else: raise RuntimeError("You need to specify defaults: --gct/--stage1/--stage2") # Turn off gen plots if you don't want them - they slow things down, # and don't affect determination of correction fn do_genjet_plots = args.no_genjet_plots if not do_genjet_plots: print "Not producing genjet plots" # Turn off if you don't want to fit to the correction curve # e.g. 
if you're testing your calibrations, since it'll waste time do_correction_fit = args.no_correction_fit if not do_correction_fit: print "Not fitting correction curves" if args.burr: print 'Using Burr Type3 for response hist fits' # Open input & output files, check print "IN:", args.input print "OUT:", args.output if (args.redo_correction_fit and os.path.realpath(args.input) == os.path.realpath(args.output)): input_file = cu.open_root_file(args.input, "UPDATE") output_file = input_file else: input_file = cu.open_root_file(args.input, "READ") output_file = cu.open_root_file(args.output, "RECREATE") # Figure out which eta bins the user wants to run over etaBins = binning.eta_bins if args.etaInd: args.etaInd.append(int(args.etaInd[-1]) + 1) # need upper eta bin edge etaBins = [etaBins[int(x)] for x in args.etaInd] elif args.central: etaBins = [eta for eta in etaBins if eta < 3.1] elif args.forward: etaBins = [eta for eta in etaBins if eta > 2.9] print "Running over eta bins:", etaBins # Store last set of fit params if the user is doing --inherit-param previous_fit_params = [] # Do plots & fitting to get calib consts for i, (eta_min, eta_max) in enumerate(pairwise(etaBins)): print "Doing eta bin: %g - %g" % (eta_min, eta_max) # whether we're doing a central or forward bin (.01 is for rounding err) forward_bin = eta_max > 3.01 # setup pt bins, wider ones for forward region # ptBins = binning.pt_bins if not forward_bin else binning.pt_bins_wide ptBins = binning.pt_bins_stage2 if not forward_bin else binning.pt_bins_stage2_hf # Load fit function & starting params - important as wrong starting params # can cause fit failures default_params = [] if args.stage2: default_params =STAGE2_DEFAULT_PARAMS_SELECT # this is selected around line 90 elif args.stage1: default_params = STAGE1_DEFAULT_PARAMS elif args.gct: default_params = GCT_DEFAULT_PARAMS # Ignore the genric fit defaults and use the last fit params instead if args.inherit_params and previous_fit_params != []: print "Inheriting params from last fit" default_params = previous_fit_params[:] fitfunc = central_fit_select # this is selected around line 90 set_fit_params(fitfunc, default_params) # Actually do the graph making and/or fitting! if args.redo_correction_fit: fit_params = redo_correction_fit(input_file, output_file, eta_min, eta_max, fitfunc) else: fit_params = make_correction_curves(input_file, output_file, ptBins, eta_min, eta_max, fitfunc, do_genjet_plots, do_correction_fit, args.PUmin, args.PUmax, args.burr) # Save successful fit params if fit_params != []: previous_fit_params = fit_params[:] input_file.Close() output_file.Close() return 0
def draw_horizontal_rsp(max_pt): # lines of constant response line1 = ROOT.TLine(0, 1, max_pt, 1) line1.SetLineStyle(1) line1.SetLineWidth(2) line1.SetLineColor(ROOT.kMagenta) line1.Draw() ROOT.SetOwnership(line1, False) if __name__ == "__main__": # L1 Ntuple file ntuple_filename = '/hdfs/user/ra12451/L1JEC/CMSSW_7_6_0_pre7/L1JetEnergyCorrections/Stage2_Run260627/Express/run260627_expressNoJEC.root' f_ntuple = cu.open_root_file(ntuple_filename) reco_tree = cu.get_from_file(f_ntuple, 'l1JetRecoTree/JetRecoTree') # Matched pairs file pairs_filename = "/hdfs/user/ra12451/L1JEC/CMSSW_7_6_0_pre7/L1JetEnergyCorrections/Stage2_Run260627/pairs/pairs_run260627_expressNoJEC_data_ref10to5000_l10to5000_dr0p4_noCleaning_fixedEF_CSC_HLTvars.root" f_pairs = cu.open_root_file(pairs_filename) pairs_tree = cu.get_from_file(f_pairs, 'valid') plot_dir = '/users/ra12451/L1JEC/CMSSW_7_6_0_pre7/src/L1Trigger/L1JetEnergyCorrections/Run260627/pfCleaning/' if not os.path.isdir(plot_dir): os.makedirs(plot_dir) eta_cut = 'TMath::Abs(eta) < 0.348' etaRef_cut = 'TMath::Abs(etaRef) < 0.348' LS_cut = ("((LS > 91 && LS < 611) || (LS > 613 && LS < 757) || (LS > 760 && LS < 788) || (LS > 791 && LS < 1051) || (LS > 1054 && LS < 1530) || (LS > 1533 && LS < 1845))")
def main(in_args=sys.argv[1:]): print in_args parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("input", help="input ROOT filename") parser.add_argument("output", help="output ROOT filename") parser.add_argument("--incl", action="store_true", help="Do inclusive eta plots") parser.add_argument("--excl", action="store_true", help="Do exclusive eta plots") parser.add_argument("--central", action='store_true', help="Do central eta bins only (eta <= 3)") parser.add_argument("--forward", action='store_true', help="Do forward eta bins only (eta >= 3)") parser.add_argument("--etaInd", nargs="+", help="list of eta bin INDICES to run over - " "if unspecified will do all. " "This overrides --central/--forward. " "Handy for batch mode. " "IMPORTANT: MUST PUT AT VERY END") parser.add_argument("--maxPt", default=500, type=float, help="Maximum pT for L1 Jets") parser.add_argument("--PUmin", default=-99, type=float, help="Minimum number of PU vertices (refers to *actual* " "number of PU vertices in the event, not the centre " "of of the distribution)") parser.add_argument("--PUmax", default=999, type=float, help="Maximum number of PU vertices (refers to *actual* " "number of PU vertices in the event, not the centre " "of of the distribution)") args = parser.parse_args(args=in_args) # Open input & output files, check input_file = cu.open_root_file(args.input, "READ") output_file = cu.open_root_file(args.output, "RECREATE") print "IN:", args.input print "OUT:", args.output if not input_file or not output_file: raise Exception("Input or output files cannot be opened") etaBins = binning.eta_bins if args.etaInd: args.etaInd.append(int(args.etaInd[-1]) + 1) # need upper eta bin edge # check eta bins are ok etaBins = [etaBins[int(x)] for x in args.etaInd] elif args.central: etaBins = binning.eta_bins_central elif args.forward: etaBins = binning.eta_bins_forward print "Running over eta bins:", etaBins ptBins = binning.pt_bins ptBins = binning.pt_bins_stage2 # Do plots for each eta bin if args.excl: for i, eta in enumerate(etaBins[:-1]): eta_min = eta eta_max = etaBins[i + 1] plot_checks(input_file, output_file, eta_min, eta_max, args.maxPt, args.PUmin, args.PUmax) # Do a response vs pt graph plot_rsp_pt(input_file, output_file, eta_min, eta_max, ptBins, "pt", args.maxPt, args.PUmin, args.PUmax) plot_rsp_pt(input_file, output_file, eta_min, eta_max, ptBins, "ptRef", args.maxPt, args.PUmin, args.PUmax) # Do an inclusive plot for all eta bins if args.incl and len(etaBins) > 2: plot_checks(input_file, output_file, etaBins[0], etaBins[-1], args.maxPt, args.PUmin, args.PUmax) # Do a response vs pt graph # ptBins_wide = list(np.arange(10, 250, 8)) plot_rsp_pt(input_file, output_file, etaBins[0], etaBins[-1], ptBins, "pt", args.maxPt, args.PUmin, args.PUmax) plot_rsp_pt(input_file, output_file, etaBins[0], etaBins[-1], ptBins, "ptRef", args.maxPt, args.PUmin, args.PUmax) # Do a response vs eta graph, inclusive over all pt plot_rsp_eta(input_file, output_file, etaBins, 0, 1000, 'pt', args.PUmin, args.PUmax) # Sub-binned by pt for pt_min, pt_max in binning.check_pt_bins: plot_rsp_eta(input_file, output_file, etaBins, pt_min, pt_max, 'pt', args.PUmin, args.PUmax) plot_rsp_eta(input_file, output_file, etaBins, pt_min, pt_max, 'ptRef', args.PUmin, args.PUmax) input_file.Close() output_file.Close() return 0
def do_projection_plots(in_file, plot_dir, do_fit=True, skip_dirs=None): hist_name = "pt_jet_response" tfile = cu.open_root_file(in_file) dirs = cu.get_list_of_element_names(tfile) for mydir in dirs: if skip_dirs and mydir in skip_dirs: continue if hist_name not in cu.get_list_of_element_names(tfile.Get(mydir)): continue print("Doing", mydir) h2d = cu.grab_obj_from_file(in_file, "%s/%s" % (mydir, hist_name)) ax = h2d.GetXaxis() bin_edges = [ax.GetBinLowEdge(i) for i in range(1, ax.GetNbins() + 2)] bin_centers, sigmas, sigmas_unc = [], [], [] for pt_min, pt_max in zip(bin_edges[:-1], bin_edges[1:]): obj = qgg.get_projection_plot(h2d, pt_min, pt_max, cut_axis='x') if obj.GetEffectiveEntries() < 20: continue # obj.Rebin(rebin) obj.Scale(1. / obj.Integral()) label = "%s < p_{T}^{Gen} < %s GeV" % (str(pt_min), str(pt_max)) if do_fit: do_gaus_fit(obj) fit = obj.GetFunction("gausFit") label += "\n" label += fit_results_to_str(fit) # bin_centers.append(fit.GetParameter(1)) bin_centers.append(0.5 * (pt_max + pt_min)) sigmas.append(fit.GetParameter(2)) sigmas_unc.append(fit.GetParError(2)) # output_filename = os.path.join(plot_dir, "%s_%s_ptGen%sto%s.%s" % (mydir, hist_name, str(pt_min), str(pt_max), OUTPUT_FMT)) # cont = Contribution(obj, label=label) # delta = pt_max - pt_min # # xlim = (pt_min - 10*delta, pt_max + 10*delta) # xlim = (obj.GetMean()-3*obj.GetRMS(), obj.GetMean()+3*obj.GetRMS()) # ylim = (0, obj.GetMaximum()*1.1) # plot = Plot([cont], what='hist', # xtitle="p_{T}^{Reco} [GeV]", xlim=xlim, ylim=ylim) # plot.plot() # don't use histe as it wont draw the fit # plot.save(output_filename) gr = ROOT.TGraphErrors(len(bin_centers), array('d', bin_centers), array('d', sigmas), array('d', [0] * len(bin_centers)), array('d', sigmas_unc)) factor = 0.2 gr_ideal = ROOT.TGraphErrors( len(bin_centers), array('d', bin_centers), array('d', [factor * pt for pt in bin_centers]), array('d', [0] * len(bin_centers)), array('d', [0] * len(bin_centers))) gr_cont = Contribution(gr, label='Measured') gr_ideal_cont = Contribution(gr_ideal, label=str(factor) + '*p_{T}', line_color=ROOT.kBlue, marker_color=ROOT.kBlue) plot = Plot([gr_cont, gr_ideal_cont], what='graph', xtitle="p_{T}^{Reco}", ytitle="#sigma [GeV]", ylim=[0, 100], xlim=[10, 4000]) plot.plot() plot.set_logx() output_filename = os.path.join( plot_dir, "%s_%s_sigma_plot.%s" % (mydir, hist_name, OUTPUT_FMT)) plot.save(output_filename)
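# A small self-contained example of the Gaussian-fit step that do_projection_plots relies
# on (do_gaus_fit itself is defined elsewhere in this codebase; this just shows the
# underlying ROOT fit call and how sigma and its uncertainty can be read off). The
# histogram is a toy one, filled only for demonstration.
import ROOT

h_fit_demo = ROOT.TH1D("h_fit_demo", "", 50, -5, 5)
h_fit_demo.FillRandom("gaus", 10000)
fit_result_demo = h_fit_demo.Fit("gaus", "SQ")  # S: return the fit result, Q: quiet
sigma_demo = fit_result_demo.Parameter(2)
sigma_err_demo = fit_result_demo.ParError(2)
print(sigma_demo, sigma_err_demo)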
def main(in_args=sys.argv[1:]):
    """Make 'showoff' plots from the outputs of RunMatcher, makeResolutionPlots.py,
    checkCalibration.py and runCalibration.py."""
    print in_args
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--pairs",
                        help="input ROOT file with matched pairs from RunMatcher")
    parser.add_argument("--res",
                        help="input ROOT file with resolution plots from makeResolutionPlots.py")
    parser.add_argument("--checkcal",
                        help="input ROOT file with calibration check plots from checkCalibration.py")
    parser.add_argument("--calib",
                        help="input ROOT file from output of runCalibration.py")
    parser.add_argument("--oDir",
                        help="Directory to save plots. Default is in same location as ROOT file.")
    parser.add_argument("--detail",
                        help="Plot all the individual component hists for each eta bin. There are a lot!",
                        action='store_true')
    parser.add_argument("--format",
                        help="Format for plots (PDF, png, etc). Note that 2D correlation plots will "
                             "always be PNGs to avoid large files.",
                        default="pdf")
    parser.add_argument("--title", help="Title for plots.")
    parser.add_argument('--zip',
                        help="Zip filename for zipping up all plots. Don't include extension")
    # FIXME
    # parser.add_argument("--etaInd",
    #                     help="list of eta bin index/indices to run over")
    parser.add_argument("--gifs",
                        help="Make GIFs (only applicable if --detail is also used)",
                        action='store_true')
    parser.add_argument("--gifexe",
                        help='Convert executable to use. Default is result of `which convert`')
    args = parser.parse_args(args=in_args)
    print args

    if args.detail:
        print "Warning: producing all component hists. This could take a while..."

    if args.gifs:
        if args.detail:
            print "Making animated graphs from fit plots."
        else:
            print "To use the --gifs flag, you also need --detail"
        if not args.gifexe:
            args.gifexe = find_executable('convert')
            if not args.gifexe:
                print 'Cannot find convert exe, not making gifs'
                args.gifs = False  # was args.gif, which nothing else reads
            else:
                print 'Using %s to make GIFs' % args.gifexe

    # customise titles
    # note the use of global keyword
    if args.title:
        global plot_title
        plot_title = args.title

    if args.oDir == os.getcwd():
        print "Warning: plots will be made in $PWD!"
    # auto determine output directory
    if not args.oDir:
        filename, stem = '', ''
        if args.pairs:
            filename, stem = args.pairs, 'pairs_'
        elif args.checkcal:
            filename, stem = args.checkcal, 'check_'
        elif args.res:
            filename, stem = args.res, 'res_'
        elif args.calib:
            filename, stem = args.calib, 'output_'
        new_dir = os.path.basename(filename).replace(".root", '').replace(stem, 'showoff_')
        args.oDir = os.path.join(os.path.dirname(os.path.abspath(filename)), new_dir)
    cu.check_dir_exists_create(args.oDir)
    print "Output directory:", args.oDir

    # Choose pt binning
    ptBins = binning.pt_bins_stage2

    # Do plots with output from RunMatcher
    # ------------------------------------------------------------------------
    if args.pairs:
        pairs_file = cu.open_root_file(args.pairs)
        pairs_tree = cu.get_from_file(pairs_file, "valid")

        # eta binned
        for emin, emax in pairwise(binning.eta_bins):
            plot_dR(pairs_tree, eta_min=emin, eta_max=emax, cut="1", oDir=args.oDir)
            plot_pt_both(pairs_tree, eta_min=emin, eta_max=emax, cut="1", oDir=args.oDir)

        # plot_dR(pairs_tree, eta_min=0, eta_max=5, cut="1", oDir=args.oDir)  # all eta
        # plot_pt_both(pairs_tree, eta_min=0, eta_max=5, cut="1", oDir=args.oDir)  # all eta
        plot_eta_both(pairs_tree, oDir=args.oDir)  # all eta
        plot_dR(pairs_tree, eta_min=0, eta_max=3, cut="1", oDir=args.oDir)  # central
        plot_pt_both(pairs_tree, eta_min=0, eta_max=3, cut="1", oDir=args.oDir)  # central
        plot_dR(pairs_tree, eta_min=3, eta_max=5, cut="1", oDir=args.oDir)  # forward
        plot_pt_both(pairs_tree, eta_min=3, eta_max=5, cut="1", oDir=args.oDir)  # forward

        pairs_file.Close()

    # Do plots with output from makeResolutionPlots.py
    # ------------------------------------------------------------------------
    if args.res:
        res_file = cu.open_root_file(args.res)

        # exclusive eta graphs
        for eta_min, eta_max in pairwise(binning.eta_bins):
            print eta_min, eta_max
            plot_res_all_pt(res_file, eta_min, eta_max, args.oDir, args.format)

            if args.detail:
                list_dir = os.path.join(args.oDir, 'eta_%g_%g' % (eta_min, eta_max))
                cu.check_dir_exists_create(list_dir)
                pt_diff_filenames = []
                ptBins = binning.pt_bins_stage2_8 if eta_min < 2.9 else binning.pt_bins_stage2_8_wide
                for pt_min, pt_max in pairwise(ptBins):
                    pt_diff_fname = plot_pt_diff(res_file, eta_min, eta_max, pt_min, pt_max, args.oDir, 'png')
                    pt_diff_filenames.append(pt_diff_fname)
                pt_diff_filenames_file = os.path.join(list_dir, 'list_pt_diff.txt')
                write_filelist(pt_diff_filenames, pt_diff_filenames_file)
                if args.gifs:
                    make_gif(pt_diff_filenames_file, pt_diff_filenames_file.replace('.txt', '.gif'), args.gifexe)

        # inclusive eta graphs
        for (eta_min, eta_max) in [[0, 3], [3, 5]]:
            print eta_min, eta_max
            plot_res_all_pt(res_file, eta_min, eta_max, args.oDir, args.format)
            plot_ptDiff_Vs_pt(res_file, eta_min, eta_max, args.oDir, args.format)

            if args.detail:
                list_dir = os.path.join(args.oDir, 'eta_%g_%g' % (eta_min, eta_max))
                cu.check_dir_exists_create(list_dir)
                pt_diff_filenames = []
                ptBins = binning.pt_bins_stage2_8 if eta_min < 2.9 else binning.pt_bins_stage2_8_wide
                for pt_min, pt_max in pairwise(ptBins):
                    pt_diff_fname = plot_pt_diff(res_file, eta_min, eta_max, pt_min, pt_max, args.oDir, 'png')
                    pt_diff_filenames.append(pt_diff_fname)
                pt_diff_filenames_file = os.path.join(list_dir, 'list_pt_diff.txt')
                write_filelist(pt_diff_filenames, pt_diff_filenames_file)
                if args.gifs:
                    make_gif(pt_diff_filenames_file, pt_diff_filenames_file.replace('.txt', '.gif'), args.gifexe)

        # plot_eta_pt_rsp_2d(res_file, binning.eta_bins, binning.pt_bins[4:], args.oDir, args.format)

        # components of these:
        # if args.detail:
        #     ptBins = binning.pt_bins_stage2_8 if not forward_bin else binning.pt_bins_stage2_8_wide
        #     for pt_min, pt_max in pairwise(ptBins):
        #         plot_pt_diff(res_file, 0, 3, pt_min, pt_max, args.oDir, args.format)
        #         plot_pt_diff(res_file, 0, 5, pt_min, pt_max, args.oDir, args.format)
        #         plot_pt_diff(res_file, 3, 5, pt_min, pt_max, args.oDir, args.format)

        res_file.Close()

    # Do plots with output from checkCalibration.py
    # ------------------------------------------------------------------------
    if args.checkcal:
        etaBins = binning.eta_bins
        check_file = cu.open_root_file(args.checkcal)

        # ptBinsWide = list(np.arange(10, 250, 8))

        # individual eta bins
        for eta_min, eta_max in pairwise(etaBins):
            for (normX, logZ) in product([True, False], [True, False]):
                plot_l1_Vs_ref(check_file, eta_min, eta_max, logZ, args.oDir, 'png')
                plot_rsp_Vs_l1(check_file, eta_min, eta_max, normX, logZ, args.oDir, 'png')
                plot_rsp_Vs_ref(check_file, eta_min, eta_max, normX, logZ, args.oDir, 'png')

            plot_rsp_Vs_pt_candle_violin(check_file, eta_min, eta_max, "l1", args.oDir, 'png')
            plot_rsp_Vs_pt_candle_violin(check_file, eta_min, eta_max, "gen", args.oDir, 'png')

            if args.detail:
                list_dir = os.path.join(args.oDir, 'eta_%g_%g' % (eta_min, eta_max))
                cu.check_dir_exists_create(list_dir)

                # print individual histograms, and make a list suitable for imagemagick to turn into a GIF
                pt_plot_filenames = plot_rsp_pt_hists(check_file, eta_min, eta_max, ptBins, "pt", args.oDir, 'png')
                pt_plot_filenames_file = os.path.join(list_dir, 'list_pt.txt')
                write_filelist(pt_plot_filenames, pt_plot_filenames_file)

                # print individual histograms, and make a list suitable for imagemagick to turn into a GIF
                ptRef_plot_filenames = plot_rsp_pt_hists(check_file, eta_min, eta_max, ptBins, "ptRef", args.oDir, 'png')
                ptRef_plot_filenames_file = os.path.join(list_dir, 'list_ptRef.txt')
                write_filelist(ptRef_plot_filenames, ptRef_plot_filenames_file)

                # make dem GIFs
                if args.gifs:
                    for inf in [pt_plot_filenames_file, ptRef_plot_filenames_file]:
                        make_gif(inf, inf.replace('.txt', '.gif'), args.gifexe)

        # Graph of response vs pt, but in bins of eta
        x_range = [0, 150]  # for zoomed-in low pt
        x_range = None
        plot_rsp_pt_binned_graph(check_file, etaBins, "pt", args.oDir, args.format, x_range=x_range)
        plot_rsp_pt_binned_graph(check_file, etaBins, "ptRef", args.oDir, args.format, x_range=x_range)

        all_rsp_pt_plot_filenames = []
        all_rsp_ptRef_plot_filenames = []

        # Loop over central/forward eta, do 2D plots, and graphs, and component hists
        for (eta_min, eta_max) in [[0, 3], [3, 5]]:
            print eta_min, eta_max
            for (normX, logZ) in product([True, False], [True, False]):
                plot_l1_Vs_ref(check_file, eta_min, eta_max, logZ, args.oDir, 'png')
                plot_rsp_Vs_l1(check_file, eta_min, eta_max, normX, logZ, args.oDir, 'png')
                plot_rsp_Vs_ref(check_file, eta_min, eta_max, normX, logZ, args.oDir, 'png')

            if args.detail:
                plot_rsp_pt_hists(check_file, eta_min, eta_max, ptBins, "pt", args.oDir, 'png')
                plot_rsp_pt_hists(check_file, eta_min, eta_max, ptBins, "ptRef", args.oDir, 'png')

            # graphs
            plot_rsp_eta_exclusive_graph(check_file, eta_min, eta_max, binning.check_pt_bins, 'pt', args.oDir, args.format)
            plot_rsp_eta_exclusive_graph(check_file, eta_min, eta_max, binning.check_pt_bins, 'ptRef', args.oDir, args.format)
            plot_rsp_pt_graph(check_file, eta_min, eta_max, args.oDir, args.format, x_range)
            plot_rsp_ptRef_graph(check_file, eta_min, eta_max, args.oDir, args.format, x_range)

            for etamin, etamax in pairwise(etaBins):
                if etamin < eta_min or etamax > eta_max:
                    continue
                print etamin, etamax

                this_rsp_pt_plot_filenames = []
                this_rsp_ptRef_plot_filenames = []
                # component hists/fits for the eta graphs, binned by pt
                for pt_min, pt_max in binning.check_pt_bins:
                    pt_filename = plot_rsp_eta_bin_pt(check_file, etamin, etamax, 'pt', pt_min, pt_max, args.oDir, 'png')
                    this_rsp_pt_plot_filenames.append(pt_filename)
                    ptRef_filename = plot_rsp_eta_bin_pt(check_file, etamin, etamax, 'ptRef', pt_min, pt_max, args.oDir, 'png')
                    this_rsp_ptRef_plot_filenames.append(ptRef_filename)

                pt_list_file = os.path.join(args.oDir, 'list_pt_eta_%g_%g.txt' % (etamin, etamax))
                write_filelist(this_rsp_pt_plot_filenames, pt_list_file)
                if args.gifs:
                    make_gif(pt_list_file, pt_list_file.replace('.txt', '.gif'), args.gifexe)

                ptRef_list_file = os.path.join(args.oDir, 'list_ptRef_eta_%g_%g.txt' % (etamin, etamax))
                write_filelist(this_rsp_ptRef_plot_filenames, ptRef_list_file)
                if args.gifs:
                    make_gif(ptRef_list_file, ptRef_list_file.replace('.txt', '.gif'), args.gifexe)

                all_rsp_pt_plot_filenames.extend(this_rsp_pt_plot_filenames)
                all_rsp_ptRef_plot_filenames.extend(this_rsp_ptRef_plot_filenames)

        pt_list_file = os.path.join(args.oDir, 'list_pt_eta_%g_%g.txt' % (etaBins[0], etaBins[-1]))
        write_filelist(all_rsp_pt_plot_filenames, pt_list_file)
        if args.gifs:
            make_gif(pt_list_file, pt_list_file.replace('.txt', '.gif'), args.gifexe)

        ptRef_list_file = os.path.join(args.oDir, 'list_ptRef_eta_%g_%g.txt' % (etaBins[0], etaBins[-1]))
        write_filelist(all_rsp_ptRef_plot_filenames, ptRef_list_file)
        if args.gifs:
            make_gif(ptRef_list_file, ptRef_list_file.replace('.txt', '.gif'), args.gifexe)

        check_file.Close()

    # Do plots with output from runCalibration.py
    # ------------------------------------------------------------------------
    if args.calib:
        calib_file = cu.open_root_file(args.calib)
        for eta_min, eta_max in pairwise(binning.eta_bins[:-1]):
            print eta_min, eta_max

            # 2D correlation heat maps
            for (normX, logZ) in product([True, False], [True, False]):
                plot_rsp_Vs_ref(calib_file, eta_min, eta_max, normX, logZ, args.oDir, 'png')
                plot_rsp_Vs_l1(calib_file, eta_min, eta_max, normX, logZ, args.oDir, 'png')

            # individual fit histograms for each pt bin
            if args.detail:
                list_dir = os.path.join(args.oDir, 'eta_%g_%g' % (eta_min, eta_max))
                cu.check_dir_exists_create(list_dir)

                if eta_min > 2.9:
                    ptBins = binning.pt_bins_stage2_hf

                rsp_plot_filenames = []
                pt_plot_filenames = []
                for pt_min, pt_max in pairwise(ptBins):
                    rsp_name = plot_rsp_eta_pt_bin(calib_file, eta_min, eta_max, pt_min, pt_max, args.oDir, 'png')
                    rsp_plot_filenames.append(rsp_name)
                    pt_name = plot_pt_bin(calib_file, eta_min, eta_max, pt_min, pt_max, args.oDir, 'png')
                    pt_plot_filenames.append(pt_name)

                # print individual histograms, and make a list suitable for imagemagick to turn into a GIF
                rsp_plot_filenames_file = os.path.join(list_dir, 'list_rsp.txt')
                write_filelist(rsp_plot_filenames, rsp_plot_filenames_file)

                # print individual histograms, and make a list suitable for imagemagick to turn into a GIF
                pt_plot_filenames_file = os.path.join(list_dir, 'list_pt.txt')
                write_filelist(pt_plot_filenames, pt_plot_filenames_file)

                # make dem gifs
                if args.gifs:
                    for inf in [pt_plot_filenames_file, rsp_plot_filenames_file]:
                        make_gif(inf, inf.replace('.txt', '.gif'), args.gifexe)
                else:
                    print "To make animated gif from PNGs using a plot list:"
                    print "convert -dispose Background -delay 50 -loop 0 @%s "\
                          "pt_eta_%g_%g.gif" % (pt_plot_filenames_file, eta_min, eta_max)

            # the correction curve graph
            plot_correction_graph(calib_file, eta_min, eta_max, args.oDir, args.format)

        calib_file.Close()

    if args.zip:
        print 'Zipping up files'
        zip_filename = os.path.basename(args.zip.split('.')[0])
        make_archive(zip_filename, 'gztar', args.oDir)
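# Hedged sketch (assumption): pairwise(), write_filelist() and make_gif() are
# helpers defined elsewhere in this package. Minimal stand-ins are sketched below
# for reference (commented out to avoid shadowing the real helpers); the convert
# flags mirror the command printed in the --calib branch above.
#
# import itertools
# import subprocess
#
# def pairwise(iterable):
#     """s -> (s0, s1), (s1, s2), (s2, s3), ..."""
#     a, b = itertools.tee(iterable)
#     next(b, None)
#     return zip(a, b)
#
# def write_filelist(filenames, list_filename):
#     """Write one plot filename per line, for ImageMagick's @file syntax."""
#     with open(list_filename, 'w') as f:
#         f.write('\n'.join(filenames))
#
# def make_gif(list_filename, gif_filename, convert_exe='convert'):
#     """Stitch the plots listed in list_filename into an animated GIF."""
#     cmd = [convert_exe, '-dispose', 'Background', '-delay', '50',
#            '-loop', '0', '@' + list_filename, gif_filename]
#     subprocess.call(cmd)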