def plot_correlation(self):
    # Make correlations between estimators
    log.info("Correlating N_ch of each estimator")
    corr_dir = self.results_post.GetPath().split(":")[1] + '/correlations'
    try:
        self.f.mkdir(corr_dir, recurse=True)
    except ValueError:
        # directory may already exist
        pass
    # Take ntuple from the first estimator and then add friends to this one
    nt0 = self.sums[0].FindObject("fEventTuple")
    nt0.SetAlias(self.sums[0].GetName(), "fEventTuple")

    # build ntuple
    for est_dir in self.sums[1:]:
        nt0.AddFriend(est_dir.FindObject("fEventTuple"), est_dir.GetName())
    for ref_est in self.considered_ests:
        for est_dir in self.sums:
            log.info("Correlating {0} with {1}".format(ref_est, est_dir.GetName()))
            corr_hist = Hist2D(400, 0, 400, 400, 0, 400,
                               name="corr_hist_{0}_vs_{1}".format(ref_est, est_dir.GetName()))
            # Labels are deliberately swapped, see Projection below!
            corr_hist.title = ("Correlation N_{{ch}} in {0} and {1};N_{{ch}} {1};N_{{ch}} {0}"
                               .format(ref_est, est_dir.GetName()))
            # this projects onto y:x, to make coding more adventurous
            nt0.Project(corr_hist.name,
                        "{0}.nch:{1}.nch".format(ref_est, est_dir.GetName()),
                        "ev_weight")
            corr_hist.drawstyle = 'colz'
            self.f.cd(corr_dir)
            corr_hist.write()
def __iter__(self):
    passed_events = 0
    entries = 0
    total_entries = float(self._tree.GetEntries())
    t2 = self._init_time
    for i in xrange(self._tree.GetEntries()):
        entries += 1
        self._tree.GetEntry(i)
        for name, (coll_name, mix, decorate_func) in self._collections.items():
            coll = xAODTreeCollection(self._tree, name, coll_name,
                                      mix=mix, decorate_func=decorate_func)
            object.__setattr__(self._tree, name, coll)
        if self._filters(self._tree):
            yield self._tree
            passed_events += 1
        if self._events == passed_events:
            break
        if time.time() - t2 > 2:
            entry_rate = int(entries / (time.time() - self._init_time))
            log.info("{0:d} entries per second. "
                     "{1:.0f}% done current tree".format(
                         entry_rate, 100 * entries / total_entries))
            t2 = time.time()
    self._filters.finalize()
    self._store.clear()
def plot_event_counters(self):
    log.info("Creating event counters")
    for est_dir in get_est_dirs(self.sums, self.considered_ests):
        results_est_dir = self.results_post.__getattr__(est_dir.GetName())
        # Nasty, but just use a reference estimator here...
        corr = get_correlation_histogram(self.sums, est_dir.GetName(), "EtaLt05")
        counter = asrootpy(corr.ProjectionX())
        counter.name = "event_counter"
        path = results_est_dir.GetPath().split(":")[1]  # file.root:/internal/root/path
        self.f.cd(path)
        results_est_dir.WriteTObject(counter)
def plot_dNdpT(self, pid_selection):
    """
    Plot dNdpT of the particles in pid_selection

    Parameters
    ----------
    pid_selection : str
        Either all charged particles ('ch') or 'pi', 'K' or 'p'
    """
    log.info("1/N_evts dN_ch/dpT plots")
    figs = []
    for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
                                         get_est_dirs(self.results_post, self.considered_ests)):
        if sums_est_dir.GetName() != res_est_dir.GetName():
            raise IndexError("Order of estimator dirs is different in sums and results_post")
        res_dir_str = res_est_dir.GetPath().split(":")[1]
        fig = Figure()
        fig.plot.palette = 'colorblind'
        # fig.plot.ncolors = 5
        fig.legend.position = 'tr'
        fig.ytitle = "1/N_{evts} dN/dp_{T} (" + make_estimator_title(sums_est_dir.GetName()) + ")"
        fig.xtitle = "p_{T} (GeV)"
        fig.plot.logy = True
        hists = []
        if pid_selection == 'ch':
            fig.legend.title = "#pi^{#pm}, K^{#pm}, p, #Lambda, #Xi, #Omega"
            pid_numbers = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON,
                           kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS]
        if pid_selection == 'pi':
            fig.legend.title = "#pi^{#pm}"
            pid_numbers = [kPIMINUS, kPIPLUS]
        if pid_selection == 'K':
            fig.legend.title = "K^{#pm}"
            pid_numbers = [kKMINUS, kKPLUS]
        if pid_selection == 'p':
            fig.legend.title = "p, #bar{p}"
            pid_numbers = [kPROTON, kANTIPROTON]
        for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()],
                                            self.nch_edges[sums_est_dir.GetName()]):
            hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin, normalized=False))
            hists[-1].title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
        # add MB last to be consistent with colors in other plots;
        # MB spans from the very first to the very last bin we look at
        classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0],
                             self.nch_edges[sums_est_dir.GetName()][-1][-1])
        hists.append(get_pT_distribution(res_est_dir, pid_numbers, classifier_bin_mb, normalized=False))
        hists[-1].title = "MB"
        # scale by bin width
        [h.Scale(1, "width") for h in hists]
        [fig.add_plottable(p, p.title) for p in hists]
        fig.save_to_root_file(self.f, "dN{0}dpT".format(pid_selection), res_dir_str)
        figs.append(fig)
    return figs
def __init__(self, chain, filters=None, events=-1):
    self._chain = chain
    self._tree = ROOT.xAOD.MakeTransientTree(self._chain)
    # Create the TStore that holds the shallow copies
    self._store = ROOT.xAOD.TStore()
    log.info(self._tree)
    self._collections = {}
    self._events = events
    self._init_time = time.time()
    if filters is None:
        self._filters = EventFilterList([])
    else:
        self._filters = filters
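# A minimal usage sketch (not part of the original sources) for the event loop
# defined by __init__/__iter__ above. The enclosing class name `EventStream`,
# the tree name and the input file are assumptions for illustration only.
def _example_event_loop():
    chain = ROOT.TChain("CollectionTree")       # assumed xAOD tree name
    chain.Add("DAOD_example.root")              # hypothetical input file
    stream = EventStream(chain, filters=None, events=100)  # assumed class name
    n_selected = 0
    for tree in stream:
        # collections registered in self._collections are attached to `tree` here
        n_selected += 1
    log.info("Processed {0:d} selected events".format(n_selected))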
def plot_meanpt_vs_ref_mult_for_pids(self):
    log.info("Creating mean pT plots")
    figs = []
    for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
                                         get_est_dirs(self.results_post, self.considered_ests)):
        if sums_est_dir.GetName() != res_est_dir.GetName():
            raise IndexError("Order of estimator dirs is different in sums and results_post")
        res_dir_str = res_est_dir.GetPath().split(":")[1]
        corr_hist = get_correlation_histogram(self.sums, sums_est_dir.GetName(), "EtaLt05")
        # Get the <pT> per classifier bin; then, re-map the classifier value to the
        # reference classifier (eg EtaLt05).
        # This might not make a lot of sense, actually. Maybe it would be much more telling
        # if I were to put the percentile bins on the x-axis? As in the highest 1% of that
        # classifier has a <pT> of ...
        graphs = []
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPI0, kPIMINUS, kPIPLUS]), corr_hist))
        graphs[-1].title = "#pi"
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kKMINUS, kKPLUS]), corr_hist))
        graphs[-1].title = "K^{#pm}"
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kPROTON, kANTIPROTON]), corr_hist))
        graphs[-1].title = "p"
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kK0S]), corr_hist))
        graphs[-1].title = "K^{0}_{S}"
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kLAMBDA, kANTILAMBDA]), corr_hist))
        graphs[-1].title = "#Lambda"
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kXI, kANTIXI]), corr_hist))
        graphs[-1].title = "#Xi"
        graphs.append(remap_x_values(get_meanpt_vs_estmult(res_est_dir, [kOMEGAMINUS, kOMEGAPLUS]), corr_hist))
        graphs[-1].title = "#Omega"
        # sanitize graphs:
        for g in graphs:
            remove_zero_value_points(g)
            remove_points_with_x_err_gt_1NchRef(g)
            remove_points_with_equal_x(g)
        fig = Figure()
        fig.plot.palette = 'root'
        fig.plot.ncolors = 7
        fig.plot.xmin = 0
        fig.plot.xmax = 40
        fig.plot.ymin = 0.3
        fig.plot.ymax = 2.1
        fig.ytitle = "<p_{T}>"
        fig.xtitle = "N_{ch}|_{|#eta|<0.5}"
        fig.legend.title = make_estimator_title(sums_est_dir.GetName())
        [fig.add_plottable(g, g.title) for g in graphs]
        fig.save_to_root_file(self.f, "mean_pt", res_dir_str)
        figs.append(fig)
    return figs
def plot_nMPI_vs_Nch(self):
    log.info("Creating nMPI(Nch) summary plot")
    summary_fig = Figure()
    summary_fig.xtitle = "N_{ch}^{est}"
    summary_fig.ytitle = "<N_{MPI}>"
    summary_fig.plot.palette = 'root'
    summary_fig.legend.position = 'br'
    summary_fig.plot.logy = True
    summary_fig.plot.ymin = 1
    for est_dir in get_est_dirs(self.sums, self.considered_ests):
        h_tmp = asrootpy(get_correlation_histogram(self.sums, est_dir.GetName(), "nMPI").ProfileX())
        summary_fig.add_plottable(h_tmp, make_estimator_title(est_dir.GetName()))
    path = self.results_post.GetPath().split(":")[1]  # file.root:/internal/root/path
    summary_fig.save_to_root_file(self.f, "nMPI_summary", path=path)
    return [summary_fig]
def plot_pT_HM_div_pt_MB(self, scale_nMPI):
    log.info("Plot dN_{HM}/dpT / dN_{MB}/dpT ratios scaled with nMPI")
    figs = []
    for sums_est_dir, res_est_dir in zip(get_est_dirs(self.sums, self.considered_ests),
                                         get_est_dirs(self.results_post, self.considered_ests)):
        if sums_est_dir.GetName() != res_est_dir.GetName():
            raise IndexError("Order of estimator dirs is different in sums and results_post")
        res_dir_str = res_est_dir.GetPath().split(":")[1]
        fig = Figure()
        fig.plot.palette = 'root'
        fig.plot.ncolors = 7
        fig.xtitle = "p_{T} (GeV)"
        fig.legend.title = make_estimator_title(sums_est_dir.GetName())
        if scale_nMPI:
            fig.ytitle = ("#left[ #frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}} #right] "
                          "#times #left[ #frac{<N_{MPI}^{MB}>}{<N_{MPI}^{HM}>} #right]")
        else:
            fig.ytitle = "#frac{dN^{HM}}{dp_{T}} / #frac{dN^{MB}}{dp_{T}}"
        charged_particles = [kPIMINUS, kPIPLUS, kKMINUS, kKPLUS, kPROTON, kANTIPROTON,
                             kLAMBDA, kANTILAMBDA, kXI, kANTIXI, kOMEGAMINUS, kOMEGAPLUS]
        # get the MB distribution which will be used to divide the nch-binned distributions
        classifier_bin_mb = (self.nch_edges[sums_est_dir.GetName()][0][0],
                             self.nch_edges[sums_est_dir.GetName()][-1][-1])
        pt_dist_mb = get_pT_distribution(res_est_dir, charged_particles, classifier_bin_mb, normalized=False)
        mean_nmpi_mb = get_mean_nMPI(sums_est_dir, classifier_bin_mb)
        for perc_bin, classifier_bin in zip(self.perc_bins[sums_est_dir.GetName()],
                                            self.nch_edges[sums_est_dir.GetName()]):
            # get the pt distribution in this Nch interval
            pt_dist_in_interval = get_pT_distribution(res_est_dir, charged_particles,
                                                      classifier_bin, normalized=False)
            title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
            if scale_nMPI:
                mean_nmpi_hm = get_mean_nMPI(sums_est_dir, classifier_bin)
                fig.add_plottable((pt_dist_in_interval / pt_dist_mb) * (mean_nmpi_mb / mean_nmpi_hm), title)
                name = "pt_hm_div_pt_mb_scaled_nMPI"
            else:
                fig.add_plottable((pt_dist_in_interval / pt_dist_mb), title)
                name = "pt_hm_div_pt_mb"
        fig.save_to_root_file(self.f, name, res_dir_str)
        figs.append(fig)
    return figs
def plot_PNch_summary(self):
    log.info("Creating P(Nch) summary plot")
    summary_fig = Figure()
    summary_fig.xtitle = "N_{ch}^{est}"
    summary_fig.ytitle = "P(N_{ch}^{est})"
    summary_fig.legend.position = 'tr'
    summary_fig.plot.logy = True
    for est_dir in get_est_dirs(self.sums, self.considered_ests):
        est_name = est_dir.GetName()
        h_tmp = get_PNch_vs_estmult(self.sums, est_name)
        if h_tmp.Integral() > 0:
            h_tmp.Scale(1.0 / h_tmp.Integral())
            summary_fig.add_plottable(h_tmp, make_estimator_title(est_name))
    path = self.results_post.GetPath().split(":")[1]  # file.root:/internal/root/path
    summary_fig.save_to_root_file(self.f, "PNch_summary", path=path)
    # list as return type is expected for making the pdf
    return [summary_fig]
def plot_mult_vs_pt(self):
    log.info("Making 2D pt plots for each particle kind")
    for est_dir in get_est_dirs(self.sums, self.considered_ests):
        path = (self.results_post.GetPath().split(":")[1]  # file.root:/internal/root/path
                + "/" + est_dir.GetName() + "/mult_pt")
        try:
            self.f.mkdir(path, recurse=True)
        except ValueError:
            pass
        self.f.cd(path)
        h3d = asrootpy(est_dir.FindObject('classifier_pT_PID_{0}'.format(est_dir.GetName())))
        # loop through all particle kinds:
        nPIDs = h3d.zaxis.GetNbins()
        for ibin in range(1, nPIDs + 1):
            h3d.zaxis.SetRange(ibin, ibin)
            mult_pt = asrootpy(h3d.Project3D("yx"))
            mult_pt.name = h3d.zaxis.GetBinLabel(ibin)
            mult_pt.Write()
def plot_dNdetas(self, ratio_to_mb):
    # Loop over all estimators in the Sums list:
    log.info("Creating dN/deta binned in multiplicity")
    figs = []
    for est_dir in get_est_dirs(self.sums, self.considered_ests):
        # does this estimator have several multiplicity bins?
        # Q2, for example, only works with pythia and makes no sense to plot
        # on Dipsy as it would only be the MB line
        if len(self.nch_edges[est_dir.GetName()]) == 1:
            continue
        results_est_dir = self.results_post.Get(est_dir.GetName())
        event_counter = asrootpy(results_est_dir.Get("event_counter"))
        fig = Figure()
        fig.plot.palette = 'colorblind'
        fig.xtitle = '#eta'
        fig.ytitle = 'Ratio of dN_{ch}/d#eta over MB result' if ratio_to_mb else '1/N #times dN_{ch}/d#eta'
        fig.legend.title = make_estimator_title(est_dir.GetName())
        fig.plot.ymin = 0
        dNdeta_mb = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter,
                                                          [1, event_counter.GetXaxis().GetNbins()])
        for cls_bin, perc_bin in zip(self.nch_edges[est_dir.GetName()], self.perc_bins[est_dir.GetName()]):
            title = "{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
            dNdeta_in_interval = get_dNdeta_in_classifier_bin_interval(est_dir, event_counter, cls_bin)
            if ratio_to_mb:
                fig.add_plottable(dNdeta_in_interval / dNdeta_mb, legend_title=title)
            else:
                fig.add_plottable(dNdeta_in_interval, legend_title=title)
        # add MB as well, if it is not the ratio plots we are making
        if not ratio_to_mb:
            title = "MB"
            fig.add_plottable(dNdeta_mb, legend_title=title)
        path = results_est_dir.GetPath().split(":")[1]  # file.root:/internal/root/path
        if ratio_to_mb:
            fig.save_to_root_file(self.f, "dNdeta_MB_ratio_summary", path=path)
        else:
            fig.save_to_root_file(self.f, "dNdeta_summary", path=path)
        figs.append(fig)
    return figs
def run(argv=sys.argv):
    # Must be done before :py:mod:`rootpy` logs any messages.
    import logging
    log = logging.getLogger('pyroplot')  # set up logging
    try:
        import ROOT
    except ImportError:
        # module failed to load - maybe PYTHONPATH is not set correctly?
        # guess the right path, but that is only possible if ROOTSYS is set:
        if os.environ.get('ROOTSYS') is None:
            print "ERROR: Could not load the Python ROOT module. Please make sure that your ROOT installation is compiled with Python support and that your PYTHONPATH is set correctly and includes libPyROOT.so"
            exit(1)
        sys.path.append(os.path.join(os.environ.get('ROOTSYS'), "lib"))
        sys.path.append(os.path.join(os.environ.get('ROOTSYS'), "lib", "root"))
        # try again:
        try:
            import ROOT
        except ImportError:
            print "ERROR: Could not load the Python ROOT module. Please make sure that your ROOT installation is compiled with Python support and that your PYTHONPATH is set correctly and includes libPyROOT.so"
            exit(1)
    try:
        import rootpy
    except ImportError:
        # rootpy is not installed; use (old) version provided with EUTelescope
        # determine (real) path to subdirectory pymodules (relative to current path)
        libdir = os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))),
                              "pymodules", "rootpy")
        # search for any rootpy folders
        import glob
        rootpydirs = glob.glob(libdir + "*")
        if not rootpydirs:
            print "Error: Could not find the rootpy module provided with EUTelescope in %s!" % (libdir)
        else:
            # add last entry to python search path (subfolder rootpy where the modules are located)
            sys.path.append(rootpydirs[-1])
        # try again loading the module
        try:
            import rootpy
        except ImportError:
            print "Error: Could not load the rootpy modules. Please install them from http://www.rootpy.org/install.html"
            exit(1)
        except SyntaxError:
            req_version = (2, 5)
            cur_version = sys.version_info
            if cur_version < req_version:
                print "Error: Python version too old: due to its dependency on rootpy, this script requires a Python interpreter version 2.6 or later (installed: %s.%s.%s)!" % (cur_version[:3])
                exit(1)
            print "Error: Failed to load rootpy module! Possibly incompatible with installed Python version (%s.%s.%s)?" % (cur_version[:3])
            exit(1)
    from rootpy import log
    log = log["/pyroplot"]
    rootpy.log.basic_config_colorized()
    ROOT.gROOT.SetBatch(True)
    ROOT.gErrorIgnoreLevel = 1001
    import argparse
    # command line argument parsing
    parser = argparse.ArgumentParser(
        description="Python ROOT plotter - A tool for selecting and assembling histogram plots "
                    "and comparison plots from multiple ROOT files at once")
    parser.add_argument('--version', action='version',
                        version='Revision: $Revision$, $LastChangedDate$')
    parser.add_argument("-l", "--log-level", default="info", metavar="LEVEL",
                        help="Sets the verbosity of log messages where LEVEL is either debug, info, warning or error")
    parser.add_argument("--compare", action="store_true", default=False,
                        help="Compare the selected histograms between files (ratio plots, chi2) where the first file provides the reference.")
    parser.add_argument("-log", "--log-scale", action="store_true", default=False,
                        help="Uses a logarithmic scale for the y axis; only relevant when not using '--compare'.")
    parser.add_argument('--select', '-s', action='append',
                        help="Specify regular expression(s) for histogram selection.")
    parser.add_argument("--selection-from-file", metavar="FILE",
                        help="Load list of regular expressions for histogram selection from file (plain text file, one reg ex per line).")
    parser.add_argument("--one-file-per-histogram", action="store_true", default=False,
                        help="Writes one file per histogram instead of storing all plots in one single file.")
    parser.add_argument("-o", "--output", default="./overview.pdf", metavar="FILE/PATH",
                        help="Output path and file name. If the file does not end in '.pdf' it will be assumed to be a path and created if needed. If --one-file-per-histogram is set, this will be the output directory for the plots.")
    parser.add_argument("--with-2D", "-2D", action="store_true", default=False,
                        help="Also loads TH2-type histograms.")
    parser.add_argument("--with-3D", "-3D", action="store_true", default=False,
                        help="Also loads TH3-type and Profile2D-type histograms, implies --with-2D.")
    parser.add_argument("--list-only", "--list", action="store_true", default=False,
                        help="Do not generate plots but only list objects in ROOT file(s) and indicate which ones would be selected.")
    parser.add_argument("--strict", action="store_true", default=False,
                        help="Require the selection to match the full histogram path and name (with implied '^' and '$') instead of only a partial match.")
    parser.add_argument("files", nargs='+',
                        help="The files to be processed; additional info STRING to be included in the plot legend can be added by specifying FILE:STRING")
    # parse the arguments
    args = parser.parse_args(argv)
    # set the logging level
    numeric_level = getattr(logging, "INFO", None)  # default: INFO messages and above
    if args.log_level:
        # Convert log level to upper case to allow the user to specify --log-level=DEBUG or --log-level=debug
        numeric_level = getattr(logging, args.log_level.upper(), None)
        if not isinstance(numeric_level, int):
            log.error('Invalid log level: %s' % args.log_level)
            exit(2)
    log.setLevel(numeric_level)
    log.debug("Command line arguments used: %s ", args)
    log.debug("Using rootpy %s from %s" % (rootpy.__version__, rootpy.__file__))
    # load and combine all specified reg ex
    regexs = []
    # first from file
    if args.selection_from_file:
        f = open(args.selection_from_file, 'r')
        try:
            lines = f.read().splitlines()
            for line in lines:
                if line:  # test if line is not empty (would match anything)
                    log.debug("Loading reg ex from file " + args.selection_from_file + ": '" + line + "'")
                    regexs.append(line)
        finally:
            f.close()
    if args.select:
        for arg in args.select:
            log.debug("Using reg ex from command line: " + arg)
            regexs.append(arg)
    # still nothing to select? use default
    if not regexs:
        import inspect
        filepath = os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), "default.sel")
        try:
            f = open(filepath, 'r')
            try:
                lines = f.read().splitlines()
                for line in lines:
                    if line:  # test if line is not empty (would match anything)
                        log.debug("Loading reg ex from file " + filepath + ": '" + line + "'")
                        regexs.append(line)
            finally:
                f.close()
        except IOError:
            log.warn("Could not find the file with the default selection ('" + filepath + "'), will use default of '.*' (select all)")
            regexs.append('.*')
    # parse output file name and verify that it ends in '.pdf'
    outputFilePath = ""
    fileName, fileExtension = os.path.splitext(args.output)
    if not fileExtension == '.pdf':
        log.debug("Output argument does not end in '.pdf': '%s'. Assuming it's meant to be a path" % args.output)
        if not args.one_file_per_histogram:
            # append default name for single histogram file
            outputFilePath = os.path.join(args.output, "overview.pdf")
        else:
            outputFilePath = args.output
    else:
        if args.one_file_per_histogram:
            # all we need is the path, strip the file and append a slash
            outputFilePath = os.path.dirname(args.output)
        else:
            outputFilePath = args.output
    # parse file names and extract additionally provided info
    fileNames = []
    fileDescr = {}
    for thisFile in args.files:
        s = thisFile.strip().split(':', 1)  # try to split the string
        if (len(s) == 1):
            # didn't work, only have one entry
            fileNames.append(s[0])
            fileDescr[s[0]] = ""
        else:
            fileNames.append(s[0])
            fileDescr[s[0]] = s[1]
    histoDicts = []  # our histograms: each element will store a dict() of histogram
                     # objects with its full path in the root file as key
    selectedHistos = []
    # loop over all files
    for idx, thisFile in enumerate(fileNames):
        # only search for matching histo names on first iteration if doing a comparison between files
        if not idx or not args.compare:
            selectedHistos = findHistogramsInFile(thisFile, regexs, args.strict, args.list_only)
        if args.list_only:
            continue
        h = loadHistogramsFromFile(thisFile, selectedHistos, args.with_2D, args.with_3D)
        histoDicts.append(h)  # append to main histo list
    if histoDicts:
        log.info("Input file(s) read. %d histograms matched selection criteria and were loaded"
                 % (sum(len(histos) for histos in histoDicts)))
        makePlotCollection(histoDicts, fileNames, fileDescr, outputFilePath,
                           args.compare, args.one_file_per_histogram, args.log_scale)
    log.info("done")
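# Example invocation of run() above (illustrative only; the script name
# `pyroplot.py` and the ROOT file names are placeholders, not taken from the
# original sources). Compare a set of histograms between a reference file and a
# test file, writing ratio plots to a single PDF:
#
#   python pyroplot.py --compare --select "hitmap.*" \
#       --output comparison.pdf reference.root:Reference test.root:Test
#
# run() can also be driven programmatically by passing the same tokens as a
# list, e.g. run(["--list-only", "histograms.root"]), depending on how the
# caller slices argv.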
def plot_pt_distribution_ratios(self):
    # create particle ratio vs pT plots
    log.info("Computing histograms vs pt")
    results_path = self.results_post.GetPath().split(":")[1]  # file.root:/internal/root/path
    # Loop over all estimators in the Sums list:
    figs = []

    def get_new_figure():
        fig = Figure()
        fig.xtitle = 'p_{T} (GeV)'
        fig.plot.ymin = 0
        fig.plot.xmax = 10
        fig.plot.palette = 'colorblind'
        # fig.plot.palette_ncolors = len(nch_edges) - 1
        fig.legend.position = 'br'
        return fig

    for est_dir in get_est_dirs(self.results_post, self.considered_ests):
        dirname = '{0}/{1}/pid_ratios/'.format(results_path, est_dir.GetName())
        mult_binned_pt_dists = {}
        mult_binned_pt_dists['proton'] = [
            get_pT_distribution(est_dir, [kANTIPROTON, kPROTON], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['pi_ch'] = [
            get_pT_distribution(est_dir, [kPIMINUS, kPIPLUS], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['xi'] = [
            get_pT_distribution(est_dir, [kANTIXI, kXI], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['omega'] = [
            get_pT_distribution(est_dir, [kOMEGAMINUS, kOMEGAPLUS], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['lambda'] = [
            get_pT_distribution(est_dir, [kANTILAMBDA, kLAMBDA], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['k0s'] = [
            get_pT_distribution(est_dir, [kK0S], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['k_ch'] = [
            get_pT_distribution(est_dir, [kKPLUS, kKMINUS], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        mult_binned_pt_dists['pi0'] = [
            get_pT_distribution(est_dir, [kPI0], classifier_bin_interval)
            for classifier_bin_interval in self.nch_edges[est_dir.GetName()]
        ]
        perc_titles = ["{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100)
                       for perc_bin in self.perc_bins[est_dir.GetName()]]

        fig = get_new_figure()
        name = "proton_over_pich__vs__pt"
        fig.ytitle = "(p+#bar{p})/#pi^{+-}"
        fig.plot.ymax = .3
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi_ch'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "Xi_over_pich__vs__pt"
        fig.plot.ymax = .06
        fig.legend.position = 'tl'
        fig.ytitle = "#Xi/#pi^{+-}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi_ch'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "OmegaCh_over_pich__vs__pt"
        fig.plot.ymax = .005
        fig.legend.position = 'tl'
        fig.ytitle = "#Omega_{ch}/#pi^{+-} "
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi_ch'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        # Ratios to pi0
        fig = get_new_figure()
        name = "pich_over_pi0__vs__pt"
        fig.plot.ymax = 2.5
        fig.legend.position = 'bl'
        fig.ytitle = "#pi^{+-}/#pi^{0}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['pi_ch'], mult_binned_pt_dists['pi0'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "proton_over_pi0__vs__pt"
        fig.plot.ymax = 1
        fig.legend.position = 'tr'
        fig.ytitle = "p/#pi^{0}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi0'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "K0S_over_pi0__vs__pt"
        fig.plot.ymax = 1.4
        fig.legend.position = 'tl'
        fig.ytitle = "K^{0}_{S}/#pi^{0}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['k0s'], mult_binned_pt_dists['pi0'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "Lambda_over_pi0__vs__pt"
        fig.plot.ymax = .9
        fig.legend.position = 'tl'
        fig.ytitle = "#Lambda/#pi^{0}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['pi0'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "Xi_over_pi0__vs__pt"
        fig.plot.ymax = .08
        fig.legend.position = 'tl'
        fig.ytitle = "#Xi/#pi^{0}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi0'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "OmegaCh_over_pi0__vs__pt"
        fig.plot.ymax = .005
        fig.legend.position = 'tl'
        fig.ytitle = "#Omega_{ch}/#pi^{0}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi0'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        # Ratios to K0S
        fig = get_new_figure()
        name = "proton_over_K0S__vs__pt"
        fig.plot.ymax = 2.6
        fig.legend.position = 'tr'
        fig.ytitle = "p/K^{0}_{S}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['k0s'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "Lambda_over_K0S__vs__pt"
        fig.plot.ymax = 1
        fig.legend.position = 'bl'
        fig.ytitle = "#Lambda/K^{0}_{S}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['k0s'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "Xi_over_K0S__vs__pt"
        fig.plot.ymax = .2
        fig.legend.position = 'tl'
        fig.ytitle = "#Xi/K^{0}_{S}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['k0s'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "OmegaCh_over_K0S__vs__pt"
        fig.plot.ymax = .012
        fig.legend.position = 'tl'
        fig.ytitle = "#Omega_{ch}/K^{0}_{S}"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['k0s'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

        fig = get_new_figure()
        name = "Kaon_over_pich__vs__pt"
        fig.plot.ymax = 1
        fig.legend.position = 'tl'
        fig.ytitle = "(K^{+} + K^{-}) / (#pi^{+} +#pi^{-})"
        fig.legend.title = make_estimator_title(est_dir.GetName())
        [fig.add_plottable(h1 / h2, legend_title=title)
         for h1, h2, title in zip(mult_binned_pt_dists['k_ch'], mult_binned_pt_dists['pi_ch'], perc_titles)]
        fig.save_to_root_file(self.f, name, dirname)
        figs.append(fig)

    return figs
    'ZDC': [(1, 0.7), (.7, .3), (.3, .05), (0.001, 0.0)],
    'nMPI': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
    'Q2': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
    'spherocity': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
    'sphericity': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
}

if __name__ == "__main__":
    if len(sys.argv) < 4:
        print """Usage: python post.py file.root {Inel, InelGt0, V0AND} "<summary name>" """
        quit()
    # go into batch mode
    ROOT.gROOT.SetBatch(True)

    log = log["/post"]  # set name of this script in logger
    log.info("IsBatch: {0}".format(ROOT.gROOT.IsBatch()))

    try:
        global_trigger = sys.argv[2]
    except IndexError:
        global_trigger = ""
    sums_dir_name = "Sums" + global_trigger
    results_dir_name = "results_post" + global_trigger

    plotting = Plotting(f_name=sys.argv[1],
                        sums_dir_name=sums_dir_name,
                        results_dir_name=results_dir_name,
                        percentile_bins=percentile_bins,
                        considered_ests=considered_ests)
    latexdoc = Beamerdoc(author="Christian Bourjau", title=sys.argv[3])
    # run the actual plots:
    sec = latexdoc.add_section(r"$dN/d\eta$")
    [sec.add_figure(fig) for fig in plotting.plot_dNdetas(ratio_to_mb=False)]
def plot_PNch(self):
    log.info("Creating P(Nch_est) and P(Nch_refest) histograms")
    # mult_bin_size = 10
    figs = []
    for ref_est_name in self.ref_ests:
        for res_est_dir in get_est_dirs(self.results_post, self.considered_ests):
            est_name = res_est_dir.GetName()
            # Figure properties:
            fig_vs_estmult = Figure()
            fig_vs_refmult = Figure()
            fig_vs_estmult.plot.logy = True
            fig_vs_refmult.plot.logy = True
            fig_vs_estmult.plot.palette = 'colorblind'
            fig_vs_refmult.plot.palette = 'colorblind'
            fig_vs_estmult.legend.position = 'tr'
            fig_vs_refmult.legend.position = 'tr'
            fig_vs_estmult.xtitle = "N_{{ch}}^{{{0}}}".format(est_name)
            fig_vs_refmult.xtitle = "N_{{ch}}^{{{0}}}".format(ref_est_name)
            fig_vs_estmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(est_name)
            fig_vs_refmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(ref_est_name)
            corr_hist = get_correlation_histogram(self.sums, est_name, ref_est_name)

            # logic when dealing with fixed bins given in Nch:
            # ------------------------------------------------
            # mean_nch_est = corr_hist.GetMean(1)  # mean of x axis
            # nch_max = corr_hist.xaxis.GetNbins()
            # nch_cutoff = mean_nch_est * mean_mult_cutoff_factor
            # nch_bins = [(low, low + mult_bin_size) for low in range(0, int(nch_cutoff), mult_bin_size)]
            # # a large last bin covering the rest:
            # nch_bins += [(nch_bins[-1][2], nch_max)]
            # legend_tmpl = "{} < N_{ch} < {}"

            # logic when dealing with percentile bins:
            # ----------------------------------------
            # event_counter_est = asrootpy(getattr(res_est_dir, "event_counter"))
            legend_tmpl = "{0}% - {1}%"
            fig_vs_estmult.legend.title = "Selected in {0}".format(make_estimator_title(ref_est_name))
            fig_vs_refmult.legend.title = "Selected in {0}".format(make_estimator_title(est_name))
            # WARNING: the following needs tweaking when going back to fixed N_ch bins!
            for nch_bin, perc_bin in zip(self.nch_edges[ref_est_name], self.perc_bins[ref_est_name]):
                # vs est_mult:
                corr_hist.xaxis.SetRange(0, 0)  # reset x axis
                corr_hist.yaxis.SetRange(nch_bin[0], nch_bin[1])
                h_vs_est = asrootpy(corr_hist.ProjectionX(gen_random_name()))
                if h_vs_est.Integral() > 0:
                    h_vs_est.Scale(1.0 / h_vs_est.Integral())
                    fig_vs_estmult.add_plottable(h_vs_est, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100))
                else:
                    log.info("No charged particles in {0}*100 percentile bin of estimator {1}. "
                             "This should not happen".format(perc_bin, ref_est_name))
            for nch_bin, perc_bin in zip(self.nch_edges[est_name], self.perc_bins[est_name]):
                # vs ref_mult:
                corr_hist.yaxis.SetRange(0, 0)  # reset y axis
                corr_hist.xaxis.SetRange(*nch_bin)
                h_vs_ref = asrootpy(corr_hist.ProjectionY(gen_random_name()))
                if h_vs_ref.Integral() > 0:
                    h_vs_ref.Scale(1.0 / h_vs_ref.Integral())
                    fig_vs_refmult.add_plottable(h_vs_ref, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100))
                else:
                    log.info("No charged particles in {0}*100 percentile bin of estimator {1}. "
                             "This should not happen".format(perc_bin, est_name))
            path = res_est_dir.GetPath().split(":")[1]
            # vs est_mult
            fig_vs_estmult.save_to_root_file(self.f, "PNchEst_binned_in_Nch{0}".format(ref_est_name), path)
            # vs ref_mult
            fig_vs_refmult.save_to_root_file(self.f, "PNch{0}_binned_in_NchEst".format(ref_est_name), path)
            figs.append(fig_vs_estmult)
            figs.append(fig_vs_refmult)
    return figs
def plot_pid_ratio_vs_refmult(self):
    log.info("Creating plots vs refmult")
    ratios_dir = self.results_post.GetPath().split(":")[1] + '/pid_ratios_vs_refmult'

    def get_new_figure():
        fig = Figure()
        fig.plot.ncolors = len(self.considered_ests)
        fig.xtitle = "N_{ch}|_{" + make_estimator_title('EtaLt05') + "}"
        fig.plot.xmin = 0
        fig.plot.xmax = 60
        return fig

    figs = []

    # Proton / pi_ch
    fig = get_new_figure()
    pids1, pids2 = ['-2212', '2212'], ['-211', '211']
    fig.ytitle = "p/#pi^{+-}"
    fig.plot.ymin, fig.plot.ymax = 0.04, 0.13
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # K / pi_ch
    fig = get_new_figure()
    pids1, pids2 = ['310', '321', '-321'], ['-211', '211']
    fig.ytitle = "K^{*}/#pi^{+-}"
    fig.plot.ymin, fig.plot.ymax = 0.09, 0.30
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # Lambda / pi_ch
    fig = get_new_figure()
    pids1, pids2 = ['3122'], ['-211', '211']
    fig.ytitle = "#Lambda / #pi^{+-}"
    fig.plot.ymin, fig.plot.ymax = 0.005, 0.035
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # Xi / pi_ch
    fig = get_new_figure()
    pids1, pids2 = ['3312'], ['-211', '211']
    fig.ytitle = "#Xi / #pi^{+-}"
    fig.plot.ymin, fig.plot.ymax = 0.0004, 0.003
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # Omega / pi_ch
    fig = get_new_figure()
    pids1, pids2 = ['3334', '-3334'], ['-211', '211']
    fig.ytitle = "#Omega / #pi^{+-}"
    fig.plot.ymin, fig.plot.ymax = 0.00001, 0.0005
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # pi_ch / pi0
    fig = get_new_figure()
    pids1, pids2 = ['-211', '211'], ['111']
    fig.ytitle = "#pi^{+-}/#pi^{0}"
    fig.plot.ymin, fig.plot.ymax = 1.5, 2.2
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # proton / pi0
    fig = get_new_figure()
    pids1, pids2 = ['-2212', '2212'], ['111']
    fig.ytitle = "p/#pi^{0}"
    fig.plot.ymin, fig.plot.ymax = 0.09, 0.30
    fig.legend.position = 'tl'
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # K / pi0
    fig = get_new_figure()
    pids1, pids2 = ['310', '321', '-321'], ['111']
    fig.ytitle = "K^{*}/#pi^{0}"
    fig.plot.ymin, fig.plot.ymax = 0.15, 0.50
    fig.legend.position = 'tl'
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # Lambda / pi0
    fig = get_new_figure()
    pids1, pids2 = ['3122'], ['111']
    fig.ytitle = "#Lambda/#pi^{0}"
    fig.plot.ymin, fig.plot.ymax = 0.014, 0.045
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # Xi / pi0
    fig = get_new_figure()
    pids1, pids2 = ['3312'], ['111']
    fig.ytitle = "#Xi/#pi^{0}"
    fig.plot.ymin, fig.plot.ymax = 0.0010, 0.005
    fig.legend.position = 'tl'
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # Omega / pi0
    fig = get_new_figure()
    pids1, pids2 = ['3334', '-3334'], ['111']
    fig.ytitle = "#Omega/#pi^{0}"
    fig.legend.position = 'tl'
    fig.plot.ymin, fig.plot.ymax = 0.00002, 0.0008
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # K_ch / K0_S
    fig = get_new_figure()
    pids1, pids2 = ['321', '-321'], ['310']
    fig.ytitle = "(K^{+}+K^{-}) / (2#timesK^{0}_{S})"
    fig.plot.ymin, fig.plot.ymax = 0.4, 1.5
    fig.legend.position = 'tl'
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, scale=.5)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # K0_S / Lambda
    fig = get_new_figure()
    pids1, pids2 = ['310'], ['-3122', '3122']
    fig.ytitle = "K^{0}_{S} / #Lambda"
    fig.plot.ymin, fig.plot.ymax = 1.3, 3.7
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    # K0_S / Xi
    fig = get_new_figure()
    pids1, pids2 = ['310'], ['3312']
    fig.ytitle = "K^{0}_{S} / #Xi"
    fig.plot.ymin, fig.plot.ymax = 15, 80
    graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2)
    [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs]
    name = "_".join(pids1) + "_div_" + "_".join(pids2)
    fig.save_to_root_file(self.f, name, ratios_dir)
    figs.append(fig)

    return figs
def convert(rfile, hfile, rpath='', entries=-1, userfunc=None, selection=None, indexes=[]): isatty = check_tty(sys.stdout) if isatty: widgets = [Percentage(), ' ', Bar(), ' ', ETA()] own_h5file = False if isinstance(hfile, basestring): hfile = pd.HDFStore(hfile) own_h5file = True own_rootfile = False if isinstance(rfile, basestring): rfile = root_open(rfile) own_rootfile = True for dirpath, dirnames, treenames in rfile.walk( rpath, class_pattern='TTree'): # skip root if not dirpath and not treenames: continue # skip directories w/o trees or subdirs if not dirnames and not treenames: continue where_group = '/' + os.path.dirname(dirpath) current_dir = os.path.basename(dirpath) #if not current_dir: #group = hfile.root #else: #group = hfile.createGroup(where_group, current_dir, "") ntrees = len(treenames) log.info( "Will convert {0:d} tree{1} in this directory".format( ntrees, 's' if ntrees != 1 else '')) for treename in treenames: input_tree = rfile.Get(os.path.join(dirpath, treename)) path_to_tree = os.path.join(dirpath, treename) if userfunc is not None: tmp_file = TemporaryFile() # call user-defined function on tree and get output trees log.info("Calling user function on tree '{0}'".format( input_tree.GetName())) trees = userfunc(input_tree) if not isinstance(trees, list): trees = [trees] else: trees = [input_tree] tmp_file = None for tree in trees: log.info("Converting tree '{0}' with {1:d} entries ...".format( tree.GetName(), tree.GetEntries())) #if tree.GetName() in group: #log.warning( #"skipping tree '{0}' that already exists " #"in the output file".format(tree.GetName())) #continue total_entries = tree.GetEntries() pbar = None if isatty and total_entries > 0: pbar = ProgressBar(widgets=widgets, maxval=total_entries) if entries <= 0: # read the entire tree if pbar is not None: pbar.start() recarray = tree2rec(tree, selection=selection) recarray = pd.DataFrame(_drop_object_col(recarray)) hfile.append(path_to_tree, recarray, data_columns = indexes) #table = hfile.createTable( #group, tree.GetName(), #recarray, tree.GetTitle()) ## flush data in the table #table.flush() ## flush all pending data #hfile.flush() else: # read the tree in chunks offset = 0 while offset < total_entries or offset == 0: if offset > 0: with warnings.catch_warnings(): warnings.simplefilter( "ignore", RootNumpyUnconvertibleWarning) recarray = tree2rec( tree, entries=entries, offset=offset, selection=selection) recarray = pd.DataFrame(_drop_object_col(recarray, warn=False)) #table.append(recarray) hfile.append(path_to_tree,recarray, data_columns = indexes) else: recarray = tree2rec( tree, entries=entries, offset=offset, selection=selection) recarray = pd.DataFrame(_drop_object_col(recarray)) if pbar is not None: # start after any output from root_numpy pbar.start() #table = hfile.createTable( #group, tree.GetName(), #recarray, tree.GetTitle()) hfile.append(path_to_tree,recarray, data_columns = indexes) offset += entries if offset <= total_entries and pbar is not None: pbar.update(offset) ## flush data in the table #table.flush() ## flush all pending data #hfile.flush() if pbar is not None: pbar.finish() input_tree.Delete() if userfunc is not None: for tree in trees: tree.Delete() tmp_file.Close() if own_h5file: hfile.close() if own_rootfile: rfile.Close()
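# Illustrative usage of convert() above; the file names, selection and column names are
# placeholders. The ROOT file is walked for TTrees and each tree is appended to a pandas
# HDFStore under its in-file path, optionally in chunks of `entries` rows, with `indexes`
# declared as data columns for later querying.
import pandas as pd

convert('example_input.root', 'example_output.h5',
        entries=100000,                # convert in chunks of 100k entries per append
        selection='some_branch > 0',   # optional TTree selection string (placeholder)
        indexes=['event_id'])          # placeholder column(s) to expose as data columns

# The HDF store can also be opened explicitly and reused across calls:
with pd.HDFStore('example_output.h5', mode='w') as store:
    convert('example_input.root', store)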
def plot_pt_distribution_ratios(self): # create particle ratio vs pT plots log.info("Computing histograms vs pt") results_path = self.results_post.GetPath().split(":")[1] # file.root:/internal/root/path # Loop over all estimators in the Sums list: figs = [] def get_new_figure(): fig = Figure() fig.xtitle = 'p_{T} (GeV)' fig.plot.ymin = 0 fig.plot.xmax = 10 fig.plot.palette = 'colorblind' # fig.plot.palette_ncolors = len(nch_edges) - 1 fig.legend.position = 'br' return fig for est_dir in get_est_dirs(self.results_post, self.considered_ests): dirname = '{0}/{1}/pid_ratios/'.format(results_path, est_dir.GetName()) mult_binned_pt_dists = {} mult_binned_pt_dists['proton'] = [ get_pT_distribution(est_dir, [kANTIPROTON, kPROTON], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['pi_ch'] = [ get_pT_distribution(est_dir, [kPIMINUS, kPIPLUS], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['xi'] = [ get_pT_distribution(est_dir, [kANTIXI, kXI], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['omega'] = [ get_pT_distribution(est_dir, [kOMEGAMINUS, kOMEGAPLUS], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['lambda'] = [ get_pT_distribution(est_dir, [kANTILAMBDA, kLAMBDA], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['k0s'] = [ get_pT_distribution(est_dir, [kK0S], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['k_ch'] = [ get_pT_distribution(est_dir, [kKPLUS, kKMINUS], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] mult_binned_pt_dists['pi0'] = [ get_pT_distribution(est_dir, [kPI0], classifier_bin_interval) for classifier_bin_interval in self.nch_edges[est_dir.GetName()] ] perc_titles = ["{0}%-{1}%".format(perc_bin[1] * 100, perc_bin[0] * 100) for perc_bin in self.perc_bins[est_dir.GetName()]] fig = get_new_figure() name = "proton_over_pich__vs__pt" fig.ytitle = "(p+#bar{p})/#pi^{+-}" fig.plot.ymax = .3 fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "Xi_over_pich__vs__pt" fig.plot.ymax = .06 fig.legend.position = 'tl' fig.ytitle = "#Xi/#pi^{+-}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "OmegaCh_over_pich__vs__pt" fig.plot.ymax = .005 fig.legend.position = 'tl' fig.ytitle = "#Omega_{ch}/#pi^{+-} " fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) # Ratios to pi0 fig = get_new_figure() name = "pich_over_pi0__vs__pt" fig.plot.ymax = 2.5 fig.legend.position = 'bl' fig.ytitle = "#pi^{+-}/#pi^{0}" fig.legend.title = 
make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['pi_ch'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "proton_over_pi0__vs__pt" fig.plot.ymax = 1 fig.legend.position = 'tr' fig.ytitle = "p/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "K0S_over_pi0__vs__pt" fig.plot.ymax = 1.4 fig.legend.position = 'tl' fig.ytitle = "K^{0}_{S}/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['k0s'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "Lambda_over_pi0__vs__pt" fig.plot.ymax = .9 fig.legend.position = 'tl' fig.ytitle = "#Lambda/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "Xi_over_pi0__vs__pt" fig.plot.ymax = .08 fig.legend.position = 'tl' fig.ytitle = "#Xi/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "OmegaCh_over_pi0__vs__pt" fig.plot.ymax = .005 fig.legend.position = 'tl' fig.ytitle = "#Omega_{ch}/#pi^{0}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['pi0'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) # Ratios to K0S fig = get_new_figure() name = "proton_over_K0S__vs__pt" fig.plot.ymax = 2.6 fig.legend.position = 'tr' fig.ytitle = "p/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['proton'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "Lambda_over_K0S__vs__pt" fig.plot.ymax = 1 fig.legend.position = 'bl' fig.ytitle = "#Lambda/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['lambda'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "Xi_over_K0S__vs__pt" fig.plot.ymax = .2 fig.legend.position = 'tl' fig.ytitle = "#Xi/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['xi'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = 
"OmegaCh_over_K0S__vs__pt" fig.plot.ymax = .012 fig.legend.position = 'tl' fig.ytitle = "#Omega_{ch}/K^{0}_{S}" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['omega'], mult_binned_pt_dists['k0s'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) fig = get_new_figure() name = "Kaon_over_pich__vs__pt" fig.plot.ymax = 1 fig.legend.position = 'tl' fig.ytitle = "(K^{+} + K^{-}) / (#pi^{+} +#pi^{-})" fig.legend.title = make_estimator_title(est_dir.GetName()) [ fig.add_plottable(h1 / h2, legend_title=title) for h1, h2, title in zip(mult_binned_pt_dists['k_ch'], mult_binned_pt_dists['pi_ch'], perc_titles) ] fig.save_to_root_file(self.f_out, name, dirname) figs.append(fig) return figs
def plot_PNch(self): log.info("Creating P(Nch_est) and P(Nch_refest) histograms") # mult_bin_size = 10 figs = [] for ref_est_name in self.ref_ests: for res_est_dir in get_est_dirs(self.results_post, self.considered_ests): est_name = res_est_dir.GetName() # Figure properties: fig_vs_estmult = Figure() fig_vs_refmult = Figure() fig_vs_estmult.plot.logy = True fig_vs_refmult.plot.logy = True fig_vs_estmult.plot.palette = 'colorblind' fig_vs_refmult.plot.palette = 'colorblind' fig_vs_estmult.legend.position = 'tr' fig_vs_refmult.legend.position = 'tr' fig_vs_estmult.xtitle = "N_{{ch}}^{{{0}}}".format(est_name) fig_vs_refmult.xtitle = "N_{{ch}}^{{{0}}}".format(ref_est_name) fig_vs_estmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(est_name) fig_vs_refmult.ytitle = "P(N_{{ch}}^{{{0}}})".format(ref_est_name) corr_hist = get_correlation_histogram(self.sums, est_name, ref_est_name) # logic when dealing with fixed bins given in Nch: # ------------------------------------------------ # mean_nch_est = corr_hist.GetMean(1) # mean of x axis # nch_max = corr_hist.xaxis.GetNbins() # nch_cutoff = mean_nch_est * mean_mult_cutoff_factor # nch_bins = [(low, low + mult_bin_size) for low in range(0, int(nch_cutoff), mult_bin_size)] # # a large last bin covering the rest: # nch_bins += [(nch_bins[-1][1], nch_max)] # legend_tmpl = "{} < N_{ch} < {}" # logic when dealing with percentile bins: # ---------------------------------------- # event_counter_est = asrootpy(getattr(res_est_dir, "event_counter")) legend_tmpl = "{0}% - {1}%" fig_vs_estmult.legend.title = "Selected in {0}".format(make_estimator_title(ref_est_name)) fig_vs_refmult.legend.title = "Selected in {0}".format(make_estimator_title(est_name)) # WARNING: the following needs tweaking when going back to fixed N_ch bins! for nch_bin, perc_bin in zip(self.nch_edges[ref_est_name], self.perc_bins[ref_est_name]): # vs est_mult: corr_hist.xaxis.SetRange(0, 0) # reset x axis corr_hist.yaxis.SetRange(nch_bin[0], nch_bin[1]) h_vs_est = asrootpy(corr_hist.ProjectionX(gen_random_name())) if h_vs_est.Integral() > 0: h_vs_est.Scale(1.0 / h_vs_est.Integral()) fig_vs_estmult.add_plottable(h_vs_est, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100)) else: log.info("No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen". format(perc_bin, ref_est_name)) for nch_bin, perc_bin in zip(self.nch_edges[est_name], self.perc_bins[est_name]): # vs ref_mult: corr_hist.yaxis.SetRange(0, 0) # reset y axis corr_hist.xaxis.SetRange(*nch_bin) h_vs_ref = asrootpy(corr_hist.ProjectionY(gen_random_name())) if h_vs_ref.Integral() > 0: h_vs_ref.Scale(1.0 / h_vs_ref.Integral()) fig_vs_refmult.add_plottable(h_vs_ref, legend_tmpl.format(perc_bin[1] * 100, perc_bin[0] * 100)) else: log.info( "No charged particles in {0}*100 percentile bin of estimator {1}. This should not happen". format(perc_bin, est_name)) path = res_est_dir.GetPath().split(":")[1] # vs est_mult fig_vs_estmult.save_to_root_file(self.f_out, "PNchEst_binned_in_Nch{0}".format(ref_est_name), path) # vs ref_mult fig_vs_refmult.save_to_root_file(self.f_out, "PNch{0}_binned_in_NchEst".format(ref_est_name), path) figs.append(fig_vs_estmult) figs.append(fig_vs_refmult) return figs
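# gen_random_name() is used above to give every projection a unique object name; ROOT
# registers histograms in the current directory keyed by name, so reusing one name
# across loop iterations would overwrite earlier projections. The original helper is not
# shown in this file; a minimal stand-in (an assumption, not the author's implementation)
# could be:
import uuid


def gen_random_name():
    """Return a unique, ROOT-safe object name (illustrative stand-in)."""
    return "h_" + uuid.uuid4().hex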
def plot_pid_ratio_vs_refmult(self): log.info("Creating plots vs refmult") ratios_dir = self.results_post.GetPath().split(":")[1] + '/pid_ratios_vs_refmult' def get_new_figure(): fig = Figure() fig.plot.ncolors = len(self.considered_ests) fig.xtitle = "N_{ch}|_{" + make_estimator_title('EtaLt05') + "}" fig.plot.xmin = 0 fig.plot.xmax = 60 return fig figs = [] # Proton / pi_ch fig = get_new_figure() pids1, pids2 = ['-2212', '2212'], ['-211', '211'] fig.ytitle = "p/#pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.04, 0.13 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, ) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # K / pi_ch fig = get_new_figure() pids1, pids2 = ['310', '321', '-321'], ['-211', '211'] fig.ytitle = "K^{*}/#pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.09, 0.30 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # Lambda / pi_ch fig = get_new_figure() pids1, pids2 = ['3122'], ['-211', '211'] fig.ytitle = "#Lambda / #pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.005, 0.035 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # Xi / pi_ch fig = get_new_figure() pids1, pids2 = ['3312'], ['-211', '211'] fig.ytitle = "#Xi / #pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.0004, 0.003 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # Omega / pi_ch fig = get_new_figure() pids1, pids2 = ['3334', '-3334'], ['-211', '211'] fig.ytitle = "#Omega / #pi^{+-}" fig.plot.ymin, fig.plot.ymax = 0.00001, 0.0005 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # pi_ch/pi0 fig = get_new_figure() pids1, pids2 = ['-211', '211'], ['111'] fig.ytitle = "#pi^{+-}/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 1.5, 2.2 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # proton / pi0 fig = get_new_figure() pids1, pids2 = ['-2212', '2212'], ['111'] fig.ytitle = "p/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.09, 0.30 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # K / pi0 fig = get_new_figure() pids1, pids2 = ['310', '321', '-321'], ['111'] fig.ytitle = "K^{*}/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.15, 0.50 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for 
g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # Lambda / pi0 fig = get_new_figure() pids1, pids2 = ['3122'], ['111'] fig.ytitle = "#Lambda/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.014, 0.045 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # Xi / pi0 fig = get_new_figure() pids1, pids2 = ['3312'], ['111'] fig.ytitle = "#Xi/#pi^{0}" fig.plot.ymin, fig.plot.ymax = 0.0010, 0.005 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # Omega / pi0 fig = get_new_figure() pids1, pids2 = ['3334', '-3334'], ['111'] fig.ytitle = "#Omega/#pi^{0}" fig.legend.position = 'tl' fig.plot.ymin, fig.plot.ymax = 0.00002, 0.0008 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # K_ch / K0_S fig = get_new_figure() pids1, pids2 = ['321', '-321'], ['310'] fig.ytitle = "(K^{+}+K^{-}) / (2#timesK^{0}_{S})" fig.plot.ymin, fig.plot.ymax = 0.4, 1.5 fig.legend.position = 'tl' graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2, scale=.5) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # K0_S / Lambda fig = get_new_figure() pids1, pids2 = ['310'], ['-3122', '3122'] fig.ytitle = "K^{0}_{S} / #Lambda" fig.plot.ymin, fig.plot.ymax = 1.3, 3.7 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) # K0_S / Xi fig = get_new_figure() pids1, pids2 = ['310'], ['3312'] fig.ytitle = "K^{0}_{S} / #Xi" fig.plot.ymin, fig.plot.ymax = 15, 80 graphs = get_graphs_particle_ratios_vs_refmult(self, pids1, pids2) [fig.add_plottable(g, legend_title=g.GetTitle()) for g in graphs] name = "_".join(pids1) + "_div_" + "_".join(pids2) fig.save_to_root_file(self.f_out, name, ratios_dir) figs.append(fig) return figs
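# Worked example of the object-name convention used throughout the two
# plot_pid_ratio_vs_refmult() variants above: the numerator and denominator PID lists are
# joined with '_' and glued together with '_div_'. For the charged kaon over K0_S panel:
pids1, pids2 = ['321', '-321'], ['310']
name = "_".join(pids1) + "_div_" + "_".join(pids2)
assert name == "321_-321_div_310"
# The scale=.5 passed for that panel is consistent with the factor 2 in its y title
# "(K^{+}+K^{-}) / (2#timesK^{0}_{S})"; the implementation of
# get_graphs_particle_ratios_vs_refmult is not shown here, so this reading of the
# `scale` argument is an assumption.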