def plot_cen_fwd_in_one_canvas(fr_mc_fwd, fr_data_fwd, fr_mc_cen, fr_data_cen, output_name):
    """Draw central and forward fake rates together on a single canvas.

    The forward (|eta|>=1.6) MC rate goes in as the lone "background", the
    forward data rate as "data" (so the ratio panel compares those two), and
    the central (|eta|<1.6) MC/data rates are overlaid as "signals".

    fr_mc_fwd, fr_data_fwd: forward-region fake-rate histograms (MC, data).
    fr_mc_cen, fr_data_cen: central-region fake-rate histograms (MC, data).
    output_name: output PDF basename (without extension).
    """
    # Parse the input arguments
    try:
        ntuple_version = sys.argv[1]
        tag = sys.argv[2]
    except IndexError:  # was a bare "except:", which also hid SystemExit/KeyboardInterrupt
        usage()
    # Integrated luminosity (fb^-1) by data-taking year.
    lumi = None
    if "2016" in ntuple_version: lumi = 35.9
    if "2017" in ntuple_version: lumi = 41.3
    if "2018" in ntuple_version: lumi = 59.74
    if lumi is None:
        # Previously this fell through and died later with a NameError on "lumi";
        # fail early with a clear message instead.
        raise ValueError("ntuple_version must contain 2016, 2017 or 2018, got {}".format(ntuple_version))
    basedir = "plots/{}/{}/lin/".format(ntuple_version, tag)
    # Options
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 180,
        "autobin": False,
        "legend_scalex": 1.5,
        "legend_scaley": 0.8,
        "output_name": basedir + "/" + output_name + ".pdf",
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": True,
        "yield_prec": 3,
        "draw_points": True,
        "hist_line_none": True,
        "show_bkg_errors": True,
        "lumi_value": lumi,
        # "yaxis_range": [0., 1.5] if "_mu" in output_name else [0., 1.0],
        "legend_datalabel": "Data fake-rate (|#eta|#geq1.6)"
    }
    p.plot_hist(sigs=[
        fr_mc_cen.Clone("MC t#bar{t} fake-rate (|#eta|<1.6)"),
        fr_data_cen.Clone("Data fake-rate (|#eta|<1.6)")
    ],
                bgs=[fr_mc_fwd.Clone()],
                data=fr_data_fwd.Clone(),
                syst=None,
                colors=[2001],
                legend_labels=["MC t#bar{t} fake-rate (|#eta|#geq1.6)"],
                options=alloptions)
def compare(output_name, sample1, sample2, histname1, histname2, legend1, legend2):
    """Overlay two unit-normalized histograms: sample2 drawn as "data" on
    top of sample1 drawn as the background shape.

    output_name: output PDF basename under mbb_study/.
    sample1/sample2: ROOT file basenames under hadds/mainAnalysis_Run2/.
    histname1/histname2: histogram names to fetch from each file.
    legend1/legend2: legend labels for the background and data entries.
    """
    in_out_mode = "__MbbInOut" in histname1  # 2-bin in/out summary vs full m_bb spectrum
    # Keep the TFile handles alive until plotting is done: ROOT histograms
    # are owned by the file they were read from.
    file_ref = r.TFile("hadds/mainAnalysis_Run2/{}.root".format(sample1))
    hist_ref = file_ref.Get(histname1).Clone()
    file_tar = r.TFile("hadds/mainAnalysis_Run2/{}.root".format(sample2))
    hist_tar = file_tar.Get(histname2).Clone()
    # Normalize both shapes to unit area.
    for hist in (hist_ref, hist_tar):
        hist.Scale(1. / hist.Integral())
    # Fill color keyed by the reference sample name.
    colors = {
        "ttw": 4021,
        "ttz": 4024,
        "tt1l": 4020,
        "tt2l": 4023,
        "raretop": 2001,
        "alltop": r.kGray,
    }
    options = {
        "legend_datalabel": legend2,
        "legend_ncolumns": 2,
        "legend_scalex": 2.0,
        "legend_scaley": 0.8,
        "nbins": 20,
        "output_name": "mbb_study/{}.pdf".format(output_name),
        "ratio_range": [0.5, 1.5] if in_out_mode else [0., 2.],
        "remove_underflow": True,
        "yaxis_label": "Normalized",
        "xaxis_label": "" if in_out_mode else "m_{bb} [GeV]",
        "lumi_value": 137,
    }
    if in_out_mode:
        options["bin_labels"] = ["Out", "In"]
    p.plot_hist(
        bgs=[hist_ref.Clone()],
        data=hist_tar.Clone(),
        legend_labels=[legend1],
        colors=[colors[sample1]],
        options=options,
    )
def plot_v1():
    """Plot the per-Nmiss pt spectra (Nmiss 0-5) stacked, with the inclusive
    pt spectrum overlaid as "data", giving a hit-efficiency breakdown."""
    # Fetch every available Nmiss slice (0 through 10), mirroring the
    # original behavior even though only the first six are drawn.
    h_by_nmiss = [f.Get("Root__pt_w_hit_miss{}".format(i)).Clone() for i in range(11)]
    # h_all = f.Get("Root__pt_all_w_last_layer").Clone()
    h_all = f.Get("Root__pt_all").Clone()
    p.plot_hist(
        bgs=h_by_nmiss[:6],
        data=h_all,
        legend_labels=["Nmiss{}".format(i) for i in range(6)],
        options={"output_name": "plots_conditional/hit_efficiency.pdf"})
# Script-level setup: merge process histograms, draw a pre-fit plot, then
# build the RooFit objects (data hists, template pdfs, normalizations) for
# an njet-based fit with twz/ttz left free and everything else constrained.
# NOTE(review): the first two statements reference "fname"/"dirpath"/
# "histname" — they appear to be the body of a file loop whose header is
# outside this view; confirm against the full file.
tfiles[fname] = r.TFile(dirpath + fname)
thists[fname] = tfiles[fname].Get(histname)
# "other" = everything that is not ttz/zz/twz: other + higgs + wz + signal.
hother = thists["othernoh.root"].Clone("other")
hother.Add(thists["higgs.root"])
hother.Add(thists["wz.root"])
hother.Add(thists["sig.root"])
htwz = thists["twz.root"].Clone("twz")
httz = thists["ttz.root"].Clone("ttz")
hzz = thists["zz.root"].Clone("zz")
hdata = thists["data.root"].Clone("data")
bgs = [httz, hzz, hother, htwz]
# Pre-fit data/MC comparison plot (clones so the plotter cannot alter the
# histograms that feed the fit below).
p.plot_hist(bgs=[httz.Clone(), hzz.Clone(), hother.Clone(), htwz.Clone()],
            data=hdata,
            options={"output_name": "fit_onenb.pdf", "ratio_range": [0., 2.]})
# Fit observable: njet in [0, 6).
njet = r.RooRealVar("njet", "njet", 0., 6.)
datahists = {}
pdfhists = {}
normhists = {}
for bg in bgs + [hdata]:
    name = bg.GetName()
    # Binned dataset and template pdf per process (and for data).
    datahists[name] = r.RooDataHist(name, name, r.RooArgList(njet), bg)
    pdfhists[name] = r.RooHistPdf(name + "_pdf", name + "_pdf", r.RooArgSet(njet), datahists[name])
    if name not in ["twz", "ttz"]:
        # Non-targets: normalization pinned to +-0.1% of the MC expectation.
        normhists[name] = r.RooRealVar(name + "_n", name + "_n", bg.Integral(), bg.Integral() * 0.999, bg.Integral() * 1.001)
    else:
        # twz/ttz: free to float between 0.1x and 10x the expectation.
        normhists[name] = r.RooRealVar(name + "_n", name + "_n", bg.Integral(), bg.Integral() * 0.1, bg.Integral() * 10.0)
def closure_plot(predict, estimate, nbins=1):
    """Closure test: compare the fake-background prediction to the direct estimate.

    predict, estimate: histogram names of the predicted and estimated yields.
    nbins: rebinning passed through to the plotter (also part of the PDF name).

    Draws the W+jets and ttbar predictions as backgrounds with the summed
    estimate overlaid as "data".
    """
    # Glob the file lists
    bkg_list_wjets = [output_dirpath + "/wj_ht.root"]
    bkg_list_ttbar = [output_dirpath + "/tt_1l.root"]
    # Get all the histogram objects
    h_wjets_predict = ru.get_summed_histogram(bkg_list_wjets, predict)
    h_ttbar_predict = ru.get_summed_histogram(bkg_list_ttbar, predict)
    h_wjets_estimate = ru.get_summed_histogram(bkg_list_wjets, estimate)
    h_ttbar_estimate = ru.get_summed_histogram(bkg_list_ttbar, estimate)
    # Set the names of the histograms
    h_wjets_predict.SetTitle("W predict")
    h_ttbar_predict.SetTitle("Top predict")
    h_wjets_estimate.SetTitle("W estimate")
    h_ttbar_estimate.SetTitle("Top estimate")
    # Color settings
    colors = [
        2005,
        2001,
    ]
    # Options
    # NOTE: the original dict listed "print_yield" twice (False, then True);
    # the dead duplicate was removed, keeping the effective value (True).
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": nbins,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name": "plots/{}/{}/{}/closure/{}_nbins{}.pdf".format(
            input_ntup_tag, analysis_tag, "ss" if isSS else "3l",
            predict + "__" + estimate, nbins),
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "yaxis_log": False,
        "legend_smart": True,
        "lumi_value": lumi,
        "legend_datalabel": "Estimate",
        "yield_prec": 3,
        "print_yield": True,
    }
    # The bkg histogram list
    bgs_list = [h_ttbar_predict.Clone(), h_wjets_predict.Clone()]
    #bgs_list = [ h_ttbar_predict ]
    # Sum the two estimates into a single "data" overlay.
    h_estimate = h_wjets_estimate.Clone("Estimate")
    h_estimate.Add(h_ttbar_estimate)
    ll = ["Top Predict", "W Predict"]
    # Plot them
    p.plot_hist(bgs=bgs_list,
                data=h_estimate,
                colors=colors,
                syst=None,
                legend_labels=ll,
                options=alloptions)
def main_onz_ttz_only():
    """Produce the ttz MET/MT efficiency plots (MC, data, ratio, and the
    numerator/denominator components) for every control/signal channel.

    Replaces 28 copy-pasted p.plot_hist calls with a data-driven loop; the
    call order and every output filename are unchanged.
    """
    # (selection channel, variable, filename tag) — in the original call order.
    configs = [
        ("BTagEMu", "MET", "ttz_met"),
        ("BTagEMu", "MT", "ttz_mt"),
        ("OffZ", "MET", "ttz_sr_met"),
        ("EMu", "MT", "ttz_sr_mt"),
    ]
    # Variants drawn per config, in the original order.
    kinds = ["mc", "data", "ratio", "mc_num", "mc_den", "data_num", "data_den"]
    for channel, var, tag in configs:
        for kind in kinds:
            # Filename conventions: mc/data efficiencies carry the kind as a
            # prefix; everything else goes under "eff_<kind>_".
            if kind == "mc":
                output_name = "exp/mc_eff_{}.pdf".format(tag)
            elif kind == "data":
                output_name = "exp/data_eff_{}.pdf".format(tag)
            else:
                output_name = "exp/eff_{}_{}.pdf".format(kind, tag)
            p.plot_hist(bgs=get_eff_ratios("ttz", channel, var, kind),
                        options={
                            "output_name": output_name,
                            "print_yield": True
                        })
def compare_fake(lepflav, variable):
    """Compare fake-lepton distributions from WZ (4-lepton validation) to
    ttbar and DY (3-lepton validation), shape-normalized.

    lepflav: "El" or "Mu" (selects the histogram name and axis labels).
    variable: histogram variable suffix (e.g. a MotherID or isolation name).

    Produces two plots: WZ vs DY and WZ vs ttbar.
    """
    tag = "test"
    sample_dir = "outputs/Trilep2016_v0.1.11_Trilep2017_v0.1.11_Trilep2018_v0.1.11/y2016_{}_y2017_{}_y2018_{}/".format(
        tag, tag, tag)
    # Index convention below relies on this order: [0]=ttbar, [1]=DY.
    all_process_fnames = [sample_dir + "ttbar.root", sample_dir + "dy.root"]
    histogram_name = "ThreeLeptonFakeValidationEvents{}__{}".format(
        lepflav, variable)
    hists_threelep = pr.get_histograms(all_process_fnames, histogram_name)
    histogram_name = "FourLeptonFakeValidationEvents{}__{}".format(
        lepflav, variable)
    hists_fourlep = pr.get_histograms([sample_dir + "wz.root"], histogram_name)
    # Normalize all shapes to unit area (skip empty histograms).
    for hist in hists_threelep:
        if hist.Integral() != 0:
            hist.Scale(1. / hist.Integral())
    for hist in hists_fourlep:
        if hist.Integral() != 0:
            hist.Scale(1. / hist.Integral())
    # Pick an x-axis label appropriate to the variable/flavor.
    xaxis_label = ""
    if "MotherID" in variable and lepflav == "El":
        xaxis_label = "Flavor of misid'd electron"
    if "MotherID" in variable and lepflav == "Mu":
        xaxis_label = "Flavor of misid'd muon"
    if "relIso03EA" in variable:
        xaxis_label = "I_{rel,0.3,EA,e}"
    if "relIso04DB" in variable:
        xaxis_label = "I_{rel,0.4,DB,#mu}"

    def _options(output_name):
        # The two plots previously duplicated this dict verbatim except for
        # output_name; build a fresh copy per plot in case the plotter
        # mutates its options.
        return {
            "print_yield": True,
            "nbins": 45,
            "signal_scale": 1,
            "legend_scalex": 1.8,
            "legend_scaley": 0.5,
            "legend_ncolumns": 3,
            "legend_smart": False,
            "yaxis_log": True,
            "ymax_scale": 1.2,
            "lumi_value": -1,
            "remove_underflow": True,
            "xaxis_ndivisions": 505,
            "xaxis_label": xaxis_label,
            "ratio_xaxis_title": xaxis_label,
            "ratio_range": [0., 4.],
            "output_name": output_name,
            "legend_datalabel": "WZ",
        }

    # WZ vs DY
    p.plot_hist(data=hists_fourlep[0],
                bgs=[hists_threelep[1]],
                options=_options("plots_fakes/wz_v_dy_{}.pdf".format(histogram_name)),
                colors=[2012],
                sig_labels=[],
                legend_labels=["DY"])
    # WZ vs ttbar
    p.plot_hist(data=hists_fourlep[0],
                bgs=[hists_threelep[0]],
                options=_options("plots_fakes/wz_v_ttbar_{}.pdf".format(histogram_name)),
                colors=[2011],
                sig_labels=[],
                legend_labels=["t#bar{t}"])
def main(args):
    """Top-level plotting driver.

    Globs histogram ROOT files from the tagged input directory, groups them
    into physics processes, merges histograms per group (with xsec/lumi
    scaling for MC, raw for data), and draws every histogram whose name
    matches args.histname.

    Fields of args used here: year, tag, style, histname, data, yaxis_log,
    nbins, xMin, xMax, scale.
    """
    print ("")
    print (" -----------")
    print (" Plotter ")
    print (" -----------")
    print ("")

    # Translate args.year into the subdirectory holding that year's files.
    yearstring = ""
    if args.year == 0:
        yearstring = "combined"
    elif args.year >= 2016 and args.year <= 2018:
        yearstring = str(args.year)
    elif args.year == -1:
        yearstring = "" #for backwards compatibility
    else:
        print("The year you selected ("+str(args.year)+") does not exist.")
        return

    # Constructing input directory name
    username = os.environ['USER']
    #input_dir = "/nfs-7/userdata/{}/tupler_babies/merged/VVV/{}/output/".format(username, args.tag)
    input_dir = "/nfs-7/userdata/{}/tupler_babies/merged/VVV/{}/output/{}/".format(username, args.tag, yearstring)

    # Printing input directory name
    print(">>> Input directory: {}".format(input_dir))

    # Get list of files
    list_of_root_files = glob.glob("{}/*.root".format(input_dir))
    list_of_root_files.sort() # Sort the files

    # Printing list of input histogram files
    print(">>>")
    print(">>> List of files in the input directory")
    print(">>> {}".format(input_dir))
    for root_file in list_of_root_files:
        print(">>> {}".format(os.path.basename(root_file)))

    # Histogram grouping
    # Default grouping setting is shown below and each different grouping option will group them differently
    # The grouping will be defined in python/grouping.py
    grouping_setting = {
        "DY_high.root" : "DY",
        "DY_low.root" : "DY",
        "DYzpt150.root" : "NOTUSED",
        "GGHtoZZto4L.root" : "HZZ",
        "HHAT_0p0.root" : "NOTUSED",
        "HHAT_0p08.root" : "NOTUSED",
        "HHAT_0p12.root" : "NOTUSED",
        "HHAT_0p16.root" : "NOTUSED",
        "QQWW.root" : "WW",
        "STantitop.root" : "ST",
        "STtop.root" : "ST",
        "TGext.root" : "NOTUSED",
        "TTBAR_PH.root" : "TT",
        "TTDL.root" : "TT",
        "TTGdilep.root" : "NOTUSED",
        "TTGsinglelep.root" : "NOTUSED",
        "TTGsinglelepbar.root" : "NOTUSED",
        "TTHH.root" : "TTHH",
        "TTSL.root" : "TT",
        "TTSLtop.root" : "NOTUSED",
        "TTSLtopbar.root" : "NOTUSED",
        "TTTJ.root" : "RareTop",
        "TTTTnew.root" : "TTTT",
        "TTTW.root" : "RareTop",
        "TTWH.root" : "RareTop",
        "TTWW.root" : "RareTop",
        "TTWZ.root" : "RareTop",
        "TTWnlo.root" : "TTW",
        "TTZH.root" : "RareTop",
        "TTZLOW.root" : "TTZ",
        "TTZZ.root" : "RareTop",
        "TTZnlo.root" : "TTZ",
        "TTdilep0jet.root" : "NOTUSED",
        "TTdilep1jet.root" : "NOTUSED",
        "TZQ.root" : "RareTop",
        "VHtoNonBB.root" : "VH",
        "W2Jets.root" : "NOTUSED",
        "W4Jets.root" : "NOTUSED",
        "WGToLNuGext.root" : "NOTUSED",
        "WJets.root" : "W",
        "WJets_HT100To200.root" : "NOTUSED",
        "WJets_HT200To400.root" : "NOTUSED", # HT gen level variable not implemented
        "WJets_HT400To600.root" : "NOTUSED", # HT gen level variable not implemented
        "WJets_HT600To800.root" : "NOTUSED", # HT gen level variable not implemented
        "WJets_HT800To1200.root" : "NOTUSED", # HT gen level variable not implemented
        "WW.root" : "WW",
        "WWDPS.root" : "WW",
        "WWG.root" : "NOTUSED",
        "WWW.root" : "WWW",
        "WWZ.root" : "WWZ",
        "WZ.root" : "WZ",
        "WZG.root" : "NOTUSED",
        "WZZ.root" : "WZZ",
        "ZG.root" : "NOTUSED",
        "ZZ.root" : "ZZ",
        "ZZZ.root" : "ZZZ",
        "ZZcontTo2e2mu.root" : "ZZ",
        "ZZcontTo2e2tau.root" : "ZZ",
        "ZZcontTo2mu2tau.root" : "ZZ",
        "ZZcontTo4mu.root" : "ZZ",
        "ZZcontTo4tau.root" : "ZZ",
    }

    # If user wants to use different grouping use the following hooks
    if args.style == 0: grouping_setting = style.grouping_4LepMET
    if args.style == 1: grouping_setting = style.grouping_3LepMET
    if args.style == 5: grouping_setting = style.grouping_OS2jet
    if args.style == 7: grouping_setting = style.grouping_1Lep4jet
    if args.style == 8: grouping_setting = style.grouping_OS2Fatjet

    # Now loop over files to check the grouping and print a warning that it is skipping some stuff
    root_file_groups = {}
    # Looping over the files found in the input_dir
    for f in list_of_root_files:
        f_basename = os.path.basename(f)
        #add here check for data
        if f_basename not in grouping_setting.keys():
            print("")
            print(">>> Warning:: {} was not defined in the group setting!! {} is pushed to NOTUSED category.".format(f, f))
            group_name = "NOTUSED"
        else:
            # Get the group name
            group_name = grouping_setting[f_basename]
        # if the group name list is not created yet create one
        if group_name not in root_file_groups:
            root_file_groups[group_name] = []
        # Now push the
        root_file_groups[group_name].append(f)

    # Print warning on skipped files
    print("")
    print(">>> Warning:: following histogram files are skipped. Make sure this is OK.")
    if "NOTUSED" in root_file_groups:
        for f in root_file_groups["NOTUSED"]:
            print(">>> {}".format(os.path.basename(f)))

    # Print grouping information
    print("")
    print(">>> Following is how the histograms are grouped.")
    print(">>>")
    print(">>> Group : Sample1, Sample2, Sample3, ...")
    print(">>> --------------------------------------")
    for group in sorted(root_file_groups.keys()):
        if "NOTUSED" in group:
            continue
        print(">>> {} : {}".format(group, ", ".join([ os.path.basename(f).replace(".root", "") for f in root_file_groups[group]])))

    # Setting plotting styles
    # Plotting order
    bkg_plot_order = [
        "DY",
        "HZZ",
        "RareTop",
        "ST",
        "TT",
        "TTHH",
        "TTTT",
        "TTW",
        "TTZ",
        "W",
        "WW",
        "WZ",
        "ZZ",
    ]
    sig_plot_order = [
        "WWW",
        "WWZ",
        "WZZ",
        "ZZZ",
        "VH",
    ]
    legend_labels = [] # Default option don't care
    sig_labels = [] # Default option don't care
    colors = [] # Default option don't care

    # over write styles
    if args.style == 0: # 4LepMET style
        bkg_plot_order = style.bkg_plot_order_4LepMET
        sig_plot_order = style.sig_plot_order_4LepMET
        legend_labels = style.legend_labels_4LepMET
        sig_labels = style.sig_labels_4LepMET
        colors = style.colors_4LepMET
    if args.style == 1: # 3LepMET style
        bkg_plot_order = style.bkg_plot_order_3LepMET
        sig_plot_order = style.sig_plot_order_3LepMET
        legend_labels = style.legend_labels_3LepMET
        sig_labels = style.sig_labels_3LepMET
        colors = style.colors_3LepMET
    if args.style == 5: # OS+jets style
        bkg_plot_order = style.bkg_plot_order_OS2jet
        sig_plot_order = style.sig_plot_order_OS2jet
        legend_labels = style.legend_labels_OS2jet
        sig_labels = style.sig_labels_OS2jet
        colors = style.colors_OS2jet
    if args.style == 7: # 1Lep4jet style
        bkg_plot_order = style.bkg_plot_order_1Lep4jet
        sig_plot_order = style.sig_plot_order_1Lep4jet
        legend_labels = style.legend_labels_1Lep4jet
        sig_labels = style.sig_labels_1Lep4jet
        colors = style.colors_1Lep4jet
    if args.style == 8: # OS+Fatjets style
        bkg_plot_order = style.bkg_plot_order_OS2Fatjet
        sig_plot_order = style.sig_plot_order_OS2Fatjet
        legend_labels = style.legend_labels_OS2Fatjet
        sig_labels = style.sig_labels_OS2Fatjet
        colors = style.colors_OS2Fatjet

    # Print the options set
    print("")
    print(">>> ===== Summary of plotting options =====")
    print(">>> Bkg plot order : {} ".format(", ".join(bkg_plot_order)))
    print(">>> Bkg legend : {} ".format(", ".join(legend_labels)))
    print(">>> Bkg colors : {} ".format(", ".join([str(c) for c in colors])))
    print(">>> Sig plot order : {} ".format(", ".join(sig_plot_order)))
    print(">>> Sig legend : {} ".format(", ".join(sig_labels)))
    print("")

    #-------------------------------------------------------
    #
    # Now accessing TFiles and THists and plotting...
    #
    #-------------------------------------------------------
    import ROOT as r

    # Open all TFiles
    tfiles_by_group = {}
    for group in sorted(root_file_groups.keys()):
        # If NOTUSED skip the group
        if "NOTUSED" in group:
            continue
        # Create a list that holds tfiles for the group
        tfiles_by_group[group] = []
        # Now open and push it to tfiles_by_group
        for f in root_file_groups[group]:
            tfiles_by_group[group].append(r.TFile(f))

    # Open last group's first TFile (assuming same histograms exist on every file) and obtain list of histogram names
    hist_names = []
    for key in tfiles_by_group[group][0].GetListOfKeys():
        if "TH" in tfiles_by_group[group][0].Get(key.GetName()).ClassName(): # this is a histogram file
            hist_names.append(key.GetName())

    # Keep only the histogram names matching the user's (glob-style) pattern.
    hist_names_to_plot = []
    for hist_name in hist_names:
        if fnmatch.fnmatch(hist_name, args.histname):
            hist_names_to_plot.append(hist_name)

    # Loop over the histograms to plot
    for hist_name in hist_names_to_plot:
        # Open all hists
        thists_by_group = {}
        for group in sorted(tfiles_by_group.keys()):
            # If NOTUSED skip the group
            if "NOTUSED" in group:
                continue
            # Create a list that holds histograms for the group
            thists_by_group[group] = []
            # Loop over the tfiles
            for f in tfiles_by_group[group]:
                # Retrieve the histogram after scaling it appropriately according to its cross section and lumi of the year
                if "Data" in group:
                    thists_by_group[group].append(get_raw_histogram(f, hist_name))
                else:
                    thists_by_group[group].append(get_xsec_lumi_scaled_histogram(f, hist_name))

        # Now create a list of histogram one per each grouping
        hists = {}
        for group in sorted(thists_by_group.keys()):
            # If NOTUSED skip the group
            if "NOTUSED" in group:
                continue
            # Loop over the thists
            for h in thists_by_group[group]:
                # If the group key does not exist in the mapping, it means a histogram for it hasn't be created yet
                if group not in hists:
                    hists[group] = h.Clone(group)
                # If it exists, then there is a base histogram, we add to it
                else:
                    hists[group].Add(h)
                # Data histograms are additionally merged into a single
                # "Data" entry, restricted to the selected year's sample.
                # NOTE(review): each data histogram is added both to its own
                # per-year group above and to "Data" here — confirm this
                # double bookkeeping is intended.
                if "Data" in group:
                    if args.year==2016 and group != "Data_2016": continue
                    if args.year==2017 and group != "Data_2017": continue
                    if args.year==2018 and group != "Data_2018": continue
                    if "Data" not in hists:
                        hists["Data"] = h.Clone("Data")
                    else:
                        hists["Data"].Add(h)

        # # Printing histograms we have
        # for group in hists:
        #     hists[group].Print("all")

        #--------------------------
        #
        # Now actual plotting
        #
        #--------------------------
        # x-axis range: user override via xMin/xMax (-999 = "use histogram edges").
        xminimum = args.xMin if args.xMin!=-999 else hists[bkg_plot_order[0]].GetXaxis().GetBinLowEdge(1)
        xmaximum = args.xMax if args.xMax!=-999 else hists[bkg_plot_order[0]].GetXaxis().GetBinLowEdge(hists[bkg_plot_order[0]].GetNbinsX()+1)
        x_range = [] if (args.xMin==-999 and args.xMax==-999) else [xminimum,xmaximum]

        p.plot_hist(
            bgs = [ hists[group].Clone() for group in bkg_plot_order ],
            sigs = [ hists[group].Clone() for group in sig_plot_order ],
            data = hists["Data"] if args.data else None,
            colors = colors,
            legend_labels = legend_labels,
            sig_labels = sig_labels,
            options={
                "yaxis_log":args.yaxis_log,
                "nbins":args.nbins,
                "output_name": "plots/{}/{}/{}.pdf".format(args.tag,yearstring,hist_name),#"output_name": "plots/{}/{}.pdf".format(args.tag,hist_name),
                "lumi_value": "{:.1f}".format(get_lumi(args)),
                "print_yield": True,
                "legend_ncolumns": 3,
                "legend_scalex": 2,
                "xaxis_range" : x_range,
                "remove_underflow":False,
                "bkg_sort_method":"unsorted",
                "signal_scale":args.scale,
            },
        )
def main_fake_rate_measurement(prefix, output_name, etaregion="", procname="ttbar6"):
    """Measure the lepton fake rate in data (data minus prompt) and ttbar MC.

    Reads tight/loose yield tables, forms FR = N(tight fakes)/N(loose fakes)
    per pt bin, fills variable-bin histograms and plots data FR over MC FR.

    prefix: table-name prefix for the yield tables.
    output_name: output PDF basename (without extension).
    etaregion: optional eta-region suffix on the table names.
    procname: MC process column name in the ttbar tables.

    Returns (h_fr_mc, h_fr_data) clones of the fake-rate histograms.
    """
    # Parse the input arguments
    try:
        ntuple_version = sys.argv[1]
        tag = sys.argv[2]
    except IndexError:  # was a bare "except:", which also hid SystemExit/KeyboardInterrupt
        usage()
    # Integrated luminosity (fb^-1) by data-taking year.
    lumi = None
    if "2016" in ntuple_version: lumi = 35.9
    if "2017" in ntuple_version: lumi = 41.3
    if "2018" in ntuple_version: lumi = 59.74
    if lumi is None:
        # Previously this fell through and died later with a NameError on "lumi".
        raise ValueError("ntuple_version must contain 2016, 2017 or 2018, got {}".format(ntuple_version))
    basedir = "plots/{}/{}/lin/".format(ntuple_version, tag)

    # Denominator : fake from data (i.e. data - prompt)
    yields_ddfake = rt.read_table(basedir + prefix + "Prompt__lepFakeCand2PtFineVarBin" + etaregion + ".txt")
    yields_ddfake["ddfake"] = [datacount - bkgcount
                               for datacount, bkgcount in zip(yields_ddfake["data"], yields_ddfake["Total"])]
    # Numerator : fake from data (i.e. data - prompt)
    yields_ddfake_tight = rt.read_table(basedir + prefix + "TightPrompt__lepFakeCand2PtFineVarBin" + etaregion + ".txt")
    yields_ddfake_tight["ddfake"] = [datacount - bkgcount
                                     for datacount, bkgcount in zip(yields_ddfake_tight["data"], yields_ddfake_tight["Total"])]

    # Data fake rate per bin; guard against empty denominator bins.
    fr_data = []
    for den, num in zip(yields_ddfake["ddfake"], yields_ddfake_tight["ddfake"]):
        fr_data.append(num / den if den.val != 0 else E(0, 0))
    # Drop the two leading underflow entries and the trailing overflow entry.
    fr_data = fr_data[2:-1]
    print(fr_data)

    # Denominator: Fake directly from ttbar MC
    yields_ttbar = rt.read_table(basedir + prefix + "Fake__lepFakeCand2PtFineVarBin" + etaregion + ".txt")
    # Numerator: tight fakes from ttbar MC
    yields_ttbar_tight = rt.read_table(basedir + prefix + "TightFake__lepFakeCand2PtFineVarBin" + etaregion + ".txt")
    fr_mc = []
    for den, num in zip(yields_ttbar[procname], yields_ttbar_tight[procname]):
        fr_mc.append(num / den if den.val != 0 else E(0, 0))
    # NOTE: printed before trimming (unlike fr_data) to preserve the
    # original diagnostic output.
    print(fr_mc)
    fr_mc = fr_mc[2:-1]

    # bin boundaries
    # bounds = [0., 10., 15., 20., 30., 150.]
    # bounds = [0., 10., 20., 70.]
    bounds = [0., 10., 20., 30., 50., 70.]
    # Distinct names: both histograms were previously named "FR", which makes
    # ROOT warn about (and potentially clobber) the same-named object.
    h_fr_data = r.TH1F("FR_data", "", len(bounds) - 1, array('d', bounds))
    h_fr_mc = r.TH1F("FR_mc", "", len(bounds) - 1, array('d', bounds))
    # Filling starts at bin 2 — the first bin (0-10) stays empty,
    # presumably below the pt threshold of the measurement (TODO confirm).
    for idx, fr in enumerate(fr_data):
        h_fr_data.SetBinContent(idx + 2, fr.val)
        h_fr_data.SetBinError(idx + 2, fr.err)
    for idx, fr in enumerate(fr_mc):
        h_fr_mc.SetBinContent(idx + 2, fr.val)
        h_fr_mc.SetBinError(idx + 2, fr.err)

    # Options
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 180,
        "autobin": False,
        "legend_scalex": 0.8,
        "legend_scaley": 0.8,
        "output_name": basedir + "/" + output_name + ".pdf",
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": True,
        "yield_prec": 3,
        "draw_points": True,
        "hist_line_none": True,
        "show_bkg_errors": True,
        "lumi_value": lumi,
        # "yaxis_range": [0., 1],
    }
    p.plot_hist(
        sigs=[],
        bgs=[h_fr_mc.Clone()],
        data=h_fr_data.Clone(),
        syst=None,
        colors=[2001],
        legend_labels=["MC t#bar{t}"],
        options=alloptions)
    return h_fr_mc.Clone(), h_fr_data.Clone()
def plot(hwwhists, output_name, extraoptions):
    """Draw the HWW histogram collection in linear and log scale.

    hwwhists: object whose .clone() returns a holder exposing .bgs,
        .bgs_sigs and .sigs histogram lists.
    output_name: plot name; "yield"/"cutflow" in the name switches off
        automatic signal scaling.
    extraoptions: dict merged over the default plotter options.
    """
    # Colors: full palette when backgrounds exist, red/blue fallback otherwise.
    bgs = hwwhists.clone().bgs
    colors = [2004, 2003, 2005, 2001, 2, 4] if len(bgs) > 0 else [2, 4]
    # # Run scan only if the both sig and bkg exists (scan options were removed
    # # along with this dead call; re-add them if the scan is revived)
    # if len(bgs) > 0 and len(sigs) > 0:
    #     p.plot_cut_scan(
    #         bgs = bgs,
    #         sigs = sigs,
    #         data = None,
    #         colors = colors,
    #         syst = None,
    #         options=alloptions)

    # Linear-scale plot. Fresh clones per plot so the plotter cannot mutate
    # histograms shared with the next plot.
    bgs_sigs = hwwhists.clone().bgs_sigs
    sigs = hwwhists.clone().sigs
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 60,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name": "plots/lin/{}.pdf".format(output_name),
        "bkg_sort_method": "unsorted",
        "no_ratio": True,
        "print_yield": True,
        "signal_scale": 1 if "yield" in output_name or "cutflow" in output_name else "auto",
        # "divide_by_first_bin": True if "cutflow" in output_name else False,
        "yield_prec": 3,  # was "3 if 'cutflow' in output_name else 3" — both branches were 3
        #"yaxis_range": [0., 500],
    }
    alloptions.update(extraoptions)
    p.plot_hist(bgs=bgs_sigs,
                sigs=sigs,
                data=None,
                colors=colors,
                syst=None,
                options=alloptions)

    # Log-scale plot, again with fresh clones.
    bgs_sigs = hwwhists.clone().bgs_sigs
    sigs = hwwhists.clone().sigs
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 60,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name": "plots/log/{}.pdf".format(output_name),
        "bkg_sort_method": "unsorted",
        "no_ratio": True,
        "print_yield": False,
        "signal_scale": 1 if "yield" in output_name or "cutflow" in output_name else "auto",
        # "divide_by_first_bin": True if "cutflow" in output_name else False,
        "yaxis_log": True,
        "legend_smart": False,
    }
    alloptions.update(extraoptions)
    p.plot_hist(bgs=bgs_sigs,
                sigs=sigs,
                data=None,
                colors=colors,
                syst=None,
                options=alloptions)
def fakerate(num, den, ps=0, sf=0, sferr=0, tfile=None):
    """Plot the measured fake rate against the data-driven QCD prediction,
    and optionally persist the fake rates as variable-binned histograms.

    num, den: numerator/denominator histogram names ("<region>__<hist>").
    ps: prescale argument forwarded to get_fakerate_histograms.
    sf, sferr: EWK scale factor and its uncertainty; the prediction is
        re-derived at sf+sferr to assign a systematic.
    tfile: optional output TFile; when given, fake rates are written out.
    """
    # Obtain histograms (nominal EWK scale factor)
    h_num, h_num_qcd_mu, h_num_qcd_esum, h_num_qcd_el, h_num_qcd_bc = get_fakerate_histograms(
        num, den, ps, sf)
    # Same set with the EWK SF shifted up by one sigma
    herr_num, herr_num_qcd_mu, herr_num_qcd_esum, herr_num_qcd_el, herr_num_qcd_bc = get_fakerate_histograms(
        num, den, ps, sf + sferr)
    # Set data-driven QCD estimate systematics stemming from EWK SF uncertainty
    add_systematics(h_num, herr_num)
    # Options
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 180,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name": "plots/{}/{}/{}/fakerate/{}.pdf".format(input_ntup_tag, analysis_tag, "ss" if isSS else "3l", num + "__" + den),
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": True,
        "yield_prec": 3,
        "draw_points": True,
        "lumi_value": 41.3,
    }
    # Muon channel uses the muon QCD estimate; electron channel the summed one.
    bgs_list = [h_num_qcd_mu] if "Mu" in num else [h_num_qcd_esum]
    #bgs_list = [h_num_qcd_mu] if "Mu" in num else [h_num_qcd_esum, h_num_qcd_el, h_num_qcd_bc]
    # Electron channel also overlays the light/heavy-flavor components as "signals".
    sigs_list = [] if "Mu" in num else [h_num_qcd_el, h_num_qcd_bc]
    # Special label handling instance for pt-eta rolled out case
    histname = num.split("__")[1]
    if histname == "ptcorretarolledcoarse":
        xbounds = get_bounds_from_source_file("ptcorrcoarse_bounds")
        ybounds = get_bounds_from_source_file("eta_bounds")
        # One bin label per (pt, eta) cell of the unrolled 2D histogram;
        # the unrolled bin index runs pt-fastest within each eta slice.
        for jndex in xrange(len(ybounds) - 1):
            for index in xrange(len(xbounds) - 1):
                #label = "Ptcorr #in ({}, {}) and |#eta| #in ({:.1f}, {:.1f})".format(int(xbounds[index]), int(xbounds[index+1]), ybounds[jndex], ybounds[jndex+1])
                label = "({}, {}), ({:.1f}, {:.1f})".format(
                    int(xbounds[index]), int(xbounds[index + 1]),
                    ybounds[jndex], ybounds[jndex + 1])
                for h in sigs_list + bgs_list + [h_num]:
                    h.GetXaxis().SetBinLabel(
                        (jndex) * (len(xbounds) - 1) + (index + 1), label)
        # Widen the margins so the long bin labels fit on the canvas.
        alloptions["canvas_main_rightmargin"] = 1. / 6.
        alloptions["canvas_ratio_rightmargin"] = 1. / 6.
        alloptions["canvas_ratio_bottommargin"] = 0.5
    p.plot_hist(
        sigs=sigs_list,
        bgs=bgs_list,
        data=h_num,
        #data = None,
        syst=None,
        options=alloptions)
    # Optionally write the fake rates to the provided output file.
    if tfile:
        tfile.cd()
        channel = "Mu" if "Mu" in num else "El"
        histname = num.split("__")[1]
        data_fakerate = h_num.Clone(channel + "_" + histname + "_data_fakerate")
        qcd_fakerate = bgs_list[0].Clone(channel + "_" + histname + "_qcd_fakerate")
        # Re-bin according to which variable this histogram represents.
        if histname == "etacorrvarbin":
            create_varbin(data_fakerate, "eta_bounds").Write()
            create_varbin(qcd_fakerate, "eta_bounds").Write()
        elif histname == "ptcorrvarbin":
            create_varbin(data_fakerate, "ptcorr_bounds").Write()
            create_varbin(qcd_fakerate, "ptcorr_bounds").Write()
        elif histname == "ptcorrvarbincoarse":
            create_varbin(data_fakerate, "ptcorrcoarse_bounds").Write()
            create_varbin(qcd_fakerate, "ptcorrcoarse_bounds").Write()
        elif histname == "ptcorretarolled":
            create_varbin(data_fakerate, "ptcorr_bounds", "eta_bounds").Write()
            create_varbin(qcd_fakerate, "ptcorr_bounds", "eta_bounds").Write()
        elif histname == "ptcorretarolledcoarse":
            create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds").Write()
            create_varbin(qcd_fakerate, "ptcorrcoarse_bounds", "eta_bounds").Write()
            # Closure 3l mu 51% 3l el 1% ss mu 33% ss el 3% (1.51, 0.994, 1.329, 0.978)
            # Extra "closure" variants scaled by the per-channel non-closure.
            if channel == "Mu":
                if isSS:
                    create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.33, "closure").Write()
                else:
                    create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.51, "closure").Write()
            elif channel == "El":
                if isSS:
                    create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.03, "closure").Write()
                else:
                    create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.01, "closure").Write()
if ilayer == 0: continue if i == 1: h_layers_pt_denom.Fill(ilayer - 1) if both_layer_has_hits[ilayer - 1]: h_layers_pt_numer.Fill(ilayer - 1) if pt > 1.2 and pt < 1.5: for ilayer, i in enumerate(both_layer_has_hits): if ilayer == 0: continue if i == 1: h_layers_pt_denom_pt1p2_1p5.Fill(ilayer - 1) if both_layer_has_hits[ilayer - 1]: h_layers_pt_numer_pt1p2_1p5.Fill(ilayer - 1) print ntrk_12hits p.plot_hist(bgs=[h_layers_pt_denom], data=h_layers_pt_numer, options={ "output_name": "plots_hit_eff/layers_eff.pdf", "print_yield": True }) p.plot_hist(bgs=[h_layers_pt_denom_pt1p2_1p5], data=h_layers_pt_numer_pt1p2_1p5, options={ "output_name": "plots_hit_eff/layers_eff_pt1p2_1p5.pdf", "print_yield": True })
import sys # f = r.TFile("results/pt0p5_2p0_20200120/fulleff_pt0p5_2p0_mtv.root") # f = r.TFile("results/e200_20200120/fulleff_e200_mtv.root") # f = r.TFile("results/e200_20200121/fulleff_e200_mtv.root") # f = r.TFile("results/pt0p5_2p0_20200121/fulleff_pt0p5_2p0_mtv.root") # f = r.TFile("results/pt0p5_2p0_20200127/fulleff_pt0p5_2p0_mtv.root") # f = r.TFile("results/pt0p5_2p0_conditional/fulleff_pt0p5_2p0_mtv.root") # f = r.TFile("results/pt0p5_2p0_conditional_pt1p0/fulleff_pt0p5_2p0_mtv.root") # f = r.TFile("results/pt0p5_2p0_20200131_v1/fulleff_pt0p5_2p0_mtv.root") f = r.TFile("results/e200_20200204/fulleff_e200_mtv.root") prop_phi_2Slayer2 = f.Get("Root__prop_phi_2Slayer2").Clone() p.plot_hist(bgs=[prop_phi_2Slayer2], legend_labels=["prop #phi layer6"], options={ "output_name": "plots_conditional/hit_nmiss2_phi_prop.pdf", "ratio_range": [0., 0.4], }) prop_phi_2Slayer2 = f.Get("Root__prop_phi_2Slayer2_zoom_m02_p02").Clone() p.plot_hist(bgs=[prop_phi_2Slayer2], legend_labels=["prop #phi layer6"], options={ "output_name": "plots_conditional/hit_nmiss2_phi_prop_m02_p02.pdf", "ratio_range": [0., 0.4], "remove_overflow": True, "remove_underflow": True }) prop_phi_2Slayer1 = f.Get("Root__prop_phi_2Slayer1").Clone()
def main_old():
    """Produce the TTZ/ZZ control-region efficiency and transfer-factor plots.

    Two families of plots are written:
      * MET/MT selection-efficiency comparisons (MC, data, and their ratio,
        plus raw numerators/denominators for the on-Z ZZ MET case), via
        ``get_eff_ratios``.
      * Transfer factors ("alpha") from the b-tag e-mu / on-Z control regions
        to the signal-like regions, via ``get_alpha``.

    Bug fix: the num/den/eff alpha plots previously all used the SAME output
    name (e.g. ``exp/ttz_emu_alpha.pdf`` three times), so only the last plot
    survived on disk.  Each stage now gets a distinct ``_num``/``_den``/``_eff``
    suffix.
    """
    # Output-name pattern per efficiency-ratio kind, formatted with
    # (sample, lower-cased variable), reproducing the original file names.
    name_fmt = {
        "mc": "exp/mc_eff_{}_{}.pdf",
        "data": "exp/data_eff_{}_{}.pdf",
        "ratio": "exp/eff_ratio_{}_{}.pdf",
        "mc_num": "exp/eff_mc_num_{}_{}.pdf",
        "mc_den": "exp/eff_mc_den_{}_{}.pdf",
        "data_num": "exp/eff_data_num_{}_{}.pdf",
        "data_den": "exp/eff_data_den_{}_{}.pdf",
    }
    # (sample, control region, variable, kinds to plot) — order preserved
    # from the original hand-written call sequence.
    eff_jobs = [
        ("ttz", "BTagEMu", "MET", ["mc", "data", "ratio"]),
        ("ttz", "BTagEMu", "MT", ["mc", "data", "ratio"]),
        ("zz", "OnZ", "MET",
         ["mc", "data", "ratio", "mc_num", "mc_den", "data_num", "data_den"]),
        ("zz", "OnZ", "MT", ["mc", "data", "ratio"]),
    ]
    for sample, region, var, kinds in eff_jobs:
        for kind in kinds:
            p.plot_hist(bgs=get_eff_ratios(sample, region, var, kind),
                        options={
                            "output_name": name_fmt[kind].format(sample, var.lower()),
                            "print_yield": True
                        })

    # Transfer-factor (alpha) plots: numerator, denominator, and ratio for
    # each (process, target region, control region) combination.
    alpha_jobs = [
        ("ttz", "ChannelEMu", "ChannelBTagEMu", "ttz_emu"),
        ("ttz", "ChannelOffZ", "ChannelBTagEMu", "ttz_offz"),
        ("zz", "ChannelEMu", "ChannelOnZ", "zz_emu"),
        ("zz", "ChannelOffZ", "ChannelOnZ", "zz_offz"),
    ]
    for sample, target_region, control_region, tag in alpha_jobs:
        for stage in ["num", "den", "eff"]:
            p.plot_hist(bgs=get_alpha(sample, target_region, control_region, stage),
                        options={
                            # Distinct name per stage (was a single shared pdf).
                            "output_name": "exp/{}_alpha_{}.pdf".format(tag, stage),
                            "print_yield": True
                        })
def main_onz_zz_met_only():
    """Plot only the on-Z ZZ MET set: MC/data efficiencies, their ratio,
    and the raw numerator/denominator histograms for both MC and data."""
    # (kind passed to get_eff_ratios, output pdf) in the original call order.
    jobs = [
        ("mc", "exp/mc_eff_zz_met.pdf"),
        ("data", "exp/data_eff_zz_met.pdf"),
        ("ratio", "exp/eff_ratio_zz_met.pdf"),
        ("mc_num", "exp/eff_mc_num_zz_met.pdf"),
        ("mc_den", "exp/eff_mc_den_zz_met.pdf"),
        ("data_num", "exp/eff_data_num_zz_met.pdf"),
        ("data_den", "exp/eff_data_den_zz_met.pdf"),
    ]
    for kind, pdf in jobs:
        p.plot_hist(bgs=get_eff_ratios("zz", "OnZ", "MET", kind),
                    options={"output_name": pdf, "print_yield": True})
def different_composition_test(histname):
    """Study how varying each top-background component's normalization by a
    factor 3 (up) or 1/3 (down) changes the shape of the summed, normalized
    background for the given histogram name.

    For each sample in turn, builds an "up" and a "down" total (the varied
    sample scaled, all totals re-normalized to unit integral) and writes
    several comparison pdfs under mbb_study/.
    """
    samples = ["ttw", "ttz", "tt1l", "tt2l", "raretop"]
    # Keep the TFiles alive in a list — the histograms retrieved below are
    # owned by their files, so the files must not be garbage collected.
    files = []
    hists = []
    for sample in samples:
        ref_file = r.TFile("hadds/mainAnalysis_Run2/{}.root".format(sample))
        files.append(ref_file)
        hists.append(ref_file.Get(histname))

    # Nominal: straight sum of the five components (normalized later).
    nominal_totalbkg = hists[0].Clone("alltop")
    nominal_totalbkg.Add(hists[1].Clone())
    nominal_totalbkg.Add(hists[2].Clone())
    nominal_totalbkg.Add(hists[3].Clone())
    nominal_totalbkg.Add(hists[4].Clone())

    # Vary each i-th ones by factor 2 to get up-down variation
    # (NOTE(review): the scale factors actually applied are x3 and x1/3,
    # not x2 — the legend labels below still say "x2"/"x1/2"; confirm
    # which was intended.)
    hists_up = []          # per-variation normalized up totals
    hists_dn = []          # per-variation normalized down totals
    hists_up_indiv = []    # per-variation lists of individually scaled components
    hists_dn_indiv = []
    for i in range(len(samples)):
        # Up variation: scale sample i by 3, sum, normalize to unit area,
        # and apply the same normalization to each component so the stack
        # still adds up to the total.
        up_totalbkg = hists[0].Clone("alltop")
        up_totalbkg.Reset()
        uplist = []
        for j in range(len(samples)):
            temp = hists[j].Clone()
            if i == j:
                temp.Scale(3.)
            up_totalbkg.Add(temp)
            uplist.append(temp.Clone())
        sf = 1. / up_totalbkg.Integral()
        up_totalbkg.Scale(sf)
        hists_up.append(up_totalbkg)
        for h in uplist:
            h.Scale(sf)
        hists_up_indiv.append(uplist)

        # Down variation: same procedure with sample i scaled by 1/3.
        dn_totalbkg = hists[0].Clone("alltop")
        dn_totalbkg.Reset()
        dnlist = []
        for j in range(len(samples)):
            temp = hists[j].Clone()
            if i == j:
                temp.Scale(1 / 3.)
            dn_totalbkg.Add(temp)
            dnlist.append(temp.Clone())
        sf = 1. / dn_totalbkg.Integral()
        dn_totalbkg.Scale(sf)
        hists_dn.append(dn_totalbkg)
        for h in dnlist:
            h.Scale(sf)
        hists_dn_indiv.append(dnlist)

    # Normalize the nominal total (and its components) to unit area too.
    sf = 1. / nominal_totalbkg.Integral()
    nominal_totalbkg.Scale(sf)
    for h in hists:
        h.Scale(sf)

    # One color per sample, in the same order as `samples`.
    colors = [
        4021,
        4024,
        4020,
        4023,
        2001,
    ]

    for i, (uphist, dnhist, uphist_list, dnhist_list) in enumerate(
            zip(hists_up, hists_dn, hists_up_indiv, hists_dn_indiv)):
        # Shared options dict, mutated (output_name) before each plot call.
        options = {
            "legend_ncolumns": 3,
            "legend_scalex": 2.0,
            "legend_scaley": 0.8,
            "nbins": 20,
            "output_name":
            "mbb_study/{}_{}_upvaried_detail.pdf".format(histname, samples[i]),
            "ratio_range": [0.5, 1.5] if "__MbbInOut" in histname else [0., 2.],
            "remove_underflow": True,
            "yaxis_label": "Normalized",
            "xaxis_label": "" if "__MbbInOut" in histname else "m_{bb} [GeV]",
            "lumi_value": 137,
        }
        if "__MbbInOut" in histname:
            options["bin_labels"] = ["Out", "In"]
        # Detail plot: up-varied components stacked vs the nominal total.
        options["output_name"] = "mbb_study/{}_{}_upvaried_detail.pdf".format(
            histname, samples[i])
        p.plot_hist(
            bgs=[h.Clone() for h in uphist_list],
            data=nominal_totalbkg.Clone(),
            legend_labels=[
                "{}x2".format(samples[j]) if j == i else samples[j]
                for j in range(len(samples))
            ],
            colors=colors,
            options=options,
        )
        # Detail plot: down-varied components stacked vs the nominal total.
        options["output_name"] = "mbb_study/{}_{}_dnvaried_detail.pdf".format(
            histname, samples[i])
        p.plot_hist(
            bgs=[h.Clone() for h in dnhist_list],
            data=nominal_totalbkg.Clone(),
            legend_labels=[
                "{}x1/2".format(samples[j]) if j == i else samples[j]
                for j in range(len(samples))
            ],
            colors=colors,
            options=options,
        )
        # Summary plot: nominal total with up/down totals overlaid as signals.
        options["output_name"] = "mbb_study/{}_{}_varied.pdf".format(
            histname, samples[i])
        p.plot_hist(
            bgs=[nominal_totalbkg.Clone()],
            sigs=[uphist.Clone(), dnhist.Clone()],
            legend_labels=["All Top Bkg."],
            sig_labels=[
                "{}x2".format(samples[i]), "{}x1/2".format(samples[i])
            ],
            colors=[r.kGray],
            options=options,
        )
        # Ratio-style plots: nominal as background, varied total as "data".
        options["output_name"] = "mbb_study/{}_{}_up_varied.pdf".format(
            histname, samples[i])
        p.plot_hist(
            bgs=[nominal_totalbkg.Clone()],
            data=uphist.Clone(),
            legend_labels=["All Top Bkg."],
            # sig_labels=["{}x2".format(samples[i]), "{}x1/2".format(samples[i])],
            colors=[r.kGray],
            options=options,
        )
        options["output_name"] = "mbb_study/{}_{}_dn_varied.pdf".format(
            histname, samples[i])
        p.plot_hist(
            bgs=[nominal_totalbkg.Clone()],
            data=dnhist.Clone(),
            legend_labels=["All Top Bkg."],
            # sig_labels=["{}x2".format(samples[i]), "{}x1/2".format(samples[i])],
            colors=[r.kGray],
            options=options,
        )
def plot(histnames, ps=0, sf=0):
    """Plot data vs MC for the fake-rate measurement region and return the
    histograms needed for the data-driven QCD estimate.

    Parameters
    ----------
    histnames : str
        Histogram name to retrieve from every sample file; also selects the
        data stream ("Mu"/"El" in the name) and the output pdf name.
    ps : number
        Prescale correction applied to data when > 0.
    sf : number
        EWK scale factor applied to the prompt backgrounds when > 0.

    Returns
    -------
    (h_ddqcd, h_data, h_bkg, h_qcd_mu, h_qcd_el, h_qcd_bc)
        Data-driven QCD (data minus prompt MC), the data, the summed prompt
        background, and the three MC QCD histograms.

    Fixes relative to the previous revision:
      * removed a dead statement ``bgs_list = [...],`` whose trailing comma
        built a one-element tuple that was immediately overwritten;
      * replaced tautological ``True if X else False`` conditionals with the
        boolean expressions themselves (behavior unchanged).
    """
    # Per-process input file lists.
    bkg_list_wjets = [output_dirpath + "/wj_incl.root"]
    bkg_list_dy = [output_dirpath + "/dy.root"]
    bkg_list_ttbar = [output_dirpath + "/tt_incl.root"]
    bkg_list_vv = [output_dirpath + "/ww.root", output_dirpath + "/wz.root"]
    bkg_list_qcd_mu = [output_dirpath + "/qcd_mu.root"]
    bkg_list_qcd_el = [output_dirpath + "/qcd_em.root"]
    bkg_list_qcd_bc = [output_dirpath + "/qcd_bc.root"]
    bkg_list_all = bkg_list_wjets + bkg_list_dy + bkg_list_ttbar + bkg_list_vv

    # Data stream depends on the channel encoded in the histogram name.
    if "Mu" in histnames:
        data_list = [output_dirpath + "/data_mu.root"]
    elif "El" in histnames:
        data_list = [output_dirpath + "/data_el.root"]
    else:
        data_list = [
            output_dirpath + "/data_mu.root",
            output_dirpath + "/data_el.root"
        ]

    # Retrieve and label all histograms.
    h_wjets = ru.get_summed_histogram(bkg_list_wjets, histnames)
    h_dy = ru.get_summed_histogram(bkg_list_dy, histnames)
    h_ttbar = ru.get_summed_histogram(bkg_list_ttbar, histnames)
    h_vv = ru.get_summed_histogram(bkg_list_vv, histnames)
    h_qcd_mu = ru.get_summed_histogram(bkg_list_qcd_mu, histnames)
    h_qcd_el = ru.get_summed_histogram(bkg_list_qcd_el, histnames)
    h_qcd_bc = ru.get_summed_histogram(bkg_list_qcd_bc, histnames)
    h_data = ru.get_summed_histogram(data_list, histnames)
    h_wjets.SetName("W")
    h_dy.SetName("Z")
    h_ttbar.SetName("Top")
    h_vv.SetName("VV")
    h_qcd_mu.SetName("QCD(#mu)")
    h_qcd_el.SetName("QCD(e)")
    h_qcd_bc.SetName("QCD(bc)")
    h_data.SetName("Data")

    # Scale the prompt backgrounds by the EWK-CR scale factor.
    if sf > 0:
        h_wjets.Scale(sf)
        h_dy.Scale(sf)
        h_ttbar.Scale(sf)
        h_vv.Scale(sf)
    # Correct data for the trigger prescale if needed.
    if ps > 0:
        h_data.Scale(ps)

    # Color settings (plottery palette indices).
    colors = [2007, 2005, 2003, 2001, 920, 2]

    # Plotting options.
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 30,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name":
        "plots/{}/{}/{}/plot/{}.pdf".format(input_ntup_tag, analysis_tag,
                                            "ss" if isSS else "3l", histnames),
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": False,
        # Log scale only for the pt-corr spectra.
        "yaxis_log": "ptcorr" in histnames,
        "divide_by_bin_width": True,
        "legend_smart": "ptcorr" not in histnames,
        "lumi_value": 41.3,
    }

    # Channel-dependent QCD component in the background stack.
    bgs_list = [h_vv, h_ttbar, h_dy, h_wjets, h_qcd_mu
                ] if "Mu" in histnames else [h_vv, h_ttbar, h_dy, h_wjets, h_qcd_el]
    legend_labels = ["VV", "t#bar{t}", "DY", "W", "QCD"]

    p.plot_hist(bgs=bgs_list,
                data=h_data.Clone("Data"),
                colors=colors,
                syst=None,
                legend_labels=legend_labels,
                options=alloptions)

    # Re-fetch unmodified histograms for the data-driven QCD estimate:
    # data-driven QCD = data - prompt background.
    h_ddqcd = ru.get_summed_histogram(data_list, histnames)
    h_bkg = ru.get_summed_histogram(bkg_list_all, histnames)
    if ps > 0:
        h_ddqcd.Scale(ps)
    if sf > 0:
        h_bkg.Scale(sf)
    h_ddqcd.Add(h_bkg, -1)

    # MC QCD histograms, renamed for downstream use.
    h_qcd_mu = ru.get_summed_histogram(bkg_list_qcd_mu, histnames).Clone("QCD(#mu)")
    h_qcd_el = ru.get_summed_histogram(bkg_list_qcd_el, histnames).Clone("QCD(EM)")
    h_qcd_bc = ru.get_summed_histogram(bkg_list_qcd_bc, histnames).Clone("QCD(HF)")
    return h_ddqcd, h_data, h_bkg, h_qcd_mu, h_qcd_el, h_qcd_bc
def el_fakerate():
    """Compare the electron fake rate from the W+ttbar closure samples
    against the one measured in the QCD measurement region."""

    def ratio_hist(file_list, numname, denname):
        # Sum numerator and denominator over the files, fold over/underflow
        # into the visible range, and divide in place.
        numerator = ru.get_summed_histogram(file_list, numname)
        denominator = ru.get_summed_histogram(file_list, denname)
        u.move_in_overflows(numerator)
        u.move_in_overflows(denominator)
        numerator.Divide(denominator)
        return numerator

    # Closure fake rate from W+jets (HT-binned) and semileptonic ttbar MC.
    num = "ElClosureTight__elptcorretarolledcoarse"
    den = "ElClosureLoose__elptcorretarolledcoarse"
    h_num = ratio_hist(
        [output_dirpath + "/wj_ht.root", output_dirpath + "/tt_1l.root"],
        num, den)

    # Fake rate from the QCD (EM-enriched + bc->e) measurement region.
    h_qcd_num = ratio_hist(
        [output_dirpath + "/qcd_em.root", output_dirpath + "/qcd_bc.root"],
        "OneElTightMR__elptcorretarolledcoarse",
        "OneElMR__elptcorretarolledcoarse")
    h_qcd_num.SetName("QCD(e)")

    plot_options = {
        "ratio_range": [0.0, 2.0],
        "nbins": 180,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name":
        "plots/{}/{}/{}/fakeratemc/{}.pdf".format(input_ntup_tag, analysis_tag,
                                                  "ss" if isSS else "3l",
                                                  num + "__" + den),
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": True,
        "yield_prec": 3,
        "draw_points": True,
        "lumi_value": 41.3,
        "legend_datalabel": "W+t#bar{t}"
    }

    # QCD fake rate as "background", closure fake rate drawn as "data".
    p.plot_hist(bgs=[h_qcd_num],
                data=h_num,
                colors=[2005, 2001],
                syst=None,
                options=plot_options)
def el_iso(binname=""):
    """Compare normalized electron isolation shapes between the W+ttbar
    closure selection and the QCD measurement region (total, LF, HF)."""
    bkg_hist = "ElClosureLoose{}__eliso".format(binname)
    qcd_hist = "OneElMR{}__iso".format(binname)

    # Source files for each shape.
    mc_files = [output_dirpath + "/wj_ht.root", output_dirpath + "/tt_1l.root"]
    qcd_files = [output_dirpath + "/qcd_em.root", output_dirpath + "/qcd_bc.root"]

    h_bkg = ru.get_summed_histogram(mc_files, bkg_hist)
    h_qcd = ru.get_summed_histogram(qcd_files, qcd_hist)
    h_qcd_em = ru.get_summed_histogram([output_dirpath + "/qcd_em.root"], qcd_hist)
    h_qcd_bc = ru.get_summed_histogram([output_dirpath + "/qcd_bc.root"], qcd_hist)

    # Shape comparison: normalize each histogram to unit area and label it.
    for hist, label in ((h_bkg, None), (h_qcd, "QCD(e)"),
                        (h_qcd_em, "QCD(LF)"), (h_qcd_bc, "QCD(HF)")):
        hist.Scale(1. / hist.Integral())
        if label is not None:
            hist.SetName(label)

    plot_options = {
        "ratio_range": [0.0, 2.0],
        "nbins": 10,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name":
        "plots/{}/{}/{}/eliso/{}.pdf".format(input_ntup_tag, analysis_tag,
                                             "ss" if isSS else "3l",
                                             bkg_hist + "__" + qcd_hist),
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": True,
        "yield_prec": 3,
        "draw_points": True,
        "lumi_value": 41.3,
        "legend_datalabel": "W+t#bar{t}",
    }

    # Total QCD as background, LF/HF components overlaid as signals,
    # closure (W+ttbar) shape drawn as "data".
    p.plot_hist(sigs=[h_qcd_em, h_qcd_bc],
                bgs=[h_qcd],
                data=h_bkg,
                colors=[2001],
                syst=None,
                options=plot_options)
#!/bin/env python import plottery_wrapper as p import ROOT as r import sys f = r.TFile("results/pt0p5_2p0_20200209/fulleff_pt0p5_2p0_mtv.root") h_plot = f.Get("Root__pt_0p95_1p05_hit_miss_study") p.plot_hist( bgs=[h_plot], # data=h_all, legend_labels=[], options={ "output_name": "plots_missing_hits/summary.pdf", "print_yield": True }, ) prop_phi_2Slayer0 = f.Get( "Root__pt_0p95_1p05_nmiss2_prop_phi_2Slayer0").Clone() p.plot_hist( bgs=[prop_phi_2Slayer0], legend_labels=["prop #phi layer4"], options={ "output_name": "plots_missing_hits/hit_nmiss2_phi_prop_pt_0p95_1p05_2Slayer0.pdf", "ratio_range": [0., 0.4], "print_yield": True })
def plot(histnames, ps=0, sf=None, sfqcd=None, output_suffix="", dd_qcd=None):
    """Plot data vs MC in the fake-rate measurement region and return the
    histograms for the data-driven QCD estimate.

    Parameters:
        histnames     -- histogram name to retrieve; "Mu"/"El" in the name
                         selects the data stream and QCD flavor.
        ps            -- data prescale correction, applied when > 0.
        sf            -- EWK scale factor for the prompt backgrounds: either
                         a scalar (> 0) or a per-bin list of factors.
        sfqcd         -- like `sf` but applied to the QCD MC histograms.
        output_suffix -- appended to the output pdf name.
        dd_qcd        -- optional data-driven QCD histogram that replaces the
                         MC QCD component in the plotted stack.

    Returns:
        (h_ddqcd, h_data, h_bkg, h_qcd_mu, h_qcd_el, h_qcd_bc,
         h_wjets, h_dy, h_ttbar, h_vv)
    """
    # Glob the file lists
    bkg_list_wjets = [output_dirpath + "/wj_incl.root"]
    bkg_list_dy = [output_dirpath + "/dy.root"]
    bkg_list_ttbar = [output_dirpath + "/tt_incl.root"]
    bkg_list_vv = [output_dirpath + "/ww.root", output_dirpath + "/wz.root"]
    bkg_list_qcd_mu = [output_dirpath + "/qcd_mu.root"]
    bkg_list_qcd_el = [output_dirpath + "/qcd_em.root"]
    bkg_list_qcd_bc = [output_dirpath + "/qcd_bc.root"]
    bkg_list_all = bkg_list_wjets + bkg_list_dy + bkg_list_ttbar + bkg_list_vv
    # Glob the data file list depending on the region
    if "Mu" in histnames:
        data_list = [output_dirpath + "/data_mu.root"]
    elif "El" in histnames:
        data_list = [output_dirpath + "/data_el.root"]
    else:
        data_list = [
            output_dirpath + "/data_mu.root",
            output_dirpath + "/data_el.root"
        ]
    # Get all the histogram objects
    h_wjets = ru.get_summed_histogram(bkg_list_wjets, histnames)
    h_dy = ru.get_summed_histogram(bkg_list_dy, histnames)
    h_ttbar = ru.get_summed_histogram(bkg_list_ttbar, histnames)
    h_vv = ru.get_summed_histogram(bkg_list_vv, histnames)
    h_qcd_mu = ru.get_summed_histogram(bkg_list_qcd_mu, histnames)
    h_qcd_el = ru.get_summed_histogram(bkg_list_qcd_el, histnames)
    h_qcd_bc = ru.get_summed_histogram(bkg_list_qcd_bc, histnames)
    h_data = ru.get_summed_histogram(data_list, histnames)
    # Set the names of the histograms
    h_wjets.SetName("W")
    h_dy.SetName("Z")
    h_ttbar.SetName("Top")
    h_vv.SetName("VV")
    h_qcd_mu.SetName("QCD(#mu)")
    h_qcd_el.SetName("QCD(e)")
    h_qcd_bc.SetName("QCD(bc)")
    h_data.SetName("Data")
    # print h_wjets.Integral() + h_dy.Integral() + h_ttbar.Integral() + h_vv.Integral()
    # print h_qcd_el.Integral() + h_qcd_bc.Integral()
    # Scale the histograms appropriately from SF from the EWKCR.
    # A list means per-bin scale factors (content AND error scaled);
    # a scalar rescales the whole histogram.
    if sf:
        if isinstance(sf, list):
            hists = [h_wjets, h_dy, h_ttbar, h_vv]
            for h in hists:
                for ii, s in enumerate(sf):
                    bc = h.GetBinContent(ii + 1)
                    be = h.GetBinError(ii + 1)
                    h.SetBinContent(ii + 1, bc * s)
                    h.SetBinError(ii + 1, be * s)
        else:
            if sf > 0:
                h_wjets.Scale(sf)
                h_dy.Scale(sf)
                h_ttbar.Scale(sf)
                h_vv.Scale(sf)
    # Same scheme for the QCD MC scale factors.
    if sfqcd:
        if isinstance(sfqcd, list):
            hists = [h_qcd_mu, h_qcd_el, h_qcd_bc]
            for h in hists:
                for ii, s in enumerate(sfqcd):
                    bc = h.GetBinContent(ii + 1)
                    be = h.GetBinError(ii + 1)
                    h.SetBinContent(ii + 1, bc * s)
                    h.SetBinError(ii + 1, be * s)
        else:
            if sfqcd > 0:
                h_qcd_mu.Scale(sfqcd)
                h_qcd_el.Scale(sfqcd)
                h_qcd_bc.Scale(sfqcd)
    # If the data needs some additional correction for the prescale
    if ps > 0:
        h_data.Scale(ps)
    # print h_wjets.Integral() + h_dy.Integral() + h_ttbar.Integral() + h_vv.Integral()
    # print h_qcd_el.Integral() + h_qcd_bc.Integral()
    # print h_data.Integral()
    # Color settings
    colors = [2007, 2005, 2003, 2001, 920, 921]
    # Options
    alloptions = {
        "ratio_range": [0.0, 2.0],
        "nbins": 30,
        "autobin": False,
        "legend_scalex": 1.8,
        "legend_scaley": 1.1,
        "output_name":
        "plots/{}/{}/{}/plot/{}{}.pdf".format(input_ntup_tag, analysis_tag,
                                              "ss" if isSS else "3l",
                                              histnames, output_suffix),
        "bkg_sort_method": "unsorted",
        "no_ratio": False,
        "print_yield": True,
        # NOTE(review): this conditional is always False — probably meant
        # to be True for the "ptcorr" case like the sibling plot(); confirm.
        "yaxis_log": False if "ptcorr" in histnames else False,
        "divide_by_bin_width": True,
        "legend_smart": False if "ptcorr" in histnames else True,
        "lumi_value": lumi,
    }
    # The bkg histogram list; dd_qcd (data-driven QCD) overrides the MC QCD.
    h_qcd = h_qcd_mu if "Mu" in histnames else h_qcd_el
    if dd_qcd:
        h_qcd = dd_qcd
    bgs_list = [h_vv, h_ttbar, h_dy, h_wjets, h_qcd]
    legend_labels = ["VV", "t#bar{t}", "DY", "W", "QCD(#mu)"
                     ] if "Mu" in histnames else [
                         "VV", "t#bar{t}", "DY", "W", "QCD(e)", "QCD(HF)"
                     ]
    # Electron channel additionally stacks the heavy-flavor QCD component.
    if "Mu" not in histnames:
        bgs_list.append(h_qcd_bc)
    # (A commented-out block that merged bins 4 and 5 for 2018 central
    # bins used to live here; removed from active code upstream.)
    # Plot them
    p.plot_hist(bgs=bgs_list,
                data=h_data.Clone("Data"),
                colors=colors,
                syst=None,
                legend_labels=legend_labels,
                options=alloptions)
    # print h_wjets.Integral() + h_dy.Integral() + h_ttbar.Integral() + h_vv.Integral()
    # print h_qcd_el.Integral() + h_qcd_bc.Integral()
    # print h_data.Integral()
    # Obtain the histogram again to return the object for further calculations
    # Data-driven QCD = data - bkg
    h_ddqcd = ru.get_summed_histogram(data_list, histnames)
    h_bkg = ru.get_summed_histogram(bkg_list_all, histnames)
    h_wjets = ru.get_summed_histogram(bkg_list_wjets, histnames)
    h_dy = ru.get_summed_histogram(bkg_list_dy, histnames)
    h_ttbar = ru.get_summed_histogram(bkg_list_ttbar, histnames)
    h_vv = ru.get_summed_histogram(bkg_list_vv, histnames)
    if ps > 0:
        h_ddqcd.Scale(ps)
    # Scale the histograms appropriately from SF from the EWKCR
    if sf:
        if isinstance(sf, list):
            hists = [h_bkg, h_wjets, h_dy, h_ttbar, h_vv]
            for h in hists:
                for ii, s in enumerate(sf):
                    bc = h.GetBinContent(ii + 1)
                    be = h.GetBinError(ii + 1)
                    h.SetBinContent(ii + 1, bc * s)
                    h.SetBinError(ii + 1, be * s)
        else:
            if sf > 0:
                h_bkg.Scale(sf)
                h_wjets.Scale(sf)
                h_dy.Scale(sf)
                h_ttbar.Scale(sf)
                h_vv.Scale(sf)
    if "ptcorretarolled" in histnames:
        # Bin-by-bin subtraction with error propagation via the project's
        # E (value, error) type.  For the SS region, clamp the result so a
        # negative/undershooting estimate is floored at the data error.
        for ii in xrange(1, h_ddqcd.GetNbinsX() + 1):
            data_bc = h_ddqcd.GetBinContent(ii)
            data_be = h_ddqcd.GetBinError(ii)
            bkg_bc = h_bkg.GetBinContent(ii)
            bkg_be = h_bkg.GetBinError(ii)
            d = E(data_bc, data_be)
            b = E(bkg_bc, bkg_be)
            n = d - b
            if isSS:
                if d.err > n.val:
                    n.val = d.err
            h_ddqcd.SetBinContent(ii, n.val)
            h_ddqcd.SetBinError(ii, n.err)
    else:
        # Simple histogram subtraction for all other distributions.
        h_ddqcd.Add(h_bkg, -1)
    # MC QCD
    h_qcd_mu = ru.get_summed_histogram(bkg_list_qcd_mu,
                                       histnames).Clone("QCD(#mu)")
    h_qcd_el = ru.get_summed_histogram(bkg_list_qcd_el,
                                       histnames).Clone("QCD(EM)")
    h_qcd_bc = ru.get_summed_histogram(bkg_list_qcd_bc,
                                       histnames).Clone("QCD(HF)")
    return h_ddqcd, h_data, h_bkg, h_qcd_mu, h_qcd_el, h_qcd_bc, h_wjets, h_dy, h_ttbar, h_vv
def write_datacards(ntuple_version, tag): # ntuple_version = args.sample_set_name # tag = args.tag if args.wwz_only: fname_sig = "outputs/{}/{}/wwz.root".format(ntuple_version, tag) else: fname_sig = "outputs/{}/{}/sig.root".format(ntuple_version, tag) fname_wwz = "outputs/{}/{}/wwz.root".format(ntuple_version, tag) fname_wzz = "outputs/{}/{}/wzz.root".format(ntuple_version, tag) fname_zzz = "outputs/{}/{}/zzz.root".format(ntuple_version, tag) fname_ttz = "outputs/{}/{}/ttz.root".format(ntuple_version, tag) fname_zz = "outputs/{}/{}/zz.root".format(ntuple_version, tag) fname_wz = "outputs/{}/{}/wz.root".format(ntuple_version, tag) fname_twz = "outputs/{}/{}/twz.root".format(ntuple_version, tag) fname_rare = "outputs/{}/{}/rare.root".format(ntuple_version, tag) fname_dyttbar = "outputs/{}/{}/dyttbar.root".format(ntuple_version, tag) fname_higgs = "outputs/{}/{}/higgs.root".format(ntuple_version, tag) fname_othernoh = "outputs/{}/{}/othernoh.root".format(ntuple_version, tag) fname_data = "outputs/{}/{}/data.root".format(ntuple_version, tag) year = "2" + ntuple_version.split("_")[0].split("2")[1] if "2016" in ntuple_version and "2017" in ntuple_version: year = "All" prefix = "{}/{}".format(ntuple_version, tag) procs = [ "data_obs", "sig", "ttz", "zz", "wz", "twz", "rare", "dyttbar", "higgs" ] mcprocs = procs[1:] bkgprocs = procs[2:] fnames = [ fname_data, fname_sig, fname_ttz, fname_zz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs ] nonzzbkg = [ fname_sig, fname_ttz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs ] nonttzbkg = [ fname_sig, fname_zz, fname_wz, fname_twz, fname_rare, fname_dyttbar, fname_higgs ] procs = ["data_obs", "sig", "ttz", "zz", "wz", "twz", "higgs", "other"] mcprocs = procs[1:] bkgprocs = procs[2:] fnames = [ fname_data, fname_sig, fname_ttz, fname_zz, fname_wz, fname_twz, fname_higgs, fname_othernoh ] nonzzbkg = [ fname_sig, fname_ttz, fname_wz, fname_twz, fname_higgs, fname_othernoh ] nonttzbkg = [ fname_sig, 
fname_zz, fname_wz, fname_twz, fname_higgs, fname_othernoh ] if args.wwz_only: procs = [ "data_obs", "sig", "wzz", "zzz", "zz", "ttz", "twz", "wz", "higgs", "other" ] mcprocs = procs[1:] bkgprocs = procs[2:] fnames = [ fname_data, fname_wwz, fname_wzz, fname_zzz, fname_zz, fname_ttz, fname_twz, fname_wz, fname_higgs, fname_othernoh ] nonzzbkg = [ fname_wwz, fname_wzz, fname_zzz, fname_ttz, fname_twz, fname_wz, fname_higgs, fname_othernoh ] nonttzbkg = [ fname_wwz, fname_wzz, fname_zzz, fname_zz, fname_twz, fname_wz, fname_higgs, fname_othernoh ] systcategs = [ "BTagHF", "BTagLF", "JES", "Pileup", "Qsq", "PDF", "AlphaS", "MET", "JER", "METPileup" ] # Null string is the nominal variation systnames = ["Nominal"] # Nominal always exist for systcateg in systcategs: systnames.append(systcateg + "Up") systnames.append(systcateg + "Down") ############# # Open TFiles ############# tfiles = {} for proc, fname in zip(procs, fnames): tfiles[proc] = r.TFile(fname) ########################### # OnZ Control region yields ########################### onz_zz_h = pr.get_summed_histogram([fname_zz], "ChannelOnZ__Yield") onz_data_h = pr.get_summed_histogram([fname_data], "ChannelOnZ__Yield") onz_nonzz_h = pr.get_summed_histogram(nonzzbkg, "ChannelOnZ__Yield") zz_sf = pr.get_sf(onz_zz_h, onz_data_h, onz_nonzz_h).GetBinContent(1) zz_sferr = pr.get_sf(onz_zz_h, onz_data_h, onz_nonzz_h).GetBinError(1) expected_nevt_zz = onz_data_h.GetBinContent(1) ############################ # BTag Control region yields ############################ bcr_ttz_h = pr.get_summed_histogram([fname_ttz], "ChannelBTagEMu__Yield") bcr_data_h = pr.get_summed_histogram([fname_data], "ChannelBTagEMu__Yield") bcr_nonttz_h = pr.get_summed_histogram(nonttzbkg, "ChannelBTagEMu__Yield") ttz_sf = pr.get_sf(bcr_ttz_h, bcr_data_h, bcr_nonttz_h).GetBinContent(1) ttz_sferr = pr.get_sf(bcr_ttz_h, bcr_data_h, bcr_nonttz_h).GetBinError(1) expected_nevt_ttz = bcr_data_h.GetBinContent(1) if not args.print_yields: print year, 
"ttz_sf", "{:.2f} +/- {:.2f}".format( ttz_sf, ttz_sferr), expected_nevt_ttz print year, "zz_sf", "{:.2f} +/- {:.2f}".format( zz_sf, zz_sferr), expected_nevt_zz ############################### # EMu channel data card writing ############################### # number of bins fitreg = "EMuHighMT" if args.emu_one_bin: nbins = 1 fitvar = "Yield" else: nbins = 5 fitvar = "MllNom" # nbins = 5 # fitvar = "pt_zeta" # Main data base to hold all the histograms hists_db = {} # Loop over the processes for proc in procs: # Retrieve the tfile tfile = tfiles[proc] # For each processes create another map to hold various histograms hists_db[proc] = {} # Loop over the systematic variations for syst in systnames: if syst == "Nominal": if nbins == 5: h = rebin36( tfile.Get("Channel{}__{}".format(fitreg, fitvar)).Clone()) else: h = tfile.Get("Channel{}__{}".format(fitreg, fitvar)).Clone() else: systhacked = syst if proc == "NONE": systhacked = "" if nbins == 5: h = rebin36( tfile.Get("Channel{}{}__{}".format( fitreg, systhacked, fitvar)).Clone()) else: h = tfile.Get("Channel{}{}__{}".format( fitreg, systhacked, fitvar)).Clone() # if nbins == 5: # h = rebin36(tfile.Get("Channel{}{}__{}".format(fitreg, syst, fitvar)).Clone()) # else: # h = tfile.Get("Channel{}{}__{}".format(fitreg, syst, fitvar)).Clone() h.SetTitle("emu{}_{}".format(year, proc)) if proc == "ttz": before_scale = h.Integral() h.Scale(ttz_sf) after_scale = h.Integral() if syst == "Nominal": print year, "ttz", before_scale, after_scale if proc == "zz": before_scale = h.Integral() h.Scale(zz_sf) after_scale = h.Integral() if syst == "Nominal": print year, "zz", before_scale, after_scale # if proc == "wz": h.Scale(2) hists_db[proc][syst] = h systs = [] # ZZ CR systematic line onz_cr_hist = r.TH1F("onz_cr", "", nbins, 0, nbins) for i in xrange(1, nbins + 1): onz_cr_hist.SetBinContent(i, expected_nevt_zz) alpha = hists_db["zz"]["Nominal"].Clone("alpha") alpha.Divide(onz_cr_hist) thissyst = {} for proc in mcprocs: if proc == "zz": 
# NOTE(review): this whole region is a whitespace-mangled paste -- the original
# newlines and indentation were lost, and each physical line below is many
# statements collapsed into one. The code is kept byte-identical; only
# orienting comments are added between the collapsed lines. The region builds
# HiggsCombine-style datacards: first the tail of the e#mu channel, then the
# OffZ channel. The very first fragment is the continuation of a
# `for proc in mcprocs:` loop that begins before this chunk.
# Below: gmN control-region systematic lines for ZZ and ttZ (alpha = MC yield /
# CR expectation), experimental up/down lnN systematics per category, and flat
# lnN transfer-factor systematics for ttZ/ZZ in the e#mu channel.
thissyst["emu{}_".format(year) + proc] = [ "{:4f}".format(alpha.GetBinContent(i)) for i in range(1, nbins + 1) ] else: thissyst["emu{}_".format(year) + proc] = 0 systs.append(("CRZZ{}".format(year), "gmN", [onz_cr_hist], thissyst)) # ttZ CR systematic line btag_cr_hist = r.TH1F("btag_cr", "", nbins, 0, nbins) for i in xrange(1, nbins + 1): btag_cr_hist.SetBinContent(i, expected_nevt_ttz) alpha = hists_db["ttz"]["Nominal"].Clone("alpha") alpha.Divide(btag_cr_hist) thissyst = {} for proc in mcprocs: if proc == "ttz": thissyst["emu{}_".format(year) + proc] = [ "{:4f}".format(alpha.GetBinContent(i)) for i in range(1, nbins + 1) ] else: thissyst["emu{}_".format(year) + proc] = 0 systs.append(("CRTTZ{}".format(year), "gmN", [btag_cr_hist], thissyst)) # Experimental systematics for systcateg in systcategs: thissyst = {} for proc in mcprocs: if proc not in ["zz", "ttz"]: thissyst["emu{}_".format(year) + proc] = [ hists_db[proc][systcateg + "Up"], hists_db[proc][systcateg + "Down"] ] else: thissyst["emu{}_".format(year) + proc] = 0 systs.append((systcateg + year, "lnN", [], thissyst)) # # Flat additional systematics # thissyst = {} # for proc in mcprocs: # if proc == "ttz": thissyst["emu{}_".format(year) + proc] = "1.11" # else: thissyst["emu{}_".format(year) + proc] = 0 # systs.append( ("FlatSystTFNbTTZ{}".format(year), "lnN", [], thissyst) ) # # Flat additional systematics # thissyst = {} # for proc in mcprocs: # if proc == "ttz": thissyst["emu{}_".format(year) + proc] = "1.02" # else: thissyst["emu{}_".format(year) + proc] = 0 # systs.append( ("FlatSystMTexpTTZ{}".format(year), "lnN", [], thissyst) ) # Flat additional systematics thissyst = {} for proc in mcprocs: if proc == "ttz": thissyst["emu{}_".format(year) + proc] = "1.105594" else: thissyst["emu{}_".format(year) + proc] = 0 systs.append(("FlatSystTFEMuTTZ{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: if proc == "zz": thissyst["emu{}_".format(year) + proc] = 
# Below: remaining flat lnN systematics for e#mu (ZZ transfer factor, WZ fake
# estimate 60%, luminosity 2.5%, IP3D 3%, trigger SF 2%); then a
# dw.DataCardWriter is constructed (stat errors disabled for the zz/ttz CR
# processes) and per-bin e#mu datacards are written. The magic flat values
# (1.049173 etc.) are presumably derived offline -- TODO confirm provenance.
"1.049173" else: thissyst["emu{}_".format(year) + proc] = 0 systs.append(("FlatSystTFEMuZZ{}".format(year), "lnN", [], thissyst)) # # Flat additional systematics # thissyst = {} # for proc in mcprocs: # if proc == "zz": thissyst["emu{}_".format(year) + proc] = "1.05" # else: thissyst["emu{}_".format(year) + proc] = 0 # systs.append( ("FlatSystMTexpZZ{}".format(year), "lnN", [], thissyst) ) # Flat additional systematics thissyst = {} for proc in mcprocs: if proc == "wz": thissyst["emu{}_".format(year) + proc] = "1.6" # Fake Syst else: thissyst["emu{}_".format(year) + proc] = 0 systs.append(("FlatSystWZ{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: thissyst["emu{}_".format(year) + proc] = "1.025" systs.append(("FlatSystLumi{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: thissyst["emu{}_".format(year) + proc] = "1.03" systs.append(("FlatSystsIP3D{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: thissyst["emu{}_".format(year) + proc] = "1.02" systs.append(("FlatSystsTrigSF{}".format(year), "lnN", [], thissyst)) # Now create data card writer sig = hists_db["sig"]["Nominal"] bgs = [hists_db[proc]["Nominal"] for proc in bkgprocs] data = hists_db["data_obs"]["Nominal"] d = dw.DataCardWriter( sig=sig, bgs=bgs, data=data, systs=systs, no_stat_procs=["emu{}_zz".format(year), "emu{}_ttz".format(year)]) finalyields = [] if nbins == 5: for i in xrange(1, nbins + 1): d.set_bin(i) d.set_region_name("bin{}".format(i)) d.write("stats/{}/emu_datacard_bin{}.txt".format(prefix, i)) if args.print_yields and args.wwz_only: vals = d.print_yields(detail=args.print_detail) if vals: print_yield_table(vals[0], vals[1], "textable/emu{}{}".format(year, i)) finalyields.append(vals) elif nbins == 1: d.set_bin(1) d.set_region_name("bin{}".format(1)) d.write("stats/{}/emu_datacard_singlebin{}.txt".format(prefix, 1)) if 
# Below: end of e#mu card writing, then the OffZ channel begins: the
# ChannelOffZHighMET__Yield histogram is fetched per process/systematic
# variation, ttz/zz are rescaled by their scale factors (with a Nominal-only
# printout of before/after integrals), and the ZZ CR gmN line is built.
args.print_yields and args.wwz_only: vals = d.print_yields(detail=args.print_detail) if vals: print_yield_table(vals[0], vals[1], "textable/emu{}".format(year)) # colors = [2005, 2001, 2003, 2007, 920, 2012, 2011, 2002] # p.plot_hist(data=None, bgs=bgs, sigs=[sig], options={"bkg_sort_method":"ascending", "yaxis_range":[0.,2.5]}, colors=colors, sig_labels=["sig"], legend_labels=bkgprocs) ################################ # OffZ channel data card writing ################################ # number of bins nbins = 1 # Main data base to hold all the histograms hists_db = {} # Loop over the processes for proc in procs: # Retrieve the tfile tfile = tfiles[proc] # For each processes create another map to hold various histograms hists_db[proc] = {} # Loop over the systematic variations for syst in systnames: if syst == "Nominal": h = tfile.Get("ChannelOffZHighMET__Yield").Clone() else: systhacked = syst if proc == "NONE": systhacked = "" h = tfile.Get( "ChannelOffZHighMET{}__Yield".format(systhacked)).Clone() # h = tfile.Get("ChannelOffZHighMET{}__Yield".format(syst)).Clone() h.SetTitle("offz{}_{}".format(year, proc)) if proc == "ttz": before_scale = h.Integral() h.Scale(ttz_sf) after_scale = h.Integral() if syst == "Nominal": print year, "ttz", before_scale, after_scale if proc == "zz": before_scale = h.Integral() h.Scale(zz_sf) after_scale = h.Integral() if syst == "Nominal": print year, "zz", before_scale, after_scale # if proc == "wz": h.Scale(2) hists_db[proc][syst] = h systs = [] # ZZ CR systematic line onz_cr_hist = r.TH1F("onz_cr", "", nbins, 0, nbins) for i in xrange(1, nbins + 1): onz_cr_hist.SetBinContent(i, expected_nevt_zz) alpha = hists_db["zz"]["Nominal"].Clone("alpha") alpha.Divide(onz_cr_hist) thissyst = {} for proc in mcprocs: if proc == "zz": thissyst["offz{}_".format(year) + proc] = ["{:4f}".format(alpha.GetBinContent(1))] else: thissyst["offz{}_".format(year) + proc] = 0 systs.append(("CRZZ{}".format(year), "gmN", [onz_cr_hist], thissyst)) # ttZ CR 
# Below: OffZ mirror of the e#mu systematics -- ttZ CR gmN line, experimental
# up/down lnN systematics, and flat transfer-factor systematics for ttZ/ZZ
# in the ee/#mu#mu (EEMM) channel.
systematic line btag_cr_hist = r.TH1F("btag_cr", "", nbins, 0, nbins) for i in xrange(1, nbins + 1): btag_cr_hist.SetBinContent(i, expected_nevt_ttz) alpha = hists_db["ttz"]["Nominal"].Clone("alpha") alpha.Divide(btag_cr_hist) thissyst = {} for proc in mcprocs: if proc == "ttz": thissyst["offz{}_".format(year) + proc] = ["{:4f}".format(alpha.GetBinContent(1))] else: thissyst["offz{}_".format(year) + proc] = 0 systs.append(("CRTTZ{}".format(year), "gmN", [btag_cr_hist], thissyst)) # Experimental systematics for systcateg in systcategs: thissyst = {} for proc in mcprocs: if proc not in ["zz", "ttz"]: thissyst["offz{}_".format(year) + proc] = [ hists_db[proc][systcateg + "Up"], hists_db[proc][systcateg + "Down"] ] else: thissyst["offz{}_".format(year) + proc] = 0 systs.append((systcateg + year, "lnN", [], thissyst)) # # Flat additional systematics # thissyst = {} # for proc in mcprocs: # if proc == "ttz": thissyst["offz{}_".format(year) + proc] = "1.10" # else: thissyst["offz{}_".format(year) + proc] = 0 # systs.append( ("FlatSystTFeemmTTZ{}".format(year), "lnN", [], thissyst) ) # # Flat additional systematics # thissyst = {} # for proc in mcprocs: # if proc == "ttz": thissyst["offz{}_".format(year) + proc] = "1.03" # else: thissyst["offz{}_".format(year) + proc] = 0 # systs.append( ("FlatSystMETexpTTZ{}".format(year), "lnN", [], thissyst) ) # Flat additional systematics thissyst = {} for proc in mcprocs: if proc == "ttz": thissyst["offz{}_".format(year) + proc] = "1.111924" else: thissyst["offz{}_".format(year) + proc] = 0 systs.append(("FlatSystTFEEMMTTZ{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: if proc == "zz": thissyst["offz{}_".format(year) + proc] = "1.167012" else: thissyst["offz{}_".format(year) + proc] = 0 systs.append(("FlatSystTFEEMMZZ{}".format(year), "lnN", [], thissyst)) # # Flat additional systematics # thissyst = {} # for proc in mcprocs: # if proc == "zz": thissyst["offz{}_".format(year) + 
# Below: OffZ flat systematics (WZ fake 60%, lumi 2.5%, IP3D 3%, trigger SF
# 2%), the OffZ DataCardWriter (again with zz/ttz stat errors suppressed),
# per-bin OffZ datacard writing, and the start of the final summary: 6-bin
# histograms (5 e#mu bins + 1 ee/#mu#mu bin) per process.
proc] = "1.23" # else: thissyst["offz{}_".format(year) + proc] = 0 # systs.append( ("FlatSystMETexpZZ{}".format(year), "lnN", [], thissyst) ) # Flat additional systematics thissyst = {} for proc in mcprocs: if proc == "wz": thissyst["offz{}_".format(year) + proc] = "1.6" # Fake Syst else: thissyst["offz{}_".format(year) + proc] = 0 systs.append(("FlatSystWZ{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: thissyst["offz{}_".format(year) + proc] = "1.025" systs.append(("FlatSystLumi{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: thissyst["offz{}_".format(year) + proc] = "1.03" systs.append(("FlatSystsIP3D{}".format(year), "lnN", [], thissyst)) # Flat additional systematics thissyst = {} for proc in mcprocs: thissyst["offz{}_".format(year) + proc] = "1.02" systs.append(("FlatSystsTrigSF{}".format(year), "lnN", [], thissyst)) # Now create data card writer sig = hists_db["sig"]["Nominal"] bgs = [hists_db[proc]["Nominal"] for proc in bkgprocs] data = hists_db["data_obs"]["Nominal"] d = dw.DataCardWriter( sig=sig, bgs=bgs, data=data, systs=systs, no_stat_procs=["offz{}_zz".format(year), "offz{}_ttz".format(year)]) for i in xrange(1, nbins + 1): d.set_bin(i) d.set_region_name("bin{}".format(i)) d.write("stats/{}/offz_datacard_bin{}.txt".format(prefix, i)) if args.print_yields and args.wwz_only: vals = d.print_yields(detail=args.print_detail) if vals: print_yield_table(vals[0], vals[1], "textable/offz{}".format(year)) finalyields.append(vals) if len(finalyields) > 0: procs = [ "sig", "wzz", "zzz", "zz", "ttz", "twz", "wz", "higgs", "other" ] histsdict = {} for proc in procs: if proc == "sig": h = r.TH1F("WWZ", "", 6, 0, 6) elif proc == "wzz": h = r.TH1F("WZZ", "", 6, 0, 6) elif proc == "zzz": h = r.TH1F("ZZZ", "", 6, 0, 6) else: h = r.TH1F("Fit{}".format(proc), "", 6, 0, 6) h.GetXaxis().SetBinLabel(1, "e#mu Bin 1") h.GetXaxis().SetBinLabel(2, "e#mu Bin 2") 
# Below: fill the summary histograms from the collected yields (rate.val /
# rate.err per bin), pick the per-year luminosity, and draw the fit-region
# summary plot with plottery.
h.GetXaxis().SetBinLabel(3, "e#mu Bin 3") h.GetXaxis().SetBinLabel(4, "e#mu Bin 4") h.GetXaxis().SetBinLabel(5, "e#mu Bin 5") h.GetXaxis().SetBinLabel(6, "ee/#mu#mu") histsdict[proc] = h for index, item in enumerate(finalyields): for procfullname, rate in zip(item[0], item[1]): procname = procfullname.split("_")[1] print index, procname, rate histsdict[procname].SetBinContent(index + 1, rate.val) histsdict[procname].SetBinError(index + 1, rate.err) bkghists = [histsdict[proc].Clone() for proc in procs[3:]] sighists = [histsdict[proc].Clone() for proc in procs[:3]] lumi = 137 if "2016" in year: lumi = 35.9 if "2017" in year: lumi = 41.3 if "2018" in year: lumi = 59.74 p.plot_hist( bgs=bkghists, sigs=sighists, options={ "output_name": "fitplot/fit{}.pdf".format(year), "print_yield": True, "signal_scale": 1, "legend_scalex": 1.8, "legend_scaley": 1.0, "legend_ncolumns": 3, "legend_smart": True, "yaxis_log": False, "ymax_scale": 1.2, "lumi_value": lumi, # "no_overflow": True, "remove_underflow": True, "xaxis_ndivisions": 505, "ratio_range": [0., 2.], "xaxis_label": "Fit regions", "ratio_xaxis_title": "Fit regions", "no_ratio": True, }, colors=[2001, 2005, 2007, 2003, 2011, 920, 2012, 2011, 2002], legend_labels=["ZZ", "t#bar{t}Z", "tWZ", "WZ", "Higgs", "Other"], # sig_labels = ["WWZ","WZZ","ZZZ"] )
# NOTE(review): whitespace-mangled paste -- the function below was collapsed
# onto three physical lines; bytes kept identical, comments added between
# them. fakerate(num, den, ...) builds data-driven fake-rate plots: it
# fetches numerator histograms via get_fakerate_histograms, derives a
# systematic from the EWK-SF uncertainty (list vs scalar sf handled
# separately), plots data vs QCD prediction, and optionally writes
# variable-binned fake-rate histograms (plus closure-uncertainty variants)
# into `tfile`. It also reads module-level globals (input_ntup_tag,
# analysis_tag, isSS, lumi) -- confirm against the enclosing script.
def fakerate(num, den, ps=0, sf=0, sferr=0, tfile=None, sfden=0, sfdenerr=0): # Obtain histograms h_num, h_num_qcd_mu, h_num_qcd_esum, h_num_qcd_el, h_num_qcd_bc = get_fakerate_histograms( num, den, ps, sf, sfden) if isinstance(sf, list): herr_num, herr_num_qcd_mu, herr_num_qcd_esum, herr_num_qcd_el, herr_num_qcd_bc = get_fakerate_histograms( num, den, ps, sferr, sfdenerr) else: herr_num, herr_num_qcd_mu, herr_num_qcd_esum, herr_num_qcd_el, herr_num_qcd_bc = get_fakerate_histograms( num, den, ps, sf - sferr) # Set data-driven QCD estimate systematics stemming from EWK SF uncertainty add_systematics(h_num, herr_num) # Options alloptions = { "ratio_range": [0.0, 2.0], "nbins": 180, "autobin": False, "legend_scalex": 0.8, "legend_scaley": 0.8, "output_name": "plots/{}/{}/{}/fakerate/{}.pdf".format(input_ntup_tag, analysis_tag, "ss" if isSS else "3l", num + "__" + den), "bkg_sort_method": "unsorted", "no_ratio": False, "print_yield": True, "yield_prec": 3, "draw_points": True, "hist_line_none": True, "show_bkg_errors": True, "lumi_value": lumi, "yaxis_range": [0., 0.4] if "Mu" in num else ([0., 1.2] if isSS else [0., 1.8]), } bgs_list = [h_num_qcd_mu] if "Mu" in num else [h_num_qcd_esum] #bgs_list = [h_num_qcd_mu] if "Mu" in num else [h_num_qcd_esum, h_num_qcd_el, h_num_qcd_bc] #sigs_list = [] if "Mu" in num else [h_num_qcd_el, h_num_qcd_bc] bgs_list_copy = [h_num_qcd_mu.Clone() ] if "Mu" in num else [h_num_qcd_esum.Clone()] sigs_list = [] h_num_qcd_esum.Print("all") # Special label handling instance for pt-eta rolled out case histname = num.split("__")[1] if histname == "ptcorretarolledcoarse": xbounds = get_bounds_from_source_file("ptcorrcoarse_bounds") ybounds = get_bounds_from_source_file("eta_bounds") for jndex in xrange(len(ybounds) - 1): for index in xrange(len(xbounds) - 1): #label = "Ptcorr #in ({}, {}) and |#eta| #in ({:.1f}, {:.1f})".format(int(xbounds[index]), int(xbounds[index+1]), ybounds[jndex], ybounds[jndex+1]) label = "({}, {}), ({:.1f}, 
# Below: same bin-label roll-out for the fine-binned "ptcorretarolled" case,
# canvas margin tweaks, the main plot_hist call, and -- when `tfile` is given
# -- writing of data/QCD fake-rate histograms (with a hard-coded bin-6/12
# patch of the 3l muon fake rate using the QCD value).
{:.1f})".format( int(xbounds[index]), int(xbounds[index + 1]), ybounds[jndex], ybounds[jndex + 1]) for h in sigs_list + bgs_list + [h_num]: h.GetXaxis().SetBinLabel( (jndex) * (len(xbounds) - 1) + (index + 1), label) if histname == "ptcorretarolled": xbounds = get_bounds_from_source_file("ptcorr_bounds") ybounds = get_bounds_from_source_file("eta_bounds") for jndex in xrange(len(ybounds) - 1): for index in xrange(len(xbounds) - 1): #label = "Ptcorr #in ({}, {}) and |#eta| #in ({:.1f}, {:.1f})".format(int(xbounds[index]), int(xbounds[index+1]), ybounds[jndex], ybounds[jndex+1]) label = "({}, {}), ({:.1f}, {:.1f})".format( int(xbounds[index]), int(xbounds[index + 1]), ybounds[jndex], ybounds[jndex + 1]) for h in sigs_list + bgs_list + [h_num]: h.GetXaxis().SetBinLabel( (jndex) * (len(xbounds) - 1) + (index + 1), label) alloptions["canvas_main_rightmargin"] = 1. / 6. alloptions["canvas_ratio_rightmargin"] = 1. / 6. alloptions["canvas_ratio_bottommargin"] = 0.5 p.plot_hist( sigs=sigs_list, bgs=bgs_list, data=h_num, #data = None, syst=None, colors=[2001], legend_labels=["QCD(#mu)"] if "Mu" in num else ["QCD(e)"], options=alloptions) if tfile: tfile.cd() channel = "Mu" if "Mu" in num else "El" histname = num.split("__")[1] data_fakerate = h_num.Clone(channel + "_" + histname + "_data_fakerate") qcd_fakerate = bgs_list_copy[0].Clone(channel + "_" + histname + "_qcd_fakerate") # Special treatment for 2018 3l mu fakerate # if highest bin in pt set it to QCD one if channel == "Mu" and isSS == False: data_fakerate.SetBinContent(6, qcd_fakerate.GetBinContent(6)) data_fakerate.SetBinError(6, qcd_fakerate.GetBinError(6)) data_fakerate.SetBinContent(12, qcd_fakerate.GetBinContent(12)) data_fakerate.SetBinError(12, qcd_fakerate.GetBinError(12)) if histname == "etacorrvarbin": create_varbin(data_fakerate, "eta_bounds").Write() create_varbin(qcd_fakerate, "eta_bounds").Write() elif histname == "ptcorrvarbin": create_varbin(data_fakerate, "ptcorr_bounds").Write() 
# Below: per-histname variable-binned outputs; for the rolled pt/eta cases an
# extra "closure" variant is written with a per-channel closure uncertainty
# (values hard-coded from the closure test quoted in the inline comment).
create_varbin(qcd_fakerate, "ptcorr_bounds").Write() elif histname == "ptcorrvarbincoarse": create_varbin(data_fakerate, "ptcorrcoarse_bounds").Write() create_varbin(qcd_fakerate, "ptcorrcoarse_bounds").Write() elif histname == "ptcorretarolled": create_varbin(data_fakerate, "ptcorr_bounds", "eta_bounds").Write() create_varbin(qcd_fakerate, "ptcorr_bounds", "eta_bounds").Write() # Closure 3l mu 51% 3l el 1% ss mu 33% ss el 3% (1.51, 0.994, 1.329, 0.978) if channel == "Mu": if isSS: create_varbin(data_fakerate, "ptcorr_bounds", "eta_bounds", 0.14, "closure").Write() else: create_varbin(data_fakerate, "ptcorr_bounds", "eta_bounds", 0.56, "closure").Write() elif channel == "El": if isSS: create_varbin(data_fakerate, "ptcorr_bounds", "eta_bounds", 0.29, "closure").Write() else: create_varbin(data_fakerate, "ptcorr_bounds", "eta_bounds", 0.11, "closure").Write() elif histname == "ptcorretarolledcoarse": create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds").Write() create_varbin(qcd_fakerate, "ptcorrcoarse_bounds", "eta_bounds").Write() # Closure 3l mu 51% 3l el 1% ss mu 33% ss el 3% (1.51, 0.994, 1.329, 0.978) if channel == "Mu": if isSS: create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.33, "closure").Write() else: create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.51, "closure").Write() elif channel == "El": if isSS: create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.03, "closure").Write() else: create_varbin(data_fakerate, "ptcorrcoarse_bounds", "eta_bounds", 0.01, "closure").Write()
# NOTE(review): a NEW standalone script starts here (own shebang + imports) --
# the source file is a concatenation of several scripts with newlines lost.
# It reads per-event timing measurements from tl_timing_data.txt and
# sg_timing_data.txt, fills 1D histograms, and plots them with plottery.
# The trailing `})` of the second plot_hist call appears to have been lost at
# the junction with the next script fragment -- TODO restore from the
# original file. Both histograms are also created with the same ROOT name
# ("segment_linking_timing"); presumably harmless here, but ROOT will warn
# about replacing the existing object.
#!/bin/env python import ROOT as r import plottery_wrapper as p tl_timing_data = open("tl_timing_data.txt") tl_times = [float(line.strip()) for line in tl_timing_data.readlines()] print tl_times tl_time_hist = r.TH1F("segment_linking_timing", "", 10, 5, 50) for tl_time in tl_times: tl_time_hist.Fill(tl_time) p.plot_hist(bgs=[tl_time_hist], legend_labels=["PU200 t#bar{t}"], options={ "output_name": "tl_time_hist.pdf", "xaxis_label": "Seconds" }) sg_timing_data = open("sg_timing_data.txt") sg_times = [float(line.strip()) for line in sg_timing_data.readlines()] print sg_times sg_time_hist = r.TH1F("segment_linking_timing", "", 10, 0, 1.5) for sg_time in sg_times: sg_time_hist.Fill(sg_time) p.plot_hist(bgs=[sg_time_hist], legend_labels=["PU200 t#bar{t}"], options={ "output_name": "sg_time_hist.pdf", "xaxis_label": "Seconds"
# NOTE(review): another script fragment (tracking-efficiency denominators).
# It opens one of several candidate ROOT result files (alternatives kept as
# commented history), fetches the all-track pt and eta denominator
# histograms for each barrel/endcap layer pattern (bbbbbb ... beeeee), and
# plots them unsorted. The final plot_hist call runs past the end of this
# chunk (its closing parenthesis is not visible here); bytes kept identical.
f = r.TFile("results/pt0p5_2p0_20200318_0843/fulleff_pt0p5_2p0.root") # Older Denom (March 18 morning) # f = r.TFile("results/pt0p5_2p0_20200319_1008/fulleff_pt0p5_2p0.root") # Newer Denom (March 19 morning) # f = r.TFile("results/pt0p5_2p0_20200319_1023/fulleff_pt0p5_2p0.root") # Newer Denom with no pdgId matching between simhits and simtrack # f = r.TFile("results/pt0p5_2p0_20200319_1143/fulleff_pt0p5_2p0.root") # Newer Denom with no pdgId matching between simhits and simtrack and some priority scheme # f = r.TFile("results/pt0p5_2p0_20200319_1153/fulleff_pt0p5_2p0.root") # Newer Denom with yes pdgId matching between simhits and simtrack and some priority scheme bbbbbb_denom = f.Get("Root__tc_bbbbbb_all_track_pt_by_layer0") bbbbbe_denom = f.Get("Root__tc_bbbbbe_all_track_pt_by_layer0") bbbbee_denom = f.Get("Root__tc_bbbbee_all_track_pt_by_layer0") bbbeee_denom = f.Get("Root__tc_bbbeee_all_track_pt_by_layer0") bbeeee_denom = f.Get("Root__tc_bbeeee_all_track_pt_by_layer0") beeeee_denom = f.Get("Root__tc_beeeee_all_track_pt_by_layer0") p.plot_hist(bgs=[bbbbbb_denom, bbbbbe_denom, bbbbee_denom, bbbeee_denom, bbeeee_denom, beeeee_denom], options={ "output_name":"plots_denom/denom_pt.pdf", "bkg_sort_method":"unsorted", } ) bbbbbb_denom = f.Get("Root__tc_bbbbbb_all_track_eta_by_layer0") bbbbbe_denom = f.Get("Root__tc_bbbbbe_all_track_eta_by_layer0") bbbbee_denom = f.Get("Root__tc_bbbbee_all_track_eta_by_layer0") bbbeee_denom = f.Get("Root__tc_bbbeee_all_track_eta_by_layer0") bbeeee_denom = f.Get("Root__tc_bbeeee_all_track_eta_by_layer0") beeeee_denom = f.Get("Root__tc_beeeee_all_track_eta_by_layer0") p.plot_hist(bgs=[bbbbbb_denom, bbbbbe_denom, bbbbee_denom, bbbeee_denom, bbeeee_denom, beeeee_denom], options={ "output_name":"plots_denom/denom_eta.pdf", "bkg_sort_method":"unsorted", }