def run(self):
    """Load the summed sideband charged-hadron-iso data distribution,
    optionally reweight it, tag it with the data luminosity, and save
    the resulting canvas to ``self.plot_output_dir``.
    """
    loaded = gen.fs_filter_active_sort_load(
        {"analyzer": sb_anzlrs, "is_data": True})
    histo = next(rebin_chhadiso(gen.gen_sum([loaded])))

    # multiply with weight
    if do_dist_reweighting:
        weight = settings.post_proc_dict[
            "TemplateFitToolChHadIsoSbBkgInputBkgWeight"]
        histo = gen.op.prod((weight, histo))

    histo.lumi = settings.data_lumi_sum()
    self.result = [histo]

    canvases = gen.canvas((self.result,))
    saved = gen.save(canvases, lambda cnv: self.plot_output_dir + cnv.name)
    gen.consume_n_count(saved)
def write_tabular(self):
    """Write a LaTeX tabular of per-sample fake-rate counts.

    For every active MC sample (plus the three alternative ttbar
    generators) the table lists the Shilpi-method fake-rate count and
    the MC-truth count (tight minus real), both normalized to the
    recorded data luminosity.  Output goes to
    ``<plot_output_dir>/shilpi_tabular.tex``.
    """
    table = [
        r"\begin{tabular}{l c c } \\",
        r"\hline",
        r"\hline",
        r"Sample & fake-rate (MC) & fake-rate (MC truth) \\",
        r"\hline",
    ]
    data_lumi_sum = settings.data_lumi_sum()
    for smp_name in sorted(
            settings.active_samples + ["TTPoHe", "TTMadG", "TTMCNLO"]):
        smp = settings.samples[smp_name]
        if smp.is_data:
            continue
        # scale factor from the sample's generated lumi to the data lumi
        norm = data_lumi_sum / smp.lumi
        tight = smp.log_event_counts["FullTightIDCount,"] * norm
        real = smp.log_event_counts["realFullTightIDCount,"] * norm
        shilp = smp.log_event_counts["ShilpiSumDirect,"] * norm
        table.append(
            r"\;\;\;" + smp.name.replace("_", r"\_")
            + r" & $%.1f\pm%.1f$ & $%.1f\pm%.1f$ \\" % (
                shilp,
                0.3 * shilp,  # 30% on fr count
                tight - real,
                # MC stat error on the lumi-scaled count:
                # norm * sqrt(raw) == norm * sqrt((tight-real)/norm)
                #                  == ((tight-real) * norm) ** .5
                # (the previous extra "/ norm" reduced this to the raw,
                # unscaled Poisson error, inconsistent with the scaled count)
                ((tight - real) * norm) ** .5,
            )
        )
    table += (
        r"\hline",
        r"\hline",
        r"\end{tabular}",
    )
    with open(self.plot_output_dir + "shilpi_tabular.tex", "w") as f:
        f.writelines(l + "\n" for l in table)
def get_shilp_counts(self, c):
    """Accumulate lumi-normalized counts over all MC samples onto *c*.

    ``c.tight`` receives the tight-ID count, ``c.shilp`` the MC-truth
    fake count (tight minus real), each scaled to the data luminosity.
    """
    lumi_data = settings.data_lumi_sum()
    for sample in settings.mc_samples().itervalues():
        scale = lumi_data / sample.lumi
        event_counts = sample.log_event_counts
        n_tight = event_counts[self.tight_cnt] * scale
        n_real = event_counts["realFullTightIDCount,"] * scale
        c.tight += n_tight
        c.shilp += n_tight - n_real
def run(self): wrp = next(rebin_chhadiso( gen.gen_sum( [gen.fs_filter_active_sort_load({ "analyzer" : "TemplateRandConereal", "is_data" : True })] ) )) # normalize to mc expectation integral_real = next( gen.gen_integral( gen.gen_norm_to_data_lumi( gen.filter( settings.post_proc_dict["TemplateStacks"], {"analyzer": "TemplateRandConereal"} ) ) ) ) print integral_real wrp = gen.op.prod(( gen.op.norm_to_integral(wrp), integral_real )) # multiply with weight if do_dist_reweighting: wrp = gen.op.prod(( settings.post_proc_dict["TemplateFitToolRandConeIsoInputSigWeight"], wrp, )) wrp.lumi = settings.data_lumi_sum() self.result = [wrp] gen.consume_n_count( gen.save( gen.canvas((self.result,)), lambda c: self.plot_output_dir + c.name ) )
def run(self):
    """Compute cross-section ratios from the fitted ttgamma signal count.

    Copies the fit result wrapper, accumulates lumi-normalized MC and
    raw data event counts, derives selection efficiencies/purity, and
    stores R_fid, R and the cross section (with stat errors) on the
    result wrapper.
    """
    self.configure()
    if not self.n_sig_ttgam_wrp:
        # no fit result available upstream; bail out without a result
        self.message("WARNING Did not find result in post_proc_dict. Skipping...")
        return

    # store results in wrapper
    # (deepcopy so additions below do not mutate the shared dict entry)
    r = copy.deepcopy(self.n_sig_ttgam_wrp)
    r.name = self.name
    self.result = r

    # prepare mc counts
    # plain attribute bag; all fields are scaled in one sweep below
    class counts(object):
        pass
    c = counts()
    c.sig_pre = 0.
    c.sig_fid = 0.
    c.sig_post = 0.
    c.bkg_pre = 0.
    c.bkg_post = 0.
    c.tt_pre = 0.
    c.tt_post = 0
    for smp in settings.mc_samples().itervalues():
        legend = smp.legend
        if legend == "t#bar{t}#gamma (Signal)":
            # signal counts use the signal-specific count names;
            # dividing by smp.lumi converts to a per-lumi rate
            c.sig_pre += smp.log_event_counts[self.get_sig_count_name(self.pre_count_name)] / smp.lumi
            c.sig_fid += smp.log_event_counts[self.get_sig_count_name(self.fid_count_name)] / smp.lumi
            c.sig_post += smp.log_event_counts[self.get_sig_count_name(self.post_count_name)] / smp.lumi
        else:
            c.bkg_pre += smp.log_event_counts[self.pre_count_name] / smp.lumi
            c.bkg_post += smp.log_event_counts[self.post_count_name] / smp.lumi
            # ttbar inclusive is counted both as background and separately
            if legend == "t#bar{t} inclusive":
                c.tt_pre += smp.log_event_counts[self.pre_count_name] / smp.lumi
                c.tt_post += smp.log_event_counts[self.post_count_name] / smp.lumi
    data_lumi_sum = settings.data_lumi_sum()
    # scale every accumulated per-lumi rate to the recorded data lumi
    for k in c.__dict__.keys():
        c.__dict__[k] *= data_lumi_sum

    # prepare data counts
    # (added after the scaling loop on purpose: raw data counts need no norm)
    c.data_pre = 0.
    c.data_post = 0.
    for smp in settings.data_samples().itervalues():
        c.data_pre += smp.log_event_counts[self.pre_count_name]
        c.data_post += smp.log_event_counts[self.post_count_name]

    # selection performance
    r.eff_gamma = c.sig_post / c.sig_fid        # photon-selection efficiency
    r.eff_gamma_fid = c.sig_fid / c.sig_pre     # fiducial acceptance
    r.pur_tt = (c.tt_pre + c.sig_pre) / (c.bkg_pre + c.sig_pre)
    r.N_presel_data = c.data_pre
    r.N_sel_data = c.data_post
    r.StoB_gamma = c.sig_post / c.bkg_post
    r.StoB_presel = c.tt_pre / (c.bkg_pre - c.tt_pre)

    # background-subtracted number of ttgamma signal events
    # r.n_sig_ttgam = self.n_sig_ttgam_wrp.n_sig_ttgam

    # R_fid
    # NOTE(review): r.n_sig_ttgam / r.n_sig_ttgam_err presumably come from
    # the deepcopied fit wrapper — confirm against the producing tool
    R_fid_denom = r.eff_gamma * r.N_presel_data * r.pur_tt
    r.R_fid = r.n_sig_ttgam / R_fid_denom
    r.R_fid_err_stat = r.n_sig_ttgam_err / R_fid_denom

    # R
    R_denom = r.eff_gamma_fid * r.eff_gamma * r.N_presel_data * r.pur_tt
    r.R = r.n_sig_ttgam / R_denom
    r.R_err_stat = r.n_sig_ttgam_err / R_denom

    # xsec
    r.xsec = r.R * settings.ttbar_xsec_cms
    r.xsec_err_stat = r.xsec * r.R_err_stat / r.R
    self.message(str(r))
def get_shilp_counts(self, c):
    """Accumulate lumi-normalized counts over all MC samples onto *c*.

    ``c.tight`` receives the tight-ID count and ``c.shilp`` the
    Shilpi-method count, each scaled to the data luminosity.
    """
    lumi_data = settings.data_lumi_sum()
    for sample in settings.mc_samples().itervalues():
        scale = lumi_data / sample.lumi
        event_counts = sample.log_event_counts
        c.tight += event_counts[self.tight_cnt] * scale
        c.shilp += event_counts[self.shilp_cnt] * scale