def run(self):
    """
    Build the fake/sideband shape ratio for the first active TT sample.

    Loads the fake-template and sideband histograms, rebins both,
    normalizes each to unit integral and divides fake by sideband.
    The resulting ratio wrapper is stored in ``self.result`` and drawn
    (with a horizontal grid) into ``self.plot_output_dir``.
    """
    # first active sample whose name starts with "TT" (e.g. "TTMadG")
    top_sample = next(s for s in settings.active_samples if s[:2] == "TT")
    wrp_fake = next(rebin_chhadiso(gen.fs_filter_sort_load({
        "analyzer": "TemplateChHadIsofake",
        "sample": top_sample,
    })))
    wrp_sb = gen.op.merge(rebin_chhadiso(gen.fs_filter_sort_load({
        "analyzer": sb_anzlrs,
        "sample": top_sample,
    })))
    # shape ratio: normalize both to unit integral before dividing
    wrp = gen.op.div((
        gen.op.norm_to_integral(wrp_fake),
        gen.op.norm_to_integral(wrp_sb),
    ))
    wrp.lumi = 1.
    wrp.draw_option = "E1"
    self.result = wrp
    # BUGFIX: previously a fresh canvas stream was generated for saving,
    # so the SetGridy(1) applied to the first (discarded) canvas list
    # never appeared in the saved plot. Save the very canvases we styled.
    cnvs = list(gen.canvas(((wrp,),)))
    cnvs[0].canvas.SetGridy(1)
    gen.consume_n_count(
        gen.save(
            cnvs,
            lambda c: self.plot_output_dir + c.name
        )
    )
    del wrp.draw_option
def store_results(self):
    """Render the chi2, truth-vs-fitted and difference plots, then save."""
    # chi2 + truth-vs-fitted canvases carry a legend in the upper area
    fit_cnvs = gen.canvas(
        [self.plot_chi2] + self.plots_truth_vs_fitted,
        [rnd.Legend(None, True, y_pos=0.25)]
    )
    diff_cnvs = gen.canvas(self.plots_diffs)
    stream = itertools.chain(fit_cnvs, diff_cnvs)
    stream = gen.save(stream, lambda c: self.plot_output_dir + c.name)
    gen.consume_n_count(stream)
def run(self):
    """
    Sum the sideband data histograms, optionally reweight them, scale to
    the full data luminosity and store/plot the result.
    """
    summed = gen.gen_sum([
        gen.fs_filter_active_sort_load({
            "analyzer": sb_anzlrs,
            "is_data": True,
        })
    ])
    wrp = next(rebin_chhadiso(summed))
    # multiply with weight
    if do_dist_reweighting:
        weight = settings.post_proc_dict[
            "TemplateFitToolChHadIsoSbBkgInputBkgWeight"]
        wrp = gen.op.prod((weight, wrp))
    wrp.lumi = settings.data_lumi_sum()
    self.result = [wrp]
    saved = gen.save(
        gen.canvas((self.result,)),
        lambda c: self.plot_output_dir + c.name
    )
    gen.consume_n_count(saved)
def run(self):
    """
    Load, stack, print and save histograms in a stream.
    """
    # combined operation for loading, filtering, stacking, etc..
    # the output looks like: [(stack1, data1), (stack2, data2), ...]
    stream_stack_n_data = gen.fs_mc_stack_n_data_sum({
        "name": "histo",
        "analyzer": ["CrtlFiltEt", "CrtlFiltEta"]
    })

    # plot (stack, data) pairs into canvases, with legend
    stream_canvas = gen.canvas(
        stream_stack_n_data,
        [cmstoolsac3b.rendering.Legend]
    )

    # store into dir of this tool
    stream_canvas = gen.save(
        stream_canvas,
        lambda wrp: self.plot_output_dir + wrp.name,
        # this function returns a path without postfix
        settings.rootfile_postfixes
    )

    # pull everything through the stream
    count = gen.consume_n_count(stream_canvas)

    # make a nice statement
    # BUGFIX: count is an integer; concatenating it directly to str
    # raised TypeError, so the final message was never printed.
    self.message(
        "INFO: " + self.name + " produced " + str(count) + " canvases.")
def run(self):
    """
    Load, stack, print and save histograms in a stream.
    """
    # combined operation for loading, filtering, stacking, etc..
    # the output looks like: [(stack1, data1), (stack2, data2), ...]
    stream_stack_n_data = gen.fs_mc_stack_n_data_sum(
        {
            "name": "histo",
            "analyzer": ["CrtlFiltEt", "CrtlFiltEta"]
        }
    )

    # plot (stack, data) pairs into canvases, with legend
    stream_canvas = gen.canvas(
        stream_stack_n_data,
        [cmstoolsac3b.rendering.Legend]
    )

    # store into dir of this tool
    stream_canvas = gen.save(
        stream_canvas,
        lambda wrp: self.plot_output_dir + wrp.name,
        # this function returns a path without postfix
        settings.rootfile_postfixes
    )

    # pull everything through the stream
    count = gen.consume_n_count(stream_canvas)

    # make a nice statement
    # BUGFIX: count is an integer; str+int concatenation raised TypeError
    self.message("INFO: "+self.name+" produced "+str(count)+" canvases.")
def run(self):
    """
    Load, stack, print and save histograms in a stream.
    """
    # combined operation for loading, filtering, stacking, etc..
    # the output looks like: [(stack1, data1), (stack2, data2), ...]
    stream_stack_n_data = gen.fs_mc_stack_n_data_sum(
        self.histo_filter_dict
    )

    # can be saved for later use.
    if self.store_stack_and_data_in_pool:
        stream_stack_n_data = self.store_to_pool(stream_stack_n_data)

    # plot (stack, data) pairs into canvases, with decorators
    stream_canvas = gen.canvas(
        stream_stack_n_data,
        self.canvas_decorators
    )

    # store into dir of this tool
    stream_canvas = gen.save(
        stream_canvas,
        lambda wrp: self.plot_output_dir + wrp.analyzer
    )

    # pull everything through the stream
    count = gen.consume_n_count(stream_canvas)

    # make a nice statement
    # BUGFIX: count is an integer; str+int concatenation raised TypeError
    self.message("INFO: "+self.name+" produced "+str(count)+" canvases.")
def run(self):
    """
    Load, stack, print and save histograms in a stream.
    """
    # combined operation for loading, filtering, stacking, etc..
    # the output looks like: [(stack1, data1), (stack2, data2), ...]
    stream_stack_n_data = gen.fs_mc_stack_n_data_sum(
        self.histo_filter_dict)

    # can be saved for later use.
    if self.store_stack_and_data_in_pool:
        stream_stack_n_data = self.store_to_pool(stream_stack_n_data)

    # plot (stack, data) pairs into canvases, with decorators
    stream_canvas = gen.canvas(stream_stack_n_data, self.canvas_decorators)

    # store into dir of this tool
    stream_canvas = gen.save(
        stream_canvas,
        lambda wrp: self.plot_output_dir + wrp.analyzer)

    # pull everything through the stream
    count = gen.consume_n_count(stream_canvas)

    # make a nice statement
    # BUGFIX: count is an integer; concatenating it directly to str
    # raised TypeError, so the final message was never printed.
    self.message(
        "INFO: " + self.name + " produced " + str(count) + " canvases.")
def save_canvas(wrps, postfix):
    """
    Draw the wrappers, save, flip to log scale and save again.

    Closure over ``self``: output goes to ``self.plot_output_dir`` with
    the given *postfix* (log-scale copies get an extra ``_log`` suffix).
    """
    cnv = gen.canvas(
        wrps,
        [rnd.BottomPlotRatio, rnd.Legend, com.SimpleTitleBox]
    )
    cnv = gen.save(cnv, lambda c: self.plot_output_dir + c.name + postfix)
    # same canvases once more, this time with logarithmic y-axis
    cnv = gen.switch_log_scale(cnv)
    cnv = gen.save(
        cnv,
        lambda c: self.plot_output_dir + c.name + postfix + "_log"
    )
    gen.consume_n_count(cnv)
def run(self):
    """
    For each quantity, evaluate the PDF variation, histogram the
    collected deltas and save one canvas per quantity.
    """
    self.result = settings.post_proc_dict["XsecCalculatorChHadIsoSBID"]
    for quantity in ["R", "R_fid", "xsec"]:
        # fills self.values with the per-eigenvector deltas
        self.calc_variation(quantity)
        h = wrappers.HistoWrapper(
            util.list2histogram(
                self.values,
                "PDF_uncert_distr_" + quantity,
                ";#Delta(" + quantity
                + ");CTEQ61 PDF eigenvector evaluation",
                60
            )
        )
        # reset the accumulator for the next quantity
        del self.values[:]
        stream = gen.canvas([[h]])
        stream = gen.save(stream, lambda c: self.plot_output_dir + c.name)
        gen.consume_n_count(stream)
def run(self):
    """
    Fetch the merged sideband-background histogram, optionally reweight
    it, normalize to data luminosity, store and plot the result.
    """
    wrp = tmpl_fit.get_merged_sbbkg_histo(sample)
    # multiply with weight
    if tmpl_fit.do_dist_reweighting:
        weight = settings.post_proc_dict[
            "TemplateFitToolChHadIsoSbBkgInputBkgWeight"]
        wrp = gen.op.prod((weight, wrp))
    self.result = list(gen.gen_norm_to_data_lumi((wrp,)))
    saved = gen.save(
        gen.canvas((self.result,)),
        lambda c: self.plot_output_dir + c.name
    )
    gen.consume_n_count(saved)
def run(self):
    """
    Build the random-cone signal template from data: sum the data
    histograms, scale the shape to the MC expectation integral,
    optionally reweight, set the data luminosity and store/plot.
    """
    wrp = next(rebin_chhadiso(
        gen.gen_sum([
            gen.fs_filter_active_sort_load({
                "analyzer": "TemplateRandConereal",
                "is_data": True,
            })
        ])
    ))
    # normalize to mc expectation (integral of the stacked MC template)
    integral_real = next(
        gen.gen_integral(
            gen.gen_norm_to_data_lumi(
                gen.filter(
                    settings.post_proc_dict["TemplateStacks"],
                    {"analyzer": "TemplateRandConereal"}
                )
            )
        )
    )
    # NOTE(review): leftover debug output; parenthesized so the statement
    # is valid under both Python 2 and 3 (identical output for one arg).
    # Consider removing or routing through self.message.
    print(integral_real)
    wrp = gen.op.prod((
        gen.op.norm_to_integral(wrp),
        integral_real
    ))
    # multiply with weight
    if do_dist_reweighting:
        wrp = gen.op.prod((
            settings.post_proc_dict[
                "TemplateFitToolRandConeIsoInputSigWeight"],
            wrp,
        ))
    wrp.lumi = settings.data_lumi_sum()
    self.result = [wrp]
    gen.consume_n_count(
        gen.save(
            gen.canvas((self.result,)),
            lambda c: self.plot_output_dir + c.name
        )
    )
def do_evaluation(self, wrp_list, maker_func=None):
    """
    Pick the wrapper with the smallest ``float`` value, build one 1-D
    histogram per observable (sieie, phoiso, neuiso), tag each with the
    key of the minimizing wrapper, store them in ``self.result`` and
    save one canvas per histogram.

    :param wrp_list:   wrappers to evaluate; also cached by histo_key
                       in ``self.wrp_dict``
    :param maker_func: histogram factory; defaults to
                       ``self.make_histo_1d``
    """
    if not maker_func:
        maker_func = self.make_histo_1d
    self.wrp_dict = dict((w.histo_key, w) for w in wrp_list)
    min_wrp = min(wrp_list, key=lambda w: w.float)
    # one histogram per observable, each tagged with the minimizing key
    histos = [maker_func(min_wrp, obs)
              for obs in ("sieie", "phoiso", "neuiso")]
    for h in histos:
        h.min_token = min_wrp.histo_key
    self.result = histos
    gen.consume_n_count(
        gen.save(
            gen.canvas((h,) for h in self.result),
            lambda c: self.plot_output_dir + c.name
        )
    )
def run(self):
    """
    Load and merge the SBID histograms for the sample, optionally
    reweight, normalize to data luminosity, store and plot the result.
    """
    loaded = tmpl_fit.rebin_chhadiso(gen.fs_filter_sort_load({
        "analyzer": "PlotSBID",
        "sample": sample,
    }))
    wrp = gen.op.merge(loaded)
    # multiply with weight
    if tmpl_fit.do_dist_reweighting:
        weight = settings.post_proc_dict[
            "TemplateFitToolChHadIsoSBIDInputBkgWeight"]
        wrp = gen.op.prod((weight, wrp))
    self.result = list(gen.gen_norm_to_data_lumi((wrp,)))
    gen.consume_n_count(
        gen.save(
            gen.canvas((self.result,)),
            lambda c: self.plot_output_dir + c.name
        )
    )