# --- Background-only fits for the low (SRiL) and high (SRiH) signal regions ---
# NOTE(review): this fragment starts mid-script — `fits_dir`, `fit_srl_dir`,
# `histograms_srl_path`, `histograms_srh_path`, `configfile`, `log_dir`,
# `do_syst`, `run_cmd`, `yieldstable` and `tables_dir` are defined elsewhere.
fit_srh_dir = '%s/bkgonly_srh' % fits_dir

# Build the run_bkgonly.py commands (one fit per SR), logging to log_dir.
cmd_srl = 'run_bkgonly.py -i %s -o %s --sr SRiL -c %s --val --data data --log %s/bkgonly_fit_srl.log' % (histograms_srl_path, fit_srl_dir, configfile, log_dir)
cmd_srh = 'run_bkgonly.py -i %s -o %s --sr SRiH -c %s --val --data data --log %s/bkgonly_fit_srh.log' % (histograms_srh_path, fit_srh_dir, configfile, log_dir)

# Optionally include systematics in the fit.
if do_syst:
    cmd_srl += ' --syst'
    cmd_srh += ' --syst'

run_cmd(cmd_srl)
run_cmd(cmd_srh)

# After-fit workspaces produced by the background-only fits.
ws_srl = fit_srl_dir + "/BkgOnlyFit_combined_BasicMeasurement_model_afterFit.root"
ws_srh = fit_srh_dir + "/BkgOnlyFit_combined_BasicMeasurement_model_afterFit.root"

# Normalization factors (mu values) extracted from each workspace.
mus_srl = get_normalization_factors(ws_srl)
mus_srh = get_normalization_factors(ws_srh)

#-------
# Tables
#-------
# Comma-separated sample list; bracketed groups are merged into one table row.
backgrounds_str = 'photonjet,wgamma,[zllgamma,znunugamma],ttbarg,efake,jfake,[diphoton,vgammagamma]'

# Yields tables
## CR
yieldstable(ws_srl, backgrounds_str, 'CRQ,CRW,CRT', tables_dir+'/table_cr_srl.tex', 'CR for SRL', is_cr=True)
yieldstable(ws_srh, backgrounds_str, 'CRQ,CRW,CRT', tables_dir+'/table_cr_srh.tex', 'CR for SRH', is_cr=True)

## VR
def prepare_histograms_for_plots(input_path, output_path, regions, backgrounds,
                                 variables, ws=None, merge_dict=None, norm_dict=None):
    """Read region/variable histograms from a rootfile, optionally scale and
    merge backgrounds, and write everything to a new rootfile.

    Parameters:
        input_path:  path of the input rootfile with data/background histograms.
        output_path: path of the output rootfile (recreated).
        regions:     iterable of region names.
        backgrounds: iterable of background sample names.
        variables:   iterable of variable names.
        ws:          optional bkg-only fit workspace; if given together with
                     norm_dict, backgrounds are scaled by the fitted mu values.
        norm_dict:   mapping {control_region: background_name} to scale.
        merge_dict:  mapping {merged_name: [component names]} to merge.
    """
    # Avoid shared mutable default arguments.
    if merge_dict is None:
        merge_dict = {}
    if norm_dict is None:
        norm_dict = {}

    syst = 'Nom'  # only nominal for now

    # NOTE(review): the input file is never closed; histograms read from it may
    # be owned by the TFile, so closing it here could invalidate them — confirm
    # whether get_histogram_from_file detaches them (SetDirectory(0)).
    file_ = ROOT.TFile.Open(input_path)

    h_data = []
    h_bkg = {name: [] for name in backgrounds}

    for region in regions:
        for variable in variables:
            ## data (no systematic suffix)
            h_data.append(get_histogram_from_file(file_, 'data', variable, region, ''))

            ## backgrounds
            for name in backgrounds:
                h_bkg[name].append(get_histogram_from_file(file_, name, variable, region, syst))

    # Scale background with scale factors, if workspace not None
    if ws is not None and norm_dict:
        mus = get_normalization_factors(ws)
        for cr, bkg in norm_dict.items():
            if cr in mus:
                mu = mus[cr]  # (value, error); only the central value is used
                for hist in h_bkg[bkg]:
                    hist.Scale(mu[0])

    # Merge backgrounds to plot
    if merge_dict:
        for merge_name, merge_list in merge_dict.items():
            # Clone the first component under the merged name, then add the rest.
            h_bkg[merge_name] = [
                hist.Clone(hist.GetName().replace(merge_list[0], merge_name))
                for hist in h_bkg[merge_list[0]]
            ]
            for name in merge_list[1:]:
                for h1, h2 in zip(h_bkg[merge_name], h_bkg[name]):
                    h1.Add(h2, 1)
            for name in merge_list:
                del h_bkg[name]

    # Write everything to the output file.
    fout = ROOT.TFile(output_path, 'recreate')
    for hist in h_data:
        hist.Write(hist.GetName())
    for hlist in h_bkg.values():
        for hist in hlist:
            hist.Write(hist.GetName())
    fout.Close()
def main(): parser = argparse.ArgumentParser(description='') # histograms file / output directory parser.add_argument('-i', dest='input_file') parser.add_argument('-o', dest='output', default='.') parser.add_argument('--save', help='Save histograms in this rootfile') # samples, regions, variables parser.add_argument('-v', '--variable', dest='variables', required=True) parser.add_argument('-r', '--region', dest='regions', help='regions separated by ,') # Backgrounds parser.add_argument('--mc', action='store_true', help='use all backgrounds from MC') # normalization parser.add_argument('--muq', help='mu value for photon+jet') parser.add_argument( '--ws', help='Bkg-only fit workspace to extract normalization factors') # other parser.add_argument('-l', dest='lumi') parser.add_argument('--data', help='data15|data16|data') parser.add_argument('--opt', action='store_true', help='Optimization plot') parser.add_argument('--sel', dest='selection', default='', help='Custom selection') parser.add_argument('--outname', help='If custom selection use this output_name') parser.add_argument('--n1', action='store_true', help='N-1 plot') parser.add_argument('--signal', action='store_true', help='Add signal samples (separated with ,)') parser.add_argument('--blind', action='store_true') parser.add_argument('--pl', action='store_true', help='publink') parser.add_argument('--www', action='store_true', help='create webpage') parser.add_argument('--ext', dest='extensions', default='pdf', help='') parser.add_argument('--ratio', default='default', help='ratio type: none,default') parser.add_argument('--sepfakes', action='store_true', help='separete efakes/jfakes') global args args = parser.parse_args() do_scale = True if args.lumi == '0': do_scale = False get_histogram = partial(miniutils.get_histogram, remove_var=args.n1, lumi=args.lumi, scale=do_scale) # regions if args.regions is not None: regions = args.regions.split(',') else: regions = [ '', ] # variables variables = 
args.variables.split(',') # systematics syst = 'Nom' # only nominal for now ## plots style set_atlas_style() # Backgrounds if args.mc: backgrounds = [ 'photonjet', 'multijet', 'zgamma', 'wgamma', 'wjets', 'zjets', 'ttbar', 'ttbarg', ] else: backgrounds = [ 'photonjet', 'zgamma', 'wgamma', 'ttbarg', 'jfake', 'efake', 'diphoton', # 'vgammagamma', ] # Standard DATA/Backgrounds plot for region in regions: for variable in variables: print 'plotting %s in region %s ...' % (variable, region) if args.input_file: selection = region else: if not args.selection: selection = getattr(regions_, region) else: selection = args.selection if args.selection: region_name = region else: region_name = region #[:-2] ## backgrounds h_bkg = OrderedDict() for name in backgrounds: h_bkg[name] = get_histogram(name, variable=variable, region=region_name, selection=selection, syst=syst) # If fit workspace given -> scale backgrounds according to normalization factos if args.ws is not None and os.path.isfile(args.ws): mus = get_normalization_factors(args.ws) if 'CRQ' in mus: mu = mus['CRQ'] #histogram_scale(h_bkg['photonjet'], *mu) h_bkg['photonjet'].Scale(mu[0]) if 'CRW' in mus: mu = mus['CRW'] #histogram_scale(h_bkg['wgamma'], *mu) h_bkg['wgamma'].Scale(mu[0]) if 'CRT' in mus: mu = mus['CRT'] #histogram_scale(h_bkg['ttbarg'], *mu) h_bkg['ttbarg'].Scale(mu[0]) else: if args.muq is not None: #histogram_scale(h_bkg['photonjet'], float(args.muq)) h_bkg['photonjet'].Scale(args.muq) # Merge backgrounds to plot ## V + jets if args.mc: h_bkg['vjets'] = h_bkg['wjets'].Clone() h_bkg['vjets'].Add(h_bkg['zjets'], 1) del h_bkg['wjets'] del h_bkg['zjets'] ## V + gamma # h_bkg['vgamma'] = h_bkg['wgamma'].Clone() # h_bkg['vgamma'].Add(h_bkg['zgamma'], 1) # del h_bkg['wgamma'] # del h_bkg['zgamma'] # if 'vqqgamma' in h_bkg: # h_bkg['vgamma'].Add(h_bkg['vqqgamma'], 1) # del h_bkg['vqqgamma'] # h_bkg['vgamma'].SetName(h_bkg['vgamma'].GetName().replace('wgamma', 'vgamma')) ## tt + gamma if args.mc: h_bkg['tgamma'] 
= h_bkg['ttbarg'].Clone() h_bkg['tgamma'].Add(h_bkg['ttbar'], 1) del h_bkg['ttbar'] else: h_bkg['tgamma'] = h_bkg['ttbarg'].Clone() del h_bkg['ttbarg'] h_bkg['tgamma'].SetName(h_bkg['tgamma'].GetName().replace( 'ttbarg', 'tgamma')) ## diphoton # if 'diphoton' in h_bkg: # h_bkg['diphoton'].Add(h_bkg['vgammagamma'], 1) # del h_bkg['vgammagamma'] ## fakes if not args.sepfakes: h_bkg['fakes'] = h_bkg['efake'].Clone() h_bkg['fakes'].Add(h_bkg['jfake'], 1) h_bkg['fakes'].SetName(h_bkg['efake'].GetName().replace( 'efake', 'fakes')) del h_bkg['efake'] del h_bkg['jfake'] ## data h_data = None if args.data: h_data = get_histogram(args.data, variable=variable, region=region_name, selection=selection, syst=syst, revert_cut=args.blind) ## signal h_signal = None if args.signal: h_signal = OrderedDict() if 'SRL' in region: signal1 = 'GGM_GG_bhmix_1900_450' signal2 = 'GGM_GG_bhmix_1900_650' elif 'SRH' in region: signal1 = 'GGM_GG_bhmix_1900_1810' signal2 = 'GGM_GG_bhmix_1900_1860' else: signal1 = 'GGM_GG_bhmix_1900_650' signal2 = 'GGM_GG_bhmix_1900_1650' h_signal[signal1] = get_histogram(signal1, variable=variable, region=region_name, selection=selection, syst=syst) h_signal[signal2] = get_histogram(signal2, variable=variable, region=region_name, selection=selection, syst=syst) varname = variable.replace('[', '').replace(']', '').replace('/', '_over_') if args.selection and args.outname: tag = args.outname else: tag = region if args.ws is not None: outname = os.path.join( args.output, 'can_{}_{}_afterFit'.format(tag, varname)) else: outname = os.path.join( args.output, 'can_{}_{}_beforeFit'.format(tag, varname)) do_plot(outname, variable, data=h_data, bkg=h_bkg, signal=h_signal, region_name=region, extensions=args.extensions.split(','), do_ratio=(args.ratio != 'none')) if args.pl: os.system('pl %s.pdf' % outname) # save if args.save is not None: file_name = args.save with RootFile(file_name, 'update') as f: f.write(h_data) for hist in h_bkg.itervalues(): f.write(hist) if h_signal 
is not None: for hist in h_signal.itervalues(): f.write(hist)
def yieldstable(workspace, samples, channels, output_name, table_name, is_cr=False, show_before_fit=False, unblind=True):
    """Produce LaTeX (.tex) and HTML (.html) yields tables for the given
    workspace, samples and regions.

    Parameters:
        workspace:       after-fit workspace path, or a .pickle of cached numbers.
        samples:         comma-separated sample string; [a,b] groups are merged.
        channels:        comma-separated region names (suffixed with '_cuts').
        output_name:     output .tex path (also used for .pickle/.html siblings).
        table_name:      caption/first-column header of the table.
        is_cr:           control-region table; implies show_before_fit.
        show_before_fit: also print before-fit expectations.
        unblind:         print observed event counts (else '-').
    """
    if is_cr:
        show_before_fit=True

    normalization_factors = get_normalization_factors(workspace)

    #sample_str = samples.replace(",","_")

    from cmdLineUtils import cmdStringToListOfLists
    samples_list = cmdStringToListOfLists(samples)

    regions_list = [ '%s_cuts' % r for r in channels.split(",") ]
    #samples_list = samples.split(",")

    # call the function to calculate the numbers, or take numbers from pickle file
    if workspace.endswith(".pickle"):
        print "READING PICKLE FILE"
        f = open(workspace, 'r')
        m = pickle.load(f)
        f.close()
    else:
        #m = YieldsTable.latexfitresults(workspace, regions_list, samples_list, 'obsData')
        m = latexfitresults(workspace, regions_list, samples_list)
        # Cache the computed numbers next to the output table.
        with open(output_name.replace('.tex', '.pickle'), 'w') as f:
            pickle.dump(m, f)

    # Region labels with LaTeX-escaped underscores.
    regions_names = [ region.replace("_cuts", "").replace('_','\_') for region in m['names'] ]

    field_names = [table_name,] + regions_names
    align = ['l',] + [ 'r' for i in regions_names ]

    # Decode grouped sample names (e.g. '[a,b]' -> merged display name).
    samples_list_decoded = []
    for isam, sample in enumerate(samples_list):
        sampleName = getName(sample)
        samples_list_decoded.append(sampleName)

    samples_list = samples_list_decoded

    # Two parallel tables: LaTeX output and pretty/HTML output.
    tablel = LatexTable(field_names, align=align, env=True)
    tablep = PrettyTable(field_names, align=align)

    # number of observed events
    if unblind:
        row = ['Observed events',] + [ '%d' % n for n in m['nobs'] ]
    else:
        row = ['Observed events',] + [ '-' for n in m['nobs'] ]

    tablel.add_row(row)
    tablep.add_row(row)
    tablel.add_line()
    tablep.add_line()

    #print the total fitted (after fit) number of events
    # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
    rowl = ['Expected SM events', ]
    rowp = ['Expected SM events', ]

    for index, n in enumerate(m['TOTAL_FITTED_bkg_events']):
        if (n - m['TOTAL_FITTED_bkg_events_err'][index]) > 0.:
            rowl.append('$%.2f \pm %.2f$' % (n, m['TOTAL_FITTED_bkg_events_err'][index]))
            rowp.append('%.2f ± %.2f' % (n, m['TOTAL_FITTED_bkg_events_err'][index]))
        else:
            #print "WARNING: negative symmetric error after fit extends below 0. for total bkg pdf: will print asymmetric error w/ truncated negative error reaching to 0."
            rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, m['TOTAL_FITTED_bkg_events_err'][index]))
            rowp.append('%.2f -%.2f +%.2f' % (n, n, m['TOTAL_FITTED_bkg_events_err'][index]))

    tablel.add_row(rowl)
    tablel.add_line()
    tablep.add_row(rowp)
    tablep.add_line()

    map_listofkeys = m.keys()

    # print fitted number of events per sample
    # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
    for sample in samples_list:
        for name in map_listofkeys:
            rowl = []
            rowp = []
            if not "Fitted_events_" in name:
                continue
            sample_name = name.replace("Fitted_events_", "")
            if sample_name != sample:
                continue
            rowl.append('%s' % labels_latex_dict.get(sample_name, sample_name).replace('_', '\_'))
            rowp.append('%s' % labels_html_dict.get(sample_name, sample_name))
            for index, n in enumerate(m[name]):
                if ((n - m['Fitted_err_'+sample][index]) > 0.) or not abs(n) > 0.00001:
                    rowl.append('$%.2f \\pm %.2f$' % (n, m['Fitted_err_'+sample][index]))
                    rowp.append('%.2f ± %.2f' % (n, m['Fitted_err_'+sample][index]))
                else:
                    #print "WARNING: negative symmetric error after fit extends below 0. for sample", sample, " will print asymmetric error w/ truncated negative error reaching to 0."
                    rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, m['Fitted_err_'+sample][index]))
                    rowp.append('%.2f -%.2f +%.2f' % (n, n, m['Fitted_err_'+sample][index]))
            tablel.add_row(rowl)
            tablep.add_row(rowp)

    tablel.add_line()
    tablep.add_line()

    # print the total expected (before fit) number of events
    if show_before_fit:

        # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
        rowl = ['Before SM events',]
        rowp = ['(before fit) SM events',]

        total_before = []
        purity_before = []

        for index, n in enumerate(m['TOTAL_MC_EXP_BKG_events']):
            if regions_names[index].startswith('CR'):
                total_before.append(n)
            rowl.append('$%.2f$' % n)
            rowp.append('%.2f' % n)

        tablel.add_row(rowl)
        tablel.add_line()
        tablep.add_row(rowp)
        tablep.add_line()

        map_listofkeys = m.keys()

        # print expected number of events per sample
        # if the N_fit - N_error extends below 0, make the error physical , meaning extend to 0
        for sample in samples_list:
            for name in map_listofkeys:
                rowl = []
                rowp = []
                if "MC_exp_events_" in name and sample in name:
                    sample_name = name.replace("MC_exp_events_","")
                    if sample_name != sample:
                        continue
                    rowl.append('(before fit) %s' % labels_latex_dict.get(sample_name, sample_name).replace('_', '\_'))
                    rowp.append('(before fit) %s' % labels_html_dict.get(sample_name, sample_name))
                    for index, n in enumerate(m[name]):
                        # NOTE(review): purity_before is filled in sample-loop order and
                        # later indexed in parallel with regions_names; this only lines
                        # up if the CR order matches CRQ/CRW/CRT sample order — verify.
                        if regions_names[index] == 'CRQ' and sample == 'photonjet':
                            purity_before.append(n)
                        if regions_names[index] == 'CRW' and sample == 'wgamma':
                            purity_before.append(n)
                        if regions_names[index] == 'CRT' and sample == 'ttbarg':
                            purity_before.append(n)
                        rowl.append('$%.2f$' % n)
                        rowp.append('%.2f' % n)
                    tablel.add_row(rowl)
                    tablep.add_row(rowp)

        tablel.add_line()
        tablep.add_line()

    # Extra CR-only summary: purity and normalization factors.
    if show_before_fit and all([r.startswith('CR') for r in regions_names]) and normalization_factors is not None:

        # NOTE(review): this spacer row hardcodes 4 columns (3 regions) — it
        # will not match tables with a different number of regions.
        tablel.add_row(['', '', '', ''])
        tablel.add_line()
        tablep.add_row(['', '', '', ''])
        tablep.add_line()

        # purity
        rowl = ['Background purity',]
        rowp = ['Background purity',]

        for index, region in enumerate(regions_names):
            # NOTE(review): no guard against total_before[index] == 0 or a
            # short purity_before list (IndexError/ZeroDivisionError possible).
            purity = int(purity_before[index]/total_before[index] * 100.)
            rowl.append('$%i\%%$' % purity)
            rowp.append('%i%%' % purity)

        tablel.add_row(rowl)
        tablel.add_line()
        tablep.add_row(rowp)
        tablep.add_line()

        # normalization
        rowl = ['Normalization factor ($\mu$)',]
        rowp = ['Normalization factor (mu)',]

        for region in regions_names:
            # normalization_factors[region] is a (value, error) tuple.
            rowl.append('$%.2f \pm %.2f$' % normalization_factors[region])
            rowp.append('%.2f ± %.2f' % normalization_factors[region])

        tablel.add_row(rowl)
        tablel.add_line()
        tablep.add_row(rowp)
        tablep.add_line()

    tablel.save_tex(output_name)
    with open(output_name.replace('.tex', '.html'), 'w+') as f:
        f.write(tablep.get_html_string())
def yieldstable(workspace, samples, channels, output_name, table_name='', show_before_fit=False, unblind=True, show_cr_info=False, cr_dict={}): if show_cr_info: show_before_fit = True normalization_factors = get_normalization_factors(workspace) samples_list = cmdStringToListOfLists(samples) regions_list = ['%s_cuts' % r for r in channels.split(",")] # call the function to calculate the numbers, or take numbers from pickle file if workspace.endswith(".pickle"): print "Reading from pickle file" f = open(workspace, 'r') m = pickle.load(f) f.close() else: #m = YieldsTable.latexfitresults(workspace, regions_list, samples_list, 'obsData') m = latexfitresults(workspace, regions_list, samples_list) with open(output_name.replace('.tex', '.pickle'), 'w') as f: pickle.dump(m, f) regions_names = [ region.replace("_cuts", "").replace('_', '\_') for region in m['names'] ] field_names = [ table_name, ] + regions_names align = [ 'l', ] + ['r' for i in regions_names] samples_list_decoded = [] for isam, sample in enumerate(samples_list): sampleName = getName(sample) samples_list_decoded.append(sampleName) samples_list = samples_list_decoded tablel = LatexTable(field_names, align=align, env=True) # number of observed events if unblind: row = [ 'Observed events', ] + ['%d' % n for n in m['nobs']] else: row = [ 'Observed events', ] + ['-' for n in m['nobs']] tablel.add_row(row) tablel.add_line() # Total fitted (after fit) number of events # if the N_fit - N_error extends below 0, make the error physical, meaning extend to 0 rowl = [ 'Expected SM events', ] for index, n in enumerate(m['TOTAL_FITTED_bkg_events']): if (n - m['TOTAL_FITTED_bkg_events_err'][index]) > 0.: rowl.append('$%.2f \pm %.2f$' % (n, m['TOTAL_FITTED_bkg_events_err'][index])) else: rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, m['TOTAL_FITTED_bkg_events_err'][index])) tablel.add_row(rowl) tablel.add_line() map_listofkeys = m.keys() # After fit number of events per sample (if the N_fit-N_error extends below 0, make the 
error physical, meaning extend to 0) for sample in samples_list: for name in map_listofkeys: rowl = [] if not "Fitted_events_" in name: continue sample_name = name.replace("Fitted_events_", "") if sample_name != sample: continue rowl.append('%s' % labels_latex_dict.get( sample_name, sample_name).replace('_', '\_')) for index, n in enumerate(m[name]): if ((n - m['Fitted_err_' + sample][index]) > 0.) or not abs(n) > 0.00001: rowl.append('$%.2f \\pm %.2f$' % (n, m['Fitted_err_' + sample][index])) else: rowl.append('$%.2f_{-%.2f}^{+%.2f}$' % (n, n, m['Fitted_err_' + sample][index])) tablel.add_row(rowl) tablel.add_line() # Total expected (before fit) number of events if show_before_fit: # if the N_fit - N_error extends below 0, make the error physical, meaning extend to 0 rowl = [ 'Before fit SM events', ] total_before = {} purity_before = {} for index, n in enumerate(m['TOTAL_MC_EXP_BKG_events']): reg_name = regions_names[index] if cr_dict and reg_name in cr_dict: total_before[reg_name] = n rowl.append('$%.2f$' % n) tablel.add_row(rowl) tablel.add_line() map_listofkeys = m.keys() # Expected number of events per sample (if the N_fit - N_error extends below 0, make the error physical, meaning extend to 0) for sample in samples_list: for name in map_listofkeys: rowl = [] if "MC_exp_events_" in name and sample in name: sample_name = name.replace("MC_exp_events_", "") if sample_name != sample: continue rowl.append('Before fit %s' % labels_latex_dict.get( sample_name, sample_name).replace('_', '\_')) for index, n in enumerate(m[name]): reg_name = regions_names[index] if cr_dict and reg_name in cr_dict and sample == cr_dict[ reg_name]: purity_before[reg_name] = n rowl.append('$%.2f$' % n) tablel.add_row(rowl) tablel.add_line() if show_cr_info and normalization_factors is not None: tablel.add_row(['' for i in range(len(regions_names) + 1)]) tablel.add_line() # purity rowl = [ 'Background purity', ] for region in regions_names: try: purity = int( round(purity_before[region] / 
total_before[region] * 100.)) rowl.append('$%i\%%$' % purity) except: rowl.append('-') tablel.add_row(rowl) tablel.add_line() # normalization rowl = [ 'Normalization factor ($\mu$)', ] for region in regions_names: try: rowl.append('$%.2f \pm %.2f$' % normalization_factors[region]) except: rowl.append('-') tablel.add_row(rowl) tablel.add_line() tablel.save_tex(output_name)
def main(): parser = argparse.ArgumentParser(description='') # histograms file / output directory parser.add_argument('-i', dest='input_file') parser.add_argument('-o', dest='output', default='.') parser.add_argument('--save', help='Save histograms in this rootfile') # samples, regions, variables parser.add_argument('-v', '--variable', dest='variables', required=True) parser.add_argument('-r', '--region', dest='regions', help='regions separated by ,') # Backgrounds parser.add_argument('--mc', action='store_true', help='use all backgrounds from MC') # normalization parser.add_argument('--ws', help='Bkg-only fit workspace to extract normalization factors') # other parser.add_argument('-l', dest='lumi') parser.add_argument('--data', help='data15|data16|data') parser.add_argument('--opt', action='store_true', help='Optimization plot') parser.add_argument('--sel', dest='selection', default='', help='Custom selection') parser.add_argument('--outname', help='If custom selection use this output_name') parser.add_argument('--n1', action='store_true', help='N-1 plot') parser.add_argument('--signal', action='store_true', help='Add signal samples (separated with ,)') parser.add_argument('--blind', action='store_true') parser.add_argument('--pl', action='store_true', help='publink') parser.add_argument('--www', action='store_true', help='create webpage') parser.add_argument('--ext', dest='extensions', default='pdf', help='') parser.add_argument('--ratio', default='default', help='ratio type: none,default') global args args = parser.parse_args() do_scale = True if args.lumi == '0': do_scale = False get_histogram = partial(miniutils.get_histogram, remove_var=args.n1, lumi=args.lumi, scale=do_scale) # regions if args.regions is not None: regions = args.regions.split(',') else: regions = ['',] # variables variables = args.variables.split(',') # systematics syst = 'Nom' # only nominal for now ## plots style set_atlas_style() # Backgrounds if args.mc: backgrounds = [ 'photonjet', 
'multijet', 'zgamma', 'wgamma', 'wjets', 'zjets', 'ttbar', 'ttbarg', ] else: backgrounds = [ 'photonjet', 'zgamma', 'wgamma', 'ttbarg', 'jfake', 'efake', 'diphoton', 'vgammagamma', ] # Plot from histograms file if args.input_file: ifile = ROOT.TFile.Open(args.input_file) for region in regions: region_name = region #.split('_')[0] for variable in variables: print 'plotting %s in region %s ...' % (variable, region) ## backgrounds h_bkg = OrderedDict() backgrounds = ['photonjet', 'vgamma', 'tgamma', 'diphoton', 'efake', 'jfake'] for name in backgrounds: h_bkg[name] = get_histogram_from_file(ifile, name, variable, region_name, syst=syst) ## data h_data = get_histogram_from_file(ifile, 'data', variable, region_name, syst=syst) ## signal h_signal = OrderedDict() if region.endswith('_L'): h_signal['GGM_M3_mu_1600_250'] = get_histogram_from_file(ifile, 'GGM_M3_mu_1600_250', variable, region_name, syst=syst) h_signal['GGM_M3_mu_1600_650'] = get_histogram_from_file(ifile, 'GGM_M3_mu_1600_650', variable, region_name, syst=syst) elif region.endswith('_H'): h_signal['GGM_M3_mu_1600_1250'] = get_histogram_from_file(ifile, 'GGM_M3_mu_1600_1250', variable, region_name, syst) h_signal['GGM_M3_mu_1600_1450'] = get_histogram_from_file(ifile, 'GGM_M3_mu_1600_1450', variable, region_name, syst) variable = variable.replace('/', '_over_') outname = os.path.join(args.output, 'can_{}_{}_afterFit'.format(region, variable)) do_plot(outname, variable, data=h_data, bkg=h_bkg, signal=h_signal, region_name=region) ifile.Close() sys.exit(0) # Standard DATA/Backgrounds plot for region in regions: for variable in variables: print 'plotting %s in region %s ...' 
% (variable, region) if args.input_file: selection = region else: if not args.selection: selection = getattr(regions_, region) else: selection = args.selection if args.selection: region_name = region else: region_name = region #[:-2] ## backgrounds h_bkg = OrderedDict() for name in backgrounds: h_bkg[name] = get_histogram(name, variable=variable, region=region_name, selection=selection, syst=syst) # If fit workspace given -> scale backgrounds according to normalization factos if args.ws is not None and os.path.isfile(args.ws): mus = get_normalization_factors(args.ws) if 'CRQ' in mus: mu = mus['CRQ'] histogram_scale(h_bkg['photonjet'], *mu) if 'CRW' in mus: mu = mus['CRW'] histogram_scale(h_bkg['wgamma'], *mu) if 'vqqgamma' in h_bkg: histogram_scale(h_bkg['vqqgamma'], *mu) if 'CRT' in mus: mu = mus['CRT'] histogram_scale(h_bkg['ttbarg'], *mu) # Merge backgrounds to plot ## V + jets if args.mc: h_bkg['vjets'] = h_bkg['wjets'].Clone() h_bkg['vjets'].Add(h_bkg['zjets'], 1) del h_bkg['wjets'] del h_bkg['zjets'] ## V + gamma # h_bkg['vgamma'] = h_bkg['wgamma'].Clone() # h_bkg['vgamma'].Add(h_bkg['zgamma'], 1) # del h_bkg['wgamma'] # del h_bkg['zgamma'] # if 'vqqgamma' in h_bkg: # h_bkg['vgamma'].Add(h_bkg['vqqgamma'], 1) # del h_bkg['vqqgamma'] # h_bkg['vgamma'].SetName(h_bkg['vgamma'].GetName().replace('wgamma', 'vgamma')) ## tt + gamma if args.mc: h_bkg['tgamma'] = h_bkg['ttbarg'].Clone() h_bkg['tgamma'].Add(h_bkg['ttbar'], 1) del h_bkg['ttbar'] else: h_bkg['tgamma'] = h_bkg['ttbarg'].Clone() del h_bkg['ttbarg'] h_bkg['tgamma'].SetName(h_bkg['tgamma'].GetName().replace('ttbarg', 'tgamma')) ## diphoton if 'diphoton' in h_bkg: h_bkg['diphoton'].Add(h_bkg['vgammagamma'], 1) del h_bkg['vgammagamma'] ## fakes h_bkg['fakes'] = h_bkg['efake'].Clone() h_bkg['fakes'].Add(h_bkg['jfake'], 1) h_bkg['fakes'].SetName(h_bkg['efake'].GetName().replace('efake', 'fakes')) del h_bkg['efake'] del h_bkg['jfake'] ## data h_data = None if args.data: h_data = get_histogram(args.data, 
variable=variable, region=region_name, selection=selection, syst=syst, revert_cut=args.blind) ## add overflow bins to the last bin for hist in h_bkg.itervalues(): histogram_add_overflow_bin(hist) if h_data is not None: histogram_add_overflow_bin(h_data) ## signal h_signal = None if args.signal: h_signal = OrderedDict() if region.endswith('L'): signal1 = 'GGM_M3_mu_1900_250' signal2 = 'GGM_M3_mu_1900_650' elif region.endswith('H'): signal1 = 'GGM_M3_mu_1900_1650' signal2 = 'GGM_M3_mu_1900_1850' h_signal[signal1] = get_histogram(signal1, variable=variable, region=region_name, selection=selection, syst=syst) h_signal[signal2] = get_histogram(signal2, variable=variable, region=region_name, selection=selection, syst=syst) histogram_add_overflow_bin(h_signal[signal1]) histogram_add_overflow_bin(h_signal[signal2]) varname = variable.replace('[', '').replace(']', '').replace('/', '_over_') if args.selection and args.outname: tag = args.outname else: tag = region if args.ws is not None: outname = os.path.join(args.output, 'can_{}_{}_afterFit'.format(tag, varname)) else: outname = os.path.join(args.output, 'can_{}_{}_beforeFit'.format(tag, varname)) do_plot(outname, variable, data=h_data, bkg=h_bkg, signal=h_signal, region_name=region, extensions=args.extensions.split(','), do_ratio=(args.ratio!='none')) if args.pl: os.system('pl %s.pdf' % outname) # save if args.save is not None: file_name = args.save with RootFile(file_name, 'update') as f: f.write(h_data) for hist in h_bkg.itervalues(): f.write(hist) if h_signal is not None: for hist in h_signal.itervalues(): f.write(hist)