def make_fakes_view(weight_type, scale):
    '''Build the weighted fail-region ("fakes") views.

    Scales data and WZ by *scale*, subtracts WZ from the data (clamped to
    stay positive), then carves out the three fail-region subdirectories
    for the requested weight type.  Returns the styled obj1, obj2 and
    obj12 views plus the negated obj12 view (used to undo double counting).
    '''
    bare_scaled = views.ScaleView(all_data_view, scale)
    wz_scaled = views.ScaleView(all_wz_view, scale)
    wz_subtracted = SubtractionView(bare_scaled, wz_scaled,
                                    restrict_positive=True)
    # Weighted views of the single- and double-fail regions.
    region_specs = [('f1p2f3', '1'), ('p1f2f3', '2'), ('f1f2f3', '12')]
    region_views = [
        views.SubdirectoryView(
            wz_subtracted,
            'ss/%s/%s/%s%s' % (tau_charge, region, weight_type, label))
        for region, label in region_specs
    ]
    # Give the individual object views nice colors and titles.
    titled = []
    for region_view, style_key, label in zip(region_views,
                                             ['TT*', 'QCD*', 'WW*'],
                                             ['1', '2', '12']):
        titled.append(views.TitleView(
            views.StyleView(
                region_view,
                **remove_name_entry(data_styles[style_key])),
            'Reducible bkg. %s' % label))
    obj1_view, obj2_view, obj12_view = titled
    subtract_obj12_view = views.ScaleView(obj12_view, -1)
    return obj1_view, obj2_view, obj12_view, subtract_obj12_view
def correct_for_contrib_in_fakes(x, fudge_factor=1.0):
    '''
    Make a view of MC which corrects for the contribution in the fakes
    '''
    # Project x into the fake region, negate it, and add it back to x so
    # its fake-region contribution is subtracted out.
    negated_fakes = views.ScaleView(views.PathModifierView(x, get_fakes), -1)
    corrected = views.SumView(x, negated_fakes)
    # Fudge factor from Zmumu (from H2TAu inclusive)
    return views.ScaleView(corrected, fudge_factor)
def __init__(self, main_view, *to_subtract, **kwargs):
    '''Combine *main_view* with the negation of every *to_subtract* view.

    The base class is a SumView, so summing the main view with the
    scaled-by-(-1) views implements the subtraction.
    '''
    negated_views = [views.ScaleView(view, -1) for view in to_subtract]
    super(SubtractionView, self).__init__(main_view, *negated_views)
    # When True, downstream code clamps the combined result to be >= 0.
    self.restrict_positive = kwargs.get('restrict_positive', False)
def make_signal_views(self, rebin, unblinded=True):
    ''' Make signal views with FR background estimation '''
    # Prompt diboson MC backgrounds, restricted to the final selection.
    wz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('WZ*'), rebin),
        'os/All_Passed/')
    zz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('ZZ*'), rebin),
        'os/All_Passed/')
    all_data_view = self.rebin_view(self.get_view('data'), rebin)
    # NOTE(review): the unblinded data view is substituted only when the
    # plotter itself is blinded -- confirm this flag interplay is intended.
    if unblinded and self.blind:
        all_data_view = self.rebin_view(
            self.get_view('data', 'unblinded_view'), rebin)
    data_view = views.SubdirectoryView(all_data_view, 'os/All_Passed/')
    # Categories (to match Abdollah's naming convention)
    probes = [p + 'IsoFailed' for p in products_map[self.channel][1]]
    cat0 = 'os/' + '_'.join(probes) + '/all_weights_applied/'
    cat1 = 'os/' + probes[0] + '/obj1_weight/'
    cat2 = 'os/' + probes[1] + '/obj2_weight/'
    # View of weighted obj1-fails data
    cat1_view = views.SubdirectoryView(all_data_view, cat1)
    # View of weighted obj2-fails data
    cat2_view = views.SubdirectoryView(all_data_view, cat2)
    # View of weighted obj1&2-fails data
    cat0_view = views.SubdirectoryView(all_data_view, cat0)
    # The double-fail category is subtracted to avoid double counting.
    subtract_cat0_view = views.ScaleView(cat0_view, -1)
    # Corrected fake view
    Zjets_view = views.SumView(cat1_view, cat2_view, subtract_cat0_view)
    Zjets_view = views.TitleView(
        views.StyleView(Zjets_view, **data_styles['Zjets*']), 'Non-prompt')
    # Charge mis-ID estimate taken from charge-weighted OS data.
    charge_fakes = views.TitleView(
        views.StyleView(
            views.SubdirectoryView(all_data_view, 'os/p1p2p3/c1'),
            **data_styles['TT*']), 'Charge mis-id')
    output = {
        'wz': wz_view,
        'zz': zz_view,
        'data': data_view,
        'cat1': cat1_view,
        'cat2': cat2_view,
        'Zjets': Zjets_view,
        'charge_fakes': charge_fakes,
    }
    # Add signal
    for mass in [110, 120, 130, 140]:
        vh_view = views.SubdirectoryView(
            self.rebin_view(self.get_view('VH_H2Tau_M-%i' % mass), rebin),
            'os/All_Passed/')
        output['vh%i' % mass] = vh_view
        ww_view = views.SubdirectoryView(
            self.rebin_view(self.get_view('VH_%i_HWW' % mass), rebin),
            'os/All_Passed/')
        output['vh%i_hww' % mass] = ww_view
        # Total signal = H->tautau + H->WW at this mass point.
        output['signal%i' % mass] = views.SumView(ww_view, vh_view)
    return output
def make_signal_views(self, rebin, unblinded=True):
    ''' Make signal views with FR background estimation '''
    # Prompt diboson MC backgrounds in the same-sign signal region.
    wz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('WZJetsTo3LNu*'), rebin),
        'ss/p1p2p3/')
    zz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('ZZJetsTo4L*'), rebin),
        'ss/p1p2p3/')
    all_data_view = self.rebin_view(self.get_view('data'), rebin)
    if unblinded:
        all_data_view = self.rebin_view(
            self.get_view('data', 'unblinded_view'), rebin)
    data_view = views.SubdirectoryView(all_data_view, 'ss/p1p2p3/')
    # View of weighted obj1-fails data
    obj1_view = views.SubdirectoryView(all_data_view, 'ss/f1p2p3/w1')
    # View of weighted obj2-fails data
    obj2_view = views.SubdirectoryView(all_data_view, 'ss/p1f2p3/w2')
    # View of weighted obj1&2-fails data
    obj12_view = views.SubdirectoryView(all_data_view, 'ss/f1f2p3/w12')
    # The double-fail region is subtracted to avoid double counting.
    subtract_obj12_view = views.ScaleView(obj12_view, -1)
    # Corrected fake view
    fakes_view = views.SumView(obj1_view, obj2_view, subtract_obj12_view)
    fakes_view = views.TitleView(
        views.StyleView(fakes_view, **data_styles['Zjets*']), 'Non-prompt')
    # Charge mis-ID estimate taken from charge-weighted OS data.
    charge_fakes = views.TitleView(
        views.StyleView(
            views.SubdirectoryView(all_data_view, 'os/p1p2p3/c1'),
            **data_styles['TT*']), 'Charge mis-id')
    output = {
        'wz': wz_view,
        'zz': zz_view,
        'data': data_view,
        'obj1': obj1_view,
        'obj2': obj2_view,
        'fakes': fakes_view,
        'charge_fakes': charge_fakes,
    }
    # Add signal
    for mass in [110, 120, 130, 140]:
        vh_view = views.SubdirectoryView(
            self.rebin_view(self.get_view('VH_*%i' % mass), rebin),
            'ss/p1p2p3/')
        output['vh%i' % mass] = vh_view
        ww_view = views.SubdirectoryView(
            self.rebin_view(self.get_view('WH_%i*' % mass), rebin),
            'ss/p1p2p3/')
        output['vh%i_hww' % mass] = ww_view
        # Total signal = H->tautau + H->WW at this mass point.
        output['signal%i' % mass] = views.SumView(ww_view, vh_view)
    return output
def make_fakes_view(sign, weight_type):
    '''Return (obj1, obj2, -obj12) weighted fail-region views of data.

    obj12 is returned negated so that summing the three pieces subtracts
    the double-fail region and avoids double counting.
    '''
    fail_regions = ('f1p2', 'p1f2', 'f1f2')
    obj1_view, obj2_view, obj12_view = [
        views.SubdirectoryView(
            data_view, '%s/%s/%s' % (sign, region, weight_type))
        for region in fail_regions
    ]
    subtract_obj12_view = views.ScaleView(obj12_view, -1)
    return obj1_view, obj2_view, subtract_obj12_view
def plot_final(self, variable, rebin=1, xaxis='', maxy=10, show_error=False,
               magnifyHiggs=5):
    ''' Plot the final output - with bkg. estimation

    Draws the stacked backgrounds plus a magnified Higgs signal for
    [variable], optionally overlays the total background error band, then
    draws the data with Poisson error bars and a legend.
    '''
    sig_view = self.make_signal_views(rebin)
    # BUG FIX: this view was bound to `vh_nx` but referenced below as
    # `vh_10x`, which raised a NameError when the stack was built.
    vh_10x = views.TitleView(
        views.StyleView(
            views.ScaleView(sig_view['signal120'], magnifyHiggs),
            **data_styles['VH*']),
        "(%s#times) m_{H} = 120" % magnifyHiggs)
    stack = views.StackView(
        sig_view['wz'],
        sig_view['zz'],
        sig_view['fakes'],
        vh_10x,
    )
    histo = stack.Get(variable)
    histo.Draw()
    histo.GetHistogram().GetXaxis().SetTitle(xaxis)
    histo.SetMaximum(maxy)
    self.keep.append(histo)
    # Add legend
    legend = self.add_legend(histo, leftside=False, entries=4)
    if show_error:
        bkg_error_view = BackgroundErrorView(
            sig_view['fakes'],
            sig_view['wz'],
            sig_view['zz'],
        )
        bkg_error = bkg_error_view.Get(variable)
        self.keep.append(bkg_error)
        bkg_error.Draw('pe2,same')
        legend.AddEntry(bkg_error)
    # Use poisson error bars on the data
    sig_view['data'] = PoissonView(sig_view['data'], x_err=False)
    data = sig_view['data'].Get(variable)
    data.Draw('pe,same')
    self.keep.append(data)
    #legend.AddEntry(data)
    legend.Draw()
def make_wz_cr_views(self, rebin):
    ''' Make WZ control region views with FR background estimation '''
    wz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('WZJetsTo3LNu*'), rebin),
        'ss/p1p2p3_enhance_wz/')
    zz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('ZZJetsTo4L*'), rebin),
        'ss/p1p2p3_enhance_wz/')
    all_data_view = self.rebin_view(self.get_view('data'), rebin)
    data_view = views.SubdirectoryView(all_data_view,
                                       'ss/p1p2p3_enhance_wz/')
    # View of weighted obj2-fails data
    fakes_view = views.SubdirectoryView(all_data_view,
                                        'ss/p1f2p3_enhance_wz/w2')
    fakes_view = views.StyleView(fakes_view, **data_styles['Zjets*'])
    # Correct the fake estimate by subtracting the prompt WZ+ZZ
    # contamination present in the weighted fail region.
    wz_in_fakes_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('WZJetsTo3LNu*'), rebin),
        'ss/p1f2p3_enhance_wz/w2')
    zz_in_fakes_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('ZZJetsTo4L*'), rebin),
        'ss/p1f2p3_enhance_wz/w2')
    diboson_view = views.SumView(wz_in_fakes_view, zz_in_fakes_view)
    inverted_diboson_view = views.ScaleView(diboson_view, -1)
    fakes_view = views.SumView(fakes_view, inverted_diboson_view)
    fakes_view = views.TitleView(fakes_view, 'Non-prompt')
    output = {
        'wz': wz_view,
        'zz': zz_view,
        'data': data_view,
        'fakes': fakes_view
    }
    # Add signal
    for mass in [110, 120, 130, 140]:
        vh_view = views.SubdirectoryView(
            self.rebin_view(self.get_view('VH_*%i' % mass), rebin),
            'ss/p1p2p3/')
        output['vh%i' % mass] = vh_view
    return output
def make_wz_cr_views(self, rebin=1, project=None, project_axis=None):
    ''' Make WZ control region views with FR background estimation '''
    def preprocess(view):
        # Optionally project onto one axis, then rebin.
        ret = view
        if project and project_axis:
            ret = ProjectionView(ret, project_axis, project)
        return RebinView(ret, rebin)
    wz_view_tautau_all = preprocess(
        self.get_view('WZJetsTo3LNu*ZToTauTau*'))
    wz_view_tautau = views.SubdirectoryView(
        wz_view_tautau_all, 'ss/tau_os/p1p2p3_enhance_wz/')
    # 7 TeV and 8 TeV productions use different WZ->3l sample names.
    tomatch = 'WZJetsTo3LNu' if self.sqrts == 7 else 'WZJetsTo3LNu_pythia'
    wz_view_3l_all = preprocess(self.get_view(tomatch))
    wz_view_3l = views.SubdirectoryView(
        wz_view_3l_all, 'ss/tau_os/p1p2p3_enhance_wz/')
    wz_view_all = views.SumView(wz_view_tautau_all, wz_view_3l_all)
    zz_view_all = preprocess(self.get_view('ZZJetsTo4L*'))
    zz_view = views.SubdirectoryView(
        zz_view_all, 'ss/tau_os/p1p2p3_enhance_wz/')
    all_data_view = preprocess(self.get_view('data'))
    data_view = views.SubdirectoryView(
        all_data_view, 'ss/tau_os/p1p2p3_enhance_wz/')
    # View of weighted obj2-fails data
    fakes_view = views.SubdirectoryView(
        all_data_view, 'ss/tau_os/p1f2p3_enhance_wz/w2')
    fakes_view = views.StyleView(
        fakes_view, **remove_name_entry(data_styles['Zjets*']))
    # Correct the fake estimate by subtracting the prompt WZ+ZZ
    # contamination present in the weighted fail region.
    wz_in_fakes_view = views.SubdirectoryView(
        wz_view_all, 'ss/tau_os/p1f2p3_enhance_wz/w2')
    zz_in_fakes_view = views.SubdirectoryView(
        zz_view_all, 'ss/tau_os/p1f2p3_enhance_wz/w2')
    diboson_view = views.SumView(wz_in_fakes_view, zz_in_fakes_view)
    inverted_diboson_view = views.ScaleView(diboson_view, -1)
    fakes_view = views.SumView(fakes_view, inverted_diboson_view)
    fakes_view = views.TitleView(fakes_view, 'Reducible bkg.')
    output = {
        'wz_ztt': wz_view_tautau,
        'wz_3l': wz_view_3l,
        'zz': zz_view,
        'data': data_view,
        'fakes': fakes_view
    }
    return output
def make_obj3_fail_cr_views(self, rebin):
    ''' Make views when obj3 fails, estimating the bkg in obj1 pass using
    f1p2f3 '''
    # Prompt diboson MC backgrounds in the obj3-fail control region.
    wz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('WZJetsTo3LNu*'), rebin),
        'ss/p1p2f3/')
    zz_view = views.SubdirectoryView(
        self.rebin_view(self.get_view('ZZJetsTo4L*'), rebin),
        'ss/p1p2f3/')
    all_data_view = self.rebin_view(self.get_view('data'), rebin)
    data_view = views.SubdirectoryView(all_data_view, 'ss/p1p2f3/')
    # View of weighted obj1-fails data
    obj1_view = views.SubdirectoryView(all_data_view, 'ss/f1p2f3/w1')
    # View of weighted obj2-fails data
    obj2_view = views.SubdirectoryView(all_data_view, 'ss/p1f2f3/w2')
    # View of weighted obj1&2-fails data
    obj12_view = views.SubdirectoryView(all_data_view, 'ss/f1f2f3/w12')
    # The double-fail region is subtracted to avoid double counting.
    subtract_obj12_view = views.ScaleView(obj12_view, -1)
    # Corrected fake view
    fakes_view = views.SumView(obj1_view, obj2_view, subtract_obj12_view)
    fakes_view = views.TitleView(
        views.StyleView(fakes_view, **data_styles['Zjets*']), 'Non-prompt')
    # Charge mis-ID estimate taken from charge-weighted OS data.
    charge_fakes = views.TitleView(
        views.StyleView(
            views.SubdirectoryView(all_data_view, 'os/p1p2f3/c1'),
            **data_styles['TT*']), 'Charge mis-id')
    output = {
        'wz': wz_view,
        'zz': zz_view,
        'data': data_view,
        'obj1': obj1_view,
        'obj2': obj2_view,
        'fakes': fakes_view,
        'charge_fakes': charge_fakes,
    }
    return output
def __init__(self, highv=None, lowv=None, centv=None):
    '''Hold high/low systematic variations and a central view.

    When no central view is supplied, it is derived as the average
    (low + high) / 2 of the two variations.
    '''
    self.highv = highv
    self.lowv = lowv
    if centv:
        self.centv = centv
    else:
        # Central estimate = midpoint of the two variations.
        self.centv = views.ScaleView(views.SumView(lowv, self.highv), 0.5)
def data_views(files, lumifiles, styles, forceLumi=-1):
    ''' Builds views of files.

    [files] gives an iterator of .root files with histograms to build.

    [lumifiles] gives the correspond list of .lumisum files which
    contain the effective integrated luminosity of the samples.

    The lumi to normalize to is taken as the sum of the data file int.
    lumis, unless [forceLumi] > 0, which overrides it.
    '''
    files = list(files)
    log.info("Creating views from %i files", len(files))
    # Map sample_name => root file
    histo_files = dict((extract_sample(x), io.open(x)) for x in files)
    # Map sample_name => lumi file
    lumi_files = dict((extract_sample(x), read_lumi(x)) for x in lumifiles)
    # Identify data files
    datafiles = set([name for name in histo_files.keys() if 'data' in name])
    log.info("Found the following data samples:")
    log.info(" ".join(datafiles))
    datalumi = 0
    for x in datafiles:
        if x not in lumi_files:
            raise KeyError(
                "Can't find a lumi file for %s - I have these ones: " % x
                + repr(lumi_files.keys()))
        datalumi += lumi_files[x]
    log.warning("-> total int. lumi = %0.0fpb-1", datalumi)
    if forceLumi > 0:
        datalumi = forceLumi
        log.warning("-> forcing lumi to = %0.0fpb-1", datalumi)
    # Figure out the dataset for each file, and the int lumi.
    # Key = dataset name
    # Value = {intlumi, rootpy file, weight, weighted view}
    output = {}
    has_data = False
    for sample in histo_files.keys():
        raw_file = histo_files[sample]
        intlumi = lumi_files[sample]
        # MC weight = target lumi / sample lumi; data is never reweighted.
        weight = 1
        if intlumi:
            weight = datalumi / intlumi
        if 'data' in sample:
            has_data = True
            weight = 1
        log.warning(
            "Building sample: %s => int lumi: %0.f pb-1. Weight => %0.2E",
            sample, intlumi, weight)
        view = views.ScaleView(raw_file, weight)
        unweighted_view = raw_file
        # Find the longest (i.e. most specific) matching style pattern
        style = get_best_style(sample, styles)
        if style:
            log.info("Found style for %s - applying Style View", sample)
            # Set style and title
            # title = the name of the sample, rootpy Legend uses this.
            nicename = copy.copy(style['name'])
            log.debug("sample name %s", nicename)
            # Drop the 'name' key: StyleView kwargs must be style
            # attributes only.  (Python 2 iteritems.)
            style_dict_no_name = dict(
                [i for i in style.iteritems() if i[0] != 'name'])
            view = views.TitleView(
                views.StyleView(view, **style_dict_no_name), nicename)
            unweighted_view = views.TitleView(
                views.StyleView(unweighted_view, **style_dict_no_name),
                nicename)
        else:
            log.warning("No matching style found for %s", sample)
        output[sample] = {
            'intlumi': intlumi,
            'file': raw_file,
            'weight': weight,
            'view': view,
            'unweighted_view': unweighted_view
        }
    if not has_data:
        return output
    # Merge the data into just 'data'
    log.info("Merging data together")
    output['data'] = {
        'intlumi': datalumi,
        'weight': 1,
        'view': views.SumView(*[output[x]['view'] for x in datafiles]),
        'unweighted_view': views.SumView(
            *[output[x]['unweighted_view'] for x in datafiles]),
    }
    return output
def plot_final(self, variable, rebin=1, xaxis='', maxy=24, show_error=False,
               qcd_correction=False, stack_higgs=True, qcd_weight_fraction=0.,
               x_range=None, show_charge_fakes=False, leftside_legend=False,
               higgs_xsec_multiplier=1, project=None, project_axis=None,
               differential=False, yaxis='Events', tau_charge='tau_os',
               **kwargs):
    ''' Plot the final output - with bkg. estimation '''
    # A class-level default (if present) overrides the call argument.
    show_charge_fakes = show_charge_fakes if 'show_charge_fakes' not in self.defaults else self.defaults['show_charge_fakes']
    sig_view = self.make_signal_views(unblinded=(not self.blind),
                                      qcd_weight_fraction=qcd_weight_fraction,
                                      rebin=rebin, project=project,
                                      project_axis=project_axis,
                                      tau_charge=tau_charge)
    if differential:
        sig_view = self.apply_to_dict(sig_view, DifferentialView)
    # Higgs signal at 125 GeV, scaled up for visibility.
    vh_10x = views.TitleView(
        views.StyleView(
            views.ScaleView(sig_view['vh125'], higgs_xsec_multiplier),
            **remove_name_entry(data_styles['VH*'])
        ),
        "(%i#times) m_{H} = 125" % higgs_xsec_multiplier
    )
    charge_fakes_view = MedianView(
        highv=sig_view['charge_fakes']['sys_up'],
        centv=sig_view['charge_fakes']['central'])
    # Fudge factor to go from 120->125 - change in xsec*BR
    #vh_10x = views.ScaleView(vh_10x), .783)
    tostack = [sig_view['wz_3l'], sig_view['zz'], sig_view['wz'],
               sig_view['fakes'], vh_10x] if stack_higgs else \
              [sig_view['wz_3l'], sig_view['zz'], sig_view['wz'],
               sig_view['fakes']]
    if show_charge_fakes:
        tostack = tostack[:2]+[charge_fakes_view]+tostack[2:]
    # H->WW contribution, rescaled 120 -> 125 GeV via xsec*BR factor.
    vh_hww = views.ScaleView(sig_view['vh120_hww'], .783) \
        if 'vh120_hww' in sig_view else None
    if vh_hww:
        tostack = tostack[:-1] + [vh_hww] + tostack[-1:]
    stack = views.StackView(*tostack)
    histo = stack.Get(variable)
    histo.Draw()
    histo.GetHistogram().GetXaxis().SetTitle(xaxis)
    histo.GetHistogram().GetYaxis().SetTitle(yaxis)
    if x_range:
        histo.GetHistogram().GetXaxis().SetRangeUser(x_range[0], x_range[1])
    self.keep.append(histo)
    # Add legend
    entries = len(tostack)+1
    if show_error:
        entries += 1
    legend = self.add_legend(histo, leftside=leftside_legend,
                             entries=entries)
    if show_error:
        # Total background uncertainty band.
        bkg_error_view = BackgroundErrorView(
            sig_view['fakes'],
            views.SumView(sig_view['wz'], sig_view['wz_3l']),
            sig_view['zz'],
            charge_fakes_view,
            fake_error=0.3,
            **kwargs
        )
        bkg_error = bkg_error_view.Get(variable)
        self.keep.append(bkg_error)
        bkg_error.Draw('pe2,same')
        legend.AddEntry(bkg_error)
    # Use poisson error bars on the data
    sig_view['data'] = PoissonView(sig_view['data'], x_err=False,
                                   is_scaled=differential)
    data = sig_view['data'].Get(variable)
    ymax = histo.GetMaximum()
    # Only draw data when unblinded, or when looking at the control
    # (non-tau_os) charge region.
    if not self.blind or tau_charge != 'tau_os':
        data.Draw('pe,same')
        legend.AddEntry(data)
        ymax = max(ymax, data.GetMaximum())
        self.keep.append(data)
    # maxy may be a number (fixed maximum) or anything else (auto-scale).
    if isinstance(maxy, (int, long, float)):
        histo.SetMaximum(maxy)
        self.canvas.Update()
    else:
        histo.SetMaximum(ymax*1.2)
    # When not stacked, overlay the Higgs signal on its own.
    if not stack_higgs:
        higgs_plot = vh_10x.Get(variable)
        higgs_plot.Draw('same')
        self.keep.append(higgs_plot)
    legend.Draw()
def write_shapes(self, variable, rebin, outdir, qcd_fraction=0.,
                 show_charge_fakes=False, project=None, project_axis=None,
                 different_fakes=False):
    ''' Write final shapes for [variable] into a TDirectory [outputdir] '''
    # A class-level default (if present) overrides the call argument.
    show_charge_fakes = show_charge_fakes if 'show_charge_fakes' not in self.defaults else self.defaults['show_charge_fakes']
    sig_view = self.make_signal_views(unblinded=(not self.blind),
                                      qcd_weight_fraction=qcd_fraction,
                                      rebin=rebin, project=project,
                                      project_axis=project_axis)
    # Alternate fake-rate estimate, used for the fake-shape systematic.
    different_fakes_views = self.make_additional_fakes_view(
        unblinded=(not self.blind),
        rebin=rebin, project=project,
        project_axis=project_axis)
    outdir.cd()
    wz_weight = self.get_view('WZJetsTo3LNu*ZToTauTau*', 'weight')
    zz_weight = self.get_view('ZZJetsTo4L*', 'weight')
    print "wz_weight: %s" % wz_weight
    print "zz_weight: %s" % zz_weight
    # Remove empty MC bins (replacing them via the sample weight).
    wz = views.FunctorView(
        views.SumView(sig_view['wz'], sig_view['wz_3l']),
        make_empty_bin_remover(wz_weight)).Get(variable)
    zz = views.FunctorView(
        sig_view['zz'],
        make_empty_bin_remover(zz_weight)).Get(variable)
    obs = sig_view['data'].Get(variable)
    fakes = sig_view['fakes'].Get(variable) if not different_fakes else different_fakes_views['fakes'].Get(variable)
    fakes_down = different_fakes_views['fakes'].Get(variable)
    # Mirror variation: up = 2*nominal - alternate, clamped positive.
    fakes_up = PositiveView(
        views.SumView(
            views.ScaleView(sig_view['fakes'], 2.),
            views.ScaleView(different_fakes_views['fakes'], -1.)
        )
    ).Get(variable)
    wz.SetName('wz')
    zz.SetName('zz')
    obs.SetName('data_obs')
    fakes.SetName('fakes')
    fakes_down.SetName('fakes_CMS_vhtt_%s_fakeshape_%sTeVDown'
                       % (outdir.GetName(), self.sqrts))
    fakes_up.SetName('fakes_CMS_vhtt_%s_fakeshape_%sTeVUp'
                     % (outdir.GetName(), self.sqrts))
    for mass in range(90, 165, 5):
        try:
            vh = None
            # 90/95 GeV shapes at 8 TeV are extrapolated from the 100 GeV
            # sample via fixed cross-section ratios.
            if mass == 90 and self.sqrts == 8:
                vh = views.ScaleView(sig_view['vh100'], 1.3719).Get(variable)
            elif mass == 95 and self.sqrts == 8:
                vh = views.ScaleView(sig_view['vh100'], 1.1717).Get(variable)
            else:
                vh = sig_view['vh%i' % mass].Get(variable)
            vh.SetName('WH%i' % mass)
            vh.SetLineColor(0)
            vh.Write()
        except KeyError:
            # No sample found matching this mass point - skip it.
            continue
        if mass % 10 == 0 and mass < 150:
            # Only have 10 GeV steps for WW
            if 'vh%i_hww' % mass in sig_view:
                ww = sig_view['vh%i_hww' % mass].Get(variable)
                ww.SetName('WH_hww%i' % mass)
                ww.Write()
    wz.Write()
    zz.Write()
    obs.Write()
    fakes.Write()
    fakes_down.Write()
    fakes_up.Write()
    if show_charge_fakes:
        logging.info('adding charge fakes shape errors')
        charge_fakes = sig_view['charge_fakes']['central'].Get(variable)
        charge_fakes_sys_up = sig_view['charge_fakes']['sys_up'].Get(variable)
        # Symmetric mirror: down = 2*central - up.
        charge_fakes_sys_down = charge_fakes+charge_fakes - charge_fakes_sys_up
        charge_fakes.SetName('charge_fakes')
        charge_fakes_sys_up.SetName(
            'charge_fakes_CMS_vhtt_%s_chargeFlip_%sTeVUp'
            % (self.channel.lower(), self.sqrts))
        charge_fakes_sys_down.SetName(
            'charge_fakes_CMS_vhtt_%s_chargeFlip_%sTeVDown'
            % (self.channel.lower(), self.sqrts))
        charge_fakes.Write()
        charge_fakes_sys_up.Write()
        charge_fakes_sys_down.Write()
def get_ss(x):
    '''Redirect an 'em/' histogram path into the same-sign 'em/ss/' region.'''
    return x.replace('em/', 'em/ss/')


# Sum of the prompt-MC contributions, all viewed in the same-sign region.
mc_view = views.SumView(
    *[views.PathModifierView(plotter.get_view(x), get_ss) for x in [
        'WZJetsTo3LNu*',
        'ZZJetsTo4L*',
        'WW*',
        'WplusJets_madgraph',
        'TTplusJets_madgraph',
        'Zjets_M50',
    ]]
)
#mc_inverted = views.ScaleView(mc_view, -1)
mc_inverted = views.ScaleView(mc_view, -1)
sqrts = 7 if '7TeV' in jobid else 8
# QCD estimate: same-sign data minus prompt MC, scaled by the measured
# OS/SS transfer factor.
qcd_view = views.StyleView(
    views.TitleView(
        views.ScaleView(
            views.SumView(views.PathModifierView(plotter.data, get_ss),
                          mc_inverted),
            1.4 if sqrts == 8 else 1.28  # OS/SS from Valentina
        ),
        'QCD'),
    **data_styles['WZ*'])


# NOTE(review): the body of get_fakes continues beyond this chunk.
def get_fakes(x):
def roodatahistizer(hist):
    ''' Turn a hist into a RooDataHist '''
    # `x` is a module-level RooFit observable defined elsewhere in this
    # file -- TODO confirm.
    return ROOT.RooDataHist(hist.GetName(), hist.GetTitle(),
                            ROOT.RooArgList(x), hist)


# Now a Get() will return a RooDataHist
# NOTE: `type` shadows the builtin here; kept for compatibility.
for type in regions.keys():
    # Rebin the histograms. Make this smarter later
    regions[type] = views.FunctorView(regions[type], lambda x: x.Rebin(5))
    # Make views of the numerator and denominator.
    # For RooFit we have to split into Pass & Fail
    # So subtract the numerator from the denominator
    num_view = regions[type]
    all_view = views.PathModifierView(regions[type],
                                      denominator_path_mangler)
    negative_num_view = views.ScaleView(num_view, -1)
    fail_view = views.SumView(all_view, negative_num_view)
    # Now make RooDataHistViews of the numerator & denominator
    regions[type] = (
        views.FunctorView(num_view, roodatahistizer),
        views.FunctorView(fail_view, roodatahistizer),
    )

log.info("Making output workspace")
ws = ROOT.RooWorkspace("fit_results")


def ws_import(*args):
    # 'import' is a Python keyword, so call it through getattr.
    getattr(ws, 'import')(*args)

# Fit each region
def data_views(files, lumifiles):
    ''' Builds views of files.

    [files] gives an iterator of .root files with histograms to build.

    [lumifiles] gives the correspond list of .lumisum files which
    contain the effective integrated luminosity of the samples.

    The lumi to normalize to is taken as the sum of the data file int.
    lumis.
    '''
    files = list(files)
    log.info("Creating views from %i files", len(files))
    # Map sample_name => root file
    histo_files = dict((extract_sample(x), io.open(x)) for x in files)
    # Map sample_name => lumi file
    lumi_files = dict((extract_sample(x), read_lumi(x)) for x in lumifiles)
    # Identify data files
    datafiles = set([name for name in histo_files.keys() if 'data' in name])
    log.info("Found the following data samples:")
    log.info(" ".join(datafiles))
    datalumi = 0
    for x in datafiles:
        if x not in lumi_files:
            raise KeyError(
                "Can't find a lumi file for %s - I have these ones: " % x
                + repr(lumi_files.keys()))
        datalumi += lumi_files[x]
    log.info("-> total int. lumi = %0.0fpb-1", datalumi)
    # Figure out the dataset for each file, and the int lumi.
    # Key = dataset name
    # Value = {intlumi, rootpy file, weight, weighted view}
    output = {}
    for sample in histo_files.keys():
        raw_file = histo_files[sample]
        intlumi = lumi_files[sample]
        log.info("Building sample: %s => int lumi: %0.f pb-1",
                 sample, intlumi)
        # MC weight = target lumi / sample lumi; data is never reweighted.
        weight = 1
        if intlumi:
            weight = datalumi/intlumi
        if 'data' in sample:
            weight = 1
        log.debug("Weight: %0.2f", weight)
        view = views.ScaleView(raw_file, weight)
        unweighted_view = raw_file
        # Find the longest (i.e. most specific) matching style pattern
        best_pattern = ''
        for pattern, style_dict in data_styles.iteritems():
            log.debug("Checking pattern: %s against %s", pattern, sample)
            if fnmatch.fnmatch(sample, pattern):
                log.debug("-> it matches!")
                if len(pattern) > len(best_pattern):
                    best_pattern = pattern
                    log.info("Found new best style for %s: %s",
                             sample, pattern)
        if best_pattern:
            style_dict = data_styles[best_pattern]
            log.info("Found style for %s - applying Style View", sample)
            # Set style and title
            # title = the name of the sample, rootpy Legend uses this.
            nicename = copy.copy(style_dict['name'])
            view = views.TitleView(
                views.StyleView(view, **style_dict), nicename
            )
            unweighted_view = views.TitleView(
                views.StyleView(unweighted_view, **style_dict), nicename
            )
        output[sample] = {
            'intlumi': intlumi,
            'file': raw_file,
            'weight': weight,
            'view': view,
            'unweighted_view': unweighted_view
        }
    # Merge the data into just 'data'
    log.info("Merging data together")
    output['data'] = {
        'intlumi': datalumi,
        'weight': 1,
        'view': views.SumView(*[output[x]['view'] for x in datafiles]),
        'unweighted_view': views.SumView(
            *[output[x]['unweighted_view'] for x in datafiles]),
    }
    return output
# NOTE(review): the next two lines are the tail of a function whose `def`
# lies outside this chunk (presumably `postive_view`, called below).
    ''' Make a view where all bins > 0 '''
    return views.FunctorView(x, all_bins_positive)


def get_view(sample_pattern):
    # Return the rebinned view for the first sample matching the glob
    # pattern; raise KeyError listing the known samples otherwise.
    for sample, sample_info in the_views.iteritems():
        if fnmatch.fnmatch(sample, sample_pattern):
            return rebin_view(sample_info['view'], args.rebin)
    raise KeyError("I can't find a view that matches %s, I have: %s" %
                   (sample_pattern, " ".join(the_views.keys())))


wz_view = get_view('WZ*')
zz_view = get_view('ZZ*')
data = rebin_view(the_views['data']['view'], args.rebin)
# Subtract the prompt diboson contribution from data.
diboson_view = views.SumView(wz_view, zz_view)
inverted_diboson_view = views.ScaleView(diboson_view, -1)
# `postive_view` [sic] is defined elsewhere in this file.
corrected_view = postive_view(views.SumView(data, inverted_diboson_view))
output = io.open(args.outputfile, 'RECREATE')
output.cd()
corr_numerator = corrected_view.Get(args.numerator)
corr_denominator = corrected_view.Get(args.denom)
log.info(
    "Corrected: %0.2f/%0.2f = %0.1f%%",
    corr_numerator.Integral(),
    corr_denominator.Integral(),
    100 * corr_numerator.Integral() / corr_denominator.Integral()
    if corr_denominator.Integral() else 0)
uncorr_numerator = data.Get(args.numerator)
uncorr_denominator = data.Get(args.denom)