Example #1
 def __init__(self, tree, outfile, **kwargs):
     super(ChargeFlipProbabilityEE, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = EETree.EETree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.pucorrector = mcCorrectors.make_puCorrector('doublee')
Example #2
 def __init__(self, tree, outfile, **kwargs):
     super(ControlZMM, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = MuMuTree.MuMuTree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #3
 def __init__(self, tree, outfile, **kwargs):
     super(ControlZEE, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = EETree.EETree(tree)
     self.out = outfile
     # Histograms for each category
     self.dir_based_histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.pucorrector = mcCorrectors.make_puCorrector('doublee')
Example #4
 def __init__(self, tree, outfile, **kwargs):
     super(ChargeFlipProbabilityEE, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = EETree.EETree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = "7TeV" in os.environ["jobid"]
     self.pucorrector = mcCorrectors.make_puCorrector("doublee")
Example #5
 def __init__(self, tree, outfile, **kwargs):
     super(FakeRatesMM, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = MuMuTree.MuMuTree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.lepIds = ['pfidiso02', 'h2taucuts', 'h2taucuts020'] #, 'h2taucuts025']
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #6
    def __init__(self, tree, outfile, **kwargs):
        super(WHAnalyzeEET, self).__init__(tree, outfile, EETauTree, **kwargs)
        self.hfunc['subMTMass'] = lambda row, weight: (row.e2_t_Mass, weight) if row.e1MtToMET > row.e2MtToMET else (row.e1_t_Mass, weight) #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
        def make_both_barrel(attribute):
            def f_(row, weight):
                return (getattr(row, attribute), weight) if row.e1AbsEta < 1.48 and row.e2AbsEta < 1.48 else (0., 0.)
            return f_

        def make_both_endcap(attribute):
            def f_(row, weight):
                return (getattr(row, attribute), weight) if row.e1AbsEta >= 1.48 and row.e2AbsEta >= 1.48 else (0., 0.)
            return f_

        def make_mixed(attribute):
            def f_(row, weight):
                return (getattr(row, attribute), weight) \
                    if (row.e1AbsEta >= 1.48 and row.e2AbsEta < 1.48) or \
                       (row.e1AbsEta < 1.48 and row.e2AbsEta >= 1.48) \
                    else (0., 0.)
            return f_

        self.hfunc['pt_ratio' ] = lambda row, weight: (row.e2Pt/row.e1Pt, weight)
        self.hfunc["e*1_e2_Mass"] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e1_e2_Mass), weight)
        self.hfunc["e*1_t_Mass" ] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e1_t_Mass ), weight)
        self.hfunc["e1_e*2_Mass"] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e1_e2_Mass), weight)
        self.hfunc["e*2_t_Mass" ] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e2_t_Mass ), weight)
        self.hfunc["logic_cut_met" ] = self.logic_cut_met
        self.hfunc["my_selection_info" ] = self.fill_id_info

        self.hfunc["type1_pfMetEt_barr"] = make_both_barrel("type1_pfMetEt")
        self.hfunc["type1_pfMetEt_endc"] = make_both_endcap("type1_pfMetEt")
        self.hfunc["type1_pfMetEt_mix" ] = make_mixed("type1_pfMetEt")
        self.hfunc["mva_metEt_barr"] = make_both_barrel("mva_metEt")
        self.hfunc["mva_metEt_endc"] = make_both_endcap("mva_metEt")
        self.hfunc["mva_metEt_mix" ] = make_mixed("mva_metEt")


        self.hfunc["e2RelPFIsoDB_bar"] = lambda row, weight: ( row.e2RelPFIsoDB, weight ) if row.e2AbsEta < 1.48  else (0., 0.)
        self.hfunc["e1RelPFIsoDB_bar"] = lambda row, weight: ( row.e1RelPFIsoDB, weight ) if row.e1AbsEta < 1.48  else (0., 0.)
        self.hfunc["e2RelPFIsoDB_end"] = lambda row, weight: ( row.e2RelPFIsoDB, weight ) if row.e2AbsEta >= 1.48 else (0., 0.)
        self.hfunc["e1RelPFIsoDB_end"] = lambda row, weight: ( row.e1RelPFIsoDB, weight ) if row.e1AbsEta >= 1.48 else (0., 0.)


        self.hfunc["e1_t_CosThetaStar_barr"] = make_both_barrel("e1_t_CosThetaStar")
        self.hfunc["e1_t_CosThetaStar_endc"] = make_both_endcap("e1_t_CosThetaStar")
        self.hfunc["e1_t_CosThetaStar_mix" ] = make_mixed("e1_t_CosThetaStar")      
        self.hfunc["e1_e2_Mass_barr"] = make_both_barrel("e1_e2_Mass")
        self.hfunc["e1_e2_Mass_endc"] = make_both_endcap("e1_e2_Mass")
        self.hfunc["e1_e2_Mass_mix" ] = make_mixed("e1_e2_Mass")
        self.hfunc["electron_rejection_study" ] = self.electron_rejection_study
        self.hfunc["tau_id_study" ] = self.tau_id_study

        self.pucorrector = mcCorrectors.make_puCorrector('doublee')
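Note on the hfunc mapping used throughout these examples: the comment on 'subMTMass' above says each entry maps a histogram name to a two-argument function returning a (value, weight) pair, consumed later by WHAnalyzerBase.fill_histos. That consumer is not reproduced here; the following is only a minimal, self-contained sketch of how such a mapping could drive a fill loop. The Row namedtuple and the list-backed "histograms" dict are stand-ins, not the framework's objects.

# Hypothetical sketch of a fill loop driven by an hfunc-style mapping.
# Everything here is a stand-in for illustration, not WHAnalyzerBase itself.
from collections import namedtuple

Row = namedtuple('Row', ['e1Pt', 'e2Pt'])

def fill_histos(histograms, hfunc, row, weight):
    for name, histo in histograms.items():
        value, w = hfunc[name](row, weight)   # every hfunc takes (row, weight)
        if w is not None:
            histo.append((value, w))          # stand-in for histo.Fill(value, w)

hfunc = {'pt_ratio': lambda row, weight: (row.e2Pt / row.e1Pt, weight)}
histograms = {'pt_ratio': []}
fill_histos(histograms, hfunc, Row(e1Pt=42.0, e2Pt=21.0), 1.0)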
Example #7
 def __init__(self, tree, outfile, **kwargs):
     super(FakeRatesMM, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = MuMuTree.MuMuTree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.lepIds = ['pfidiso02', 'h2taucuts',
                    'h2taucuts020']  #, 'h2taucuts025']
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #8
    def __init__(self, tree, outfile, **kwargs):
        self.channel = 'MMT'
        super(WHAnalyzeMMT, self).__init__(tree, outfile, MuMuTauTree, **kwargs)

        def attr_getter(attribute):
            def f(row, weight):
                return (getattr(row, attribute), weight)
            return f

        def merge_functions(fcn_1, fcn_2):
            def f(row, weight):
                r1, w1 = fcn_1(row, weight)
                r2, w2 = fcn_2(row, weight)
                w = w1 if w1 and w2 else None
                return ((r1, r2), w)
            return f

        lead_iso = self.grid_search['']['leading_iso']
        sublead_iso = self.grid_search['']['subleading_iso']
        
        #@memo_last
        #def f_par_prob(m1Pt, m1JetPt,
        #               m2Pt, m2JetPt,
        #               tPt):
        #    p_m1 = (( frfits.highpt_mu_fr[lead_iso](muonJetPt=max(m1JetPt, m1Pt), muonPt=m1Pt) +\
        #              frfits.highpt_mu_qcd_fr[lead_iso](muonJetPt=max(m1JetPt, m1Pt), muonPt=m1Pt) )/2)
        #    p_m2 = (( frfits.lowpt_mu_fr[sublead_iso](muonJetPt=max(m2JetPt, m2Pt), muonPt=m2Pt) + \
        #              frfits.lowpt_mu_qcd_fr[sublead_iso](muonJetPt=max(m2JetPt, m2Pt), muonPt=m2Pt))/2)
        #    p_t  = frfits.tau_fr(tPt)
        #    return (p_m1 + p_m2*(1 - p_m1) + p_t*(1 - p_m1)*(1 - p_m2))
        #    
        #def f_prob(row, weight):
        #    val = f_par_prob(row.m1Pt, row.m1JetPt,
        #                     row.m2Pt, row.m2JetPt,
        #                     row.tPt)
        #    return val, weight
        #
        #def log_prob(row, weight):
        #    prob, weight = f_prob(row, weight)
        #    return ROOT.TMath.Log10(prob), weight
        
        #self.hfunc['faking_prob'] = f_prob
        #self.hfunc['log_prob']    = log_prob
        #self.hfunc["m2_t_Mass#faking_prob"] = merge_functions( attr_getter('m2_t_Mass'), f_prob  )
        #self.hfunc["m2_t_Mass#log_prob"   ] = merge_functions( attr_getter('m2_t_Mass'), log_prob)

        self.hfunc['subMTMass'] = lambda row, weight: (row.m2_t_Mass, weight) if row.m1MtToMET > row.m2MtToMET else (row.m1_t_Mass, weight) #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
        self.hfunc['pt_ratio' ] = lambda row, weight: (row.m2Pt/row.m1Pt, weight)
        self.hfunc['SYNC'] = lambda row, weight: (row, None)#((row.run, row.lumi, row.evt, row.m1Pt, row.m1Eta, row.m1Phi, row.m2Pt, row.m2Eta, row.m2Phi, row.tPt, row.tEta, row.tPhi, weight), None)
        
        self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #9
    def __init__(self, tree, outfile, **kwargs):
        super(WHAnalyzeEMT, self).__init__(tree, outfile, EMuTauTree, **kwargs)
        #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
        self.hfunc['subMass']   = lambda row, weight: (row.e_t_Mass, weight)    if row.ePt < row.mPt else (row.m_t_Mass, weight) 
        self.hfunc['tLeadDR']   = lambda row, weight: (row.m_t_DR,   weight)    if row.ePt < row.mPt else (row.e_t_DR,   weight) 
        self.hfunc['tSubDR']    = lambda row, weight: (row.e_t_DR,   weight)    if row.ePt < row.mPt else (row.m_t_DR,   weight) 
        self.hfunc['pt_ratio' ] = lambda row, weight: (row.ePt/row.mPt, weight) if row.ePt < row.mPt else (row.mPt/row.ePt, weight)
        self.hfunc["e*_t_Mass"] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e_t_Mass), weight)
        self.hfunc["e*_m_Mass"] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e_m_Mass), weight)
        self.hfunc["subMass*" ] = lambda row, weight: ( frfits.mass_scaler['h2taucuts']( row.e_t_Mass), weight) if row.ePt < row.mPt else (row.m_t_Mass, weight) 
        self.hfunc["_recoilDaught" ] = lambda row, weight: (math.sqrt(row.recoilDaught) , weight)
        self.hfunc["_recoilWithMet"] = lambda row, weight: (math.sqrt(row.recoilWithMet), weight)

        self.pucorrector = mcCorrectors.make_puCorrector('mueg')
Example #10
 def __init__(self, tree, outfile, **kwargs):
     super(FakeRatesEE, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = EETree.EETree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.pucorrector = mcCorrectors.make_puCorrector('doublee')
     self.defined_eids = selections.electronIds.keys()
     self.iso_points   = ['idiso02', 'h2taucuts', 'h2taucuts020']
     self.lepIds  =  [ '_'.join([i,j])
                       for i in self.defined_eids
                       for j in self.iso_points]
Example #11
 def __init__(self, tree, outfile, **kwargs):
     super(TauEffZMM, self).__init__(tree, outfile, MuMuTree.MuMuTree, **kwargs)
     self.pucorrector = mcCorrectors.make_puCorrector('singlemu')
     self.objId = [
         'h2Tau', 
         ]
     self.id_functions = {
         'h2Tau'    : lambda row: selections.mu_idIso(row, 'm2'),
         'sign_cut' : self.sign_cut,
         }
     
     self.id_functions_with_sys = {
         }
     self.hfunc['MET_Z_perp'] = lambda row, weight: (row.type1_pfMetEt*ROOT.TMath.Cos(row.m1_m2_ToMETDPhi_Ty1), weight)
     self.hfunc['MET_Z_para'] = lambda row, weight: (row.type1_pfMetEt*ROOT.TMath.Sin(row.m1_m2_ToMETDPhi_Ty1), weight)
Example #12
 def __init__(self, tree, outfile, **kwargs):
     super(FakeRatesEE, self).__init__(tree, outfile, **kwargs)
     # Use the cython wrapper
     self.tree = EETree.EETree(tree)
     self.out = outfile
     # Histograms for each category
     self.histograms = {}
     self.is7TeV = '7TeV' in os.environ['jobid']
     self.pucorrector = mcCorrectors.make_puCorrector('doublee')
     self.defined_eids = selections.electronIds.keys()
     self.iso_points = ['idiso02', 'h2taucuts', 'h2taucuts020']
     self.lepIds = [
         '_'.join([i, j]) for i in self.defined_eids
         for j in self.iso_points
     ]
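The lepIds list built here is simply the Cartesian product of the defined electron IDs with the three isolation points, joined with underscores. For reference, an equivalent standalone construction with itertools.product; the ID names below are placeholders, since selections.electronIds is not shown in this example.

from itertools import product

defined_eids = ['eid12Medium', 'eid12Tight']            # placeholder ID names
iso_points = ['idiso02', 'h2taucuts', 'h2taucuts020']
lepIds = ['_'.join(pair) for pair in product(defined_eids, iso_points)]
# e.g. ['eid12Medium_idiso02', 'eid12Medium_h2taucuts', ...,
#       'eid12Tight_h2taucuts020']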
Example #13
 def __init__(self, tree, outfile, **kwargs):
     super(WHAnalyzeMMT, self).__init__(tree, outfile, MuMuTauTree, **kwargs)
     self.hfunc['subMTMass'] = lambda row, weight: (row.m2_t_Mass, weight) if row.m1MtToMET > row.m2MtToMET else (row.m1_t_Mass, weight) #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
     self.hfunc['pt_ratio' ] = lambda row, weight: (row.m2Pt/row.m1Pt, weight)
     
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #14
 def __init__(self, tree, outfile, **kwargs):
     super(ZHAnalyzeEETT, self).__init__(tree, outfile, EETauTauTree, 'TT',
                                         **kwargs)
     # Hack to use S6 weights for the one 7TeV sample we use in 8TeV
     target = os.environ['megatarget']
     self.pucorrector = mcCorrectors.make_puCorrector('doublee')
Example #15
    def __init__(self, tree, outfile, **kwargs):
        self.channel = 'EMT'
        super(WHAnalyzeEMT, self).__init__(tree, outfile, EMuTauTree, **kwargs)

        def attr_getter(attribute):
            def f(row, weight):
                return (getattr(row, attribute), weight)
            return f

        def mass_scaler(fcn):
            def f(row, weight):
                val, w = fcn(row, weight)
                res = val
                if row.ePt < row.mPt:
                    res = frfits.default_scaler(val)
                return res, w
            return f

        def merge_functions(fcn_1, fcn_2):
            def f(row, weight):
                r1, w1 = fcn_1(row, weight)
                r2, w2 = fcn_2(row, weight)
                w = w1 if w1 == w2 else None
                return ((r1, r2), w)
            return f

        def sub_mass(row, weight):
            return (row.e_t_Mass, weight) if row.ePt < row.mPt else (row.m_t_Mass, weight)
        
        lead_iso = self.grid_search['']['leading_iso']
        sublead_iso = self.grid_search['']['subleading_iso']
        
        #def f_prob(row, weight):
        #    p_m = ( self.obj1_weight(row, lead_iso, sublead_iso) + \
        #            self.obj1_qcd_weight(row, lead_iso, sublead_iso))/2
        #    p_e = ( self.obj2_weight(row, lead_iso, sublead_iso) + \
        #            self.obj2_qcd_weight(row, lead_iso, sublead_iso))/2
        #    p_t  = frfits.tau_fr(row.tPt)
        #    return ((p_m + p_e*(1 - p_m) + p_t*(1 - p_m)*(1 - p_e)), weight)
        #
        #def log_prob(row, weight):
        #    prob, weight = f_prob(row, weight)
        #    return math.log(prob), weight
        
        #self.hfunc['faking_prob'] = f_prob
        #self.hfunc['log_prob']    = log_prob
        #self.hfunc["subMass#faking_prob"] = merge_functions( sub_mass, f_prob  )
        #self.hfunc["subMass#log_prob"   ] = merge_functions( sub_mass, log_prob)
        self.hfunc["subMass#LT" ] = merge_functions( sub_mass, attr_getter('LT'))
        self.hfunc["subMass#tPt"] = merge_functions( sub_mass, attr_getter('tPt'))

        #self.hfunc["subMass*#faking_prob"] = merge_functions( mass_scaler( sub_mass ), f_prob  )
        #self.hfunc["subMass*#log_prob"   ] = merge_functions( mass_scaler( sub_mass ), log_prob)
        self.hfunc["subMass*#LT" ] = merge_functions( mass_scaler( sub_mass ), attr_getter('LT'))
        self.hfunc["subMass*#tPt"] = merge_functions( mass_scaler( sub_mass ), attr_getter('tPt'))

        #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
        self.hfunc['subMass']   = sub_mass 
        self.hfunc['tLeadDR']   = lambda row, weight: (row.m_t_DR,   weight)    if row.ePt < row.mPt else (row.e_t_DR,   weight) 
        self.hfunc['tSubDR']    = lambda row, weight: (row.e_t_DR,   weight)    if row.ePt < row.mPt else (row.m_t_DR,   weight) 
        self.hfunc["subPt"]     = lambda row, weight: (row.ePt, weight)         if row.ePt < row.mPt else (row.mPt, weight) 
        self.hfunc["leadPt"]    = lambda row, weight: (row.mPt, weight)         if row.ePt < row.mPt else (row.ePt, weight)         
        self.hfunc["subJetPt"]  = lambda row, weight: (row.eJetPt, weight)      if row.ePt < row.mPt else (row.mJetPt, weight) 
        self.hfunc["leadJetPt"] = lambda row, weight: (row.mJetPt, weight)      if row.ePt < row.mPt else (row.eJetPt, weight)         
        self.hfunc['pt_ratio' ] = lambda row, weight: (row.ePt/row.mPt, weight) if row.ePt < row.mPt else (row.mPt/row.ePt, weight)
        self.hfunc["e*_t_Mass"] = lambda row, weight: ( frfits.default_scaler( row.e_t_Mass), weight)
        self.hfunc["e*_m_Mass"] = lambda row, weight: ( frfits.default_scaler( row.e_m_Mass), weight)
        self.hfunc["subMass*" ] = mass_scaler( sub_mass ) 
        self.hfunc["_recoilDaught" ] = lambda row, weight: (math.sqrt(row.recoilDaught) , weight)
        self.hfunc["_recoilWithMet"] = lambda row, weight: (math.sqrt(row.recoilWithMet), weight)
        self.hfunc['SYNC'] = lambda row, weight: (row, None) #((row.run, row.lumi, row.evt, row.mPt, row.mEta, row.mPhi, row.ePt, row.eEta, row.ePhi, row.tPt, row.tEta, row.tPhi, weight), None )

        self.pucorrector = mcCorrectors.make_puCorrector('mueg')
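merge_functions above combines two (row, weight) -> (value, weight) extractors into a single one that returns a pair of values, which is what a 2D entry such as "subMass#LT" expects; the weight is kept only when both inputs agree. A self-contained illustration of the same pattern follows (the Row namedtuple is a stand-in for the real tree row):

from collections import namedtuple

Row = namedtuple('Row', ['e_t_Mass', 'LT'])

def attr_getter(attribute):
    def f(row, weight):
        return (getattr(row, attribute), weight)
    return f

def merge_functions(fcn_1, fcn_2):
    def f(row, weight):
        r1, w1 = fcn_1(row, weight)
        r2, w2 = fcn_2(row, weight)
        w = w1 if w1 == w2 else None      # drop the entry if the weights disagree
        return ((r1, r2), w)
    return f

pair = merge_functions(attr_getter('e_t_Mass'), attr_getter('LT'))
print(pair(Row(e_t_Mass=91.2, LT=130.0), 1.0))   # ((91.2, 130.0), 1.0)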
Example #16
    def __init__(self, tree, outfile, **kwargs):
        self.channel = 'EET'	
        super(WHAnalyzeEET, self).__init__(tree, outfile, EETauTree, **kwargs)
        self.hfunc['subMTMass'] = lambda row, weight: (row.e2_t_Mass, weight) if row.e1MtToMET > row.e2MtToMET else (row.e1_t_Mass, weight) #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
        def make_both_barrel(fcn, negval=(0., 0.)):
            def f_(row, weight):
                return fcn(row, weight) if row.e1AbsEta < 1.48 and row.e2AbsEta < 1.48 else negval
            return f_

        def make_both_endcap(fcn, negval=(0., 0.)):
            def f_(row, weight):
                return fcn(row, weight) if row.e1AbsEta >= 1.48 and row.e2AbsEta >= 1.48 else negval
            return f_

        def make_mixed(fcn, negval=(0., 0.)):
            def f_(row, weight):
                return fcn(row, weight) \
                    if (row.e1AbsEta >= 1.48 and row.e2AbsEta < 1.48) or \
                       (row.e1AbsEta < 1.48 and row.e2AbsEta >= 1.48) \
                    else negval
            return f_

        def attr_getter(attribute):
            def f(row, weight):
                return (getattr(row, attribute), weight)
            return f

        def double_attr_getter(attr1, attr2):
            def f(row, weight):
                # pair of values, one taken from each attribute
                return ((getattr(row, attr1), getattr(row, attr2)), weight)
            return f

        def charge_selector(fcn, chargeIds, negval=(0.,0.)):
            def f(row, weight):
                ok = all( getattr(row, chid) for chid in chargeIds )
                return fcn(row, weight) if ok else negval
            return f

        def mass_scaler(fcn):
            def f(row, weight):
                val, w = fcn(row, weight)
                return frfits.default_scaler(val), w
            return f

        def merge_functions(fcn_1, fcn_2):
            def f(row, weight):
                r1, w1 = fcn_1(row, weight)
                r2, w2 = fcn_2(row, weight)
                w = w1 if w1 and w2 else None
                return ((r1, r2), w)
            return f

        lead_iso = self.grid_search['']['leading_iso']
        sublead_iso = self.grid_search['']['subleading_iso']
        
        @memo_last
        def f_par_prob(e1Pt, e1JetPt,
                       e2Pt, e2JetPt,
                       tPt):
            p_e1 = (( frfits.highpt_ee_fr[lead_iso](electronJetPt=max(e1JetPt, e1Pt), electronPt=e1Pt) +\
                      frfits.highpt_ee_qcd_fr[lead_iso](electronJetPt=max(e1JetPt, e1Pt), electronPt=e1Pt) )/2)
            p_e2 = (( frfits.lowpt_ee_fr[sublead_iso](electronJetPt=max(e2JetPt, e2Pt), electronPt=e2Pt) + \
                      frfits.lowpt_ee_qcd_fr[sublead_iso](electronJetPt=max(e2JetPt, e2Pt), electronPt=e2Pt))/2)
            p_t  = frfits.tau_fr(tPt)
            return (p_e1 + p_e2*(1 - p_e1) + p_t*(1 - p_e1)*(1 - p_e2))
            

        def f_prob(row, weight):
            val = f_par_prob(row.e1Pt, row.e1JetPt,
                             row.e2Pt, row.e2JetPt,
                             row.tPt)
            return val, weight

        def log_prob(row, weight):
            prob, weight = f_prob(row, weight)
            return ROOT.TMath.Log10(prob), weight
        
        self.hfunc['faking_prob'] = f_prob
        self.hfunc['log_prob']    = log_prob
        self.hfunc["e2_t_Mass#faking_prob"] = merge_functions( attr_getter('e2_t_Mass'), f_prob  )
        self.hfunc["e2_t_Mass#log_prob"   ] = merge_functions( attr_getter('e2_t_Mass'), log_prob)

        self.hfunc["e*2_t_Mass#faking_prob"] = merge_functions( mass_scaler( attr_getter('e2_t_Mass')), f_prob  )
        self.hfunc["e*2_t_Mass#log_prob"   ] = merge_functions( mass_scaler( attr_getter('e2_t_Mass')), log_prob)
        self.hfunc["e*2_t_Mass#LT" ] = merge_functions( mass_scaler( attr_getter('e2_t_Mass')), attr_getter('LT'))
        self.hfunc["e*2_t_Mass#tPt"] = merge_functions( mass_scaler( attr_getter('e2_t_Mass')), attr_getter('tPt'))

        self.hfunc['pt_ratio' ] = lambda row, weight: (row.e2Pt/row.e1Pt, weight)
        self.hfunc["e*1_e2_Mass"] = mass_scaler( attr_getter('e1_e2_Mass'))
        self.hfunc["e*1_t_Mass" ] = mass_scaler( attr_getter('e1_t_Mass')) 
        self.hfunc["e1_e*2_Mass"] = mass_scaler( attr_getter('e1_e2_Mass'))
        self.hfunc["e*2_t_Mass" ] = mass_scaler( attr_getter('e2_t_Mass')) 

        #self.hfunc['evt_info'] = lambda row, weight: (array.array("f", [row.e1Pt, row.e2Pt, row.tPt, row.LT, weight] ), None)
        self.pucorrector = mcCorrectors.make_puCorrector('doublee')
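The combination in f_par_prob above is the probability that at least one of the three legs (e1, e2, tau) is a fake, given per-leg fake rates; algebraically it equals 1 - (1 - p_e1)(1 - p_e2)(1 - p_t). A small numerical check with placeholder rates (not measured fake rates):

# P(at least one fake) written two equivalent ways; the rates are placeholders.
def combined_fake_prob(p_e1, p_e2, p_t):
    return p_e1 + p_e2 * (1 - p_e1) + p_t * (1 - p_e1) * (1 - p_e2)

p_e1, p_e2, p_t = 0.10, 0.20, 0.30
assert abs(combined_fake_prob(p_e1, p_e2, p_t)
           - (1 - (1 - p_e1) * (1 - p_e2) * (1 - p_t))) < 1e-12   # both give 0.496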
Example #17
    def __init__(self, tree, outfile, **kwargs):
        self.channel = 'EMT'
        super(WHAnalyzeEMT, self).__init__(tree, outfile, EMuTauTree, **kwargs)

        def attr_getter(attribute):
            def f(row, weight):
                return (getattr(row, attribute), weight)

            return f

        def mass_scaler(fcn):
            def f(row, weight):
                val, w = fcn(row, weight)
                res = val
                if row.ePt < row.mPt:
                    res = frfits.default_scaler(val)
                return res, w

            return f

        def merge_functions(fcn_1, fcn_2):
            def f(row, weight):
                r1, w1 = fcn_1(row, weight)
                r2, w2 = fcn_2(row, weight)
                w = w1 if w1 == w2 else None
                return ((r1, r2), w)

            return f

        def sub_mass(row, weight):
            return (row.e_t_Mass,
                    weight) if row.ePt < row.mPt else (row.m_t_Mass, weight)

        lead_iso = self.grid_search['']['leading_iso']
        sublead_iso = self.grid_search['']['subleading_iso']

        #def f_prob(row, weight):
        #    p_m = ( self.obj1_weight(row, lead_iso, sublead_iso) + \
        #            self.obj1_qcd_weight(row, lead_iso, sublead_iso))/2
        #    p_e = ( self.obj2_weight(row, lead_iso, sublead_iso) + \
        #            self.obj2_qcd_weight(row, lead_iso, sublead_iso))/2
        #    p_t  = frfits.tau_fr(row.tPt)
        #    return ((p_m + p_e*(1 - p_m) + p_t*(1 - p_m)*(1 - p_e)), weight)
        #
        #def log_prob(row, weight):
        #    prob, weight = f_prob(row, weight)
        #    return math.log(prob), weight

        #self.hfunc['faking_prob'] = f_prob
        #self.hfunc['log_prob']    = log_prob
        #self.hfunc["subMass#faking_prob"] = merge_functions( sub_mass, f_prob  )
        #self.hfunc["subMass#log_prob"   ] = merge_functions( sub_mass, log_prob)
        self.hfunc["subMass#LT"] = merge_functions(sub_mass, attr_getter('LT'))
        self.hfunc["subMass#tPt"] = merge_functions(sub_mass,
                                                    attr_getter('tPt'))

        #self.hfunc["subMass*#faking_prob"] = merge_functions( mass_scaler( sub_mass ), f_prob  )
        #self.hfunc["subMass*#log_prob"   ] = merge_functions( mass_scaler( sub_mass ), log_prob)
        self.hfunc["subMass*#LT"] = merge_functions(mass_scaler(sub_mass),
                                                    attr_getter('LT'))
        self.hfunc["subMass*#tPt"] = merge_functions(mass_scaler(sub_mass),
                                                     attr_getter('tPt'))

        #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later
        self.hfunc['subMass'] = sub_mass
        self.hfunc['tLeadDR'] = lambda row, weight: (
            row.m_t_DR, weight) if row.ePt < row.mPt else (row.e_t_DR, weight)
        self.hfunc['tSubDR'] = lambda row, weight: (
            row.e_t_DR, weight) if row.ePt < row.mPt else (row.m_t_DR, weight)
        self.hfunc["subPt"] = lambda row, weight: (
            row.ePt, weight) if row.ePt < row.mPt else (row.mPt, weight)
        self.hfunc["leadPt"] = lambda row, weight: (
            row.mPt, weight) if row.ePt < row.mPt else (row.ePt, weight)
        self.hfunc["subJetPt"] = lambda row, weight: (
            row.eJetPt, weight) if row.ePt < row.mPt else (row.mJetPt, weight)
        self.hfunc["leadJetPt"] = lambda row, weight: (
            row.mJetPt, weight) if row.ePt < row.mPt else (row.eJetPt, weight)
        self.hfunc['pt_ratio'] = lambda row, weight: (
            row.ePt / row.mPt, weight) if row.ePt < row.mPt else (row.mPt / row
                                                                  .ePt, weight)
        self.hfunc["e*_t_Mass"] = lambda row, weight: (frfits.default_scaler(
            row.e_t_Mass), weight)
        self.hfunc["e*_m_Mass"] = lambda row, weight: (frfits.default_scaler(
            row.e_m_Mass), weight)
        self.hfunc["subMass*"] = mass_scaler(sub_mass)
        self.hfunc["_recoilDaught"] = lambda row, weight: (math.sqrt(
            row.recoilDaught), weight)
        self.hfunc["_recoilWithMet"] = lambda row, weight: (math.sqrt(
            row.recoilWithMet), weight)
        self.hfunc['SYNC'] = lambda row, weight: (
            row, None
        )  #((row.run, row.lumi, row.evt, row.mPt, row.mEta, row.mPhi, row.ePt, row.eEta, row.ePhi, row.tPt, row.tEta, row.tPhi, weight), None )

        self.pucorrector = mcCorrectors.make_puCorrector('mueg')
Example #18
    def __init__(self, tree, outfile, **kwargs):
        self.channel = 'EET'
        super(WHAnalyzeEET, self).__init__(tree, outfile, EETauTree, **kwargs)
        self.hfunc['subMTMass'] = lambda row, weight: (
            row.e2_t_Mass, weight
        ) if row.e1MtToMET > row.e2MtToMET else (
            row.e1_t_Mass, weight
        )  #maps the name of non-trivial histograms to a function to get the proper value, the function MUST have two args (evt and weight). Used in WHAnalyzerBase.fill_histos later

        def make_both_barrel(fcn, negval=(0., 0.)):
            def f_(row, weight):
                return fcn(
                    row, weight
                ) if row.e1AbsEta < 1.48 and row.e2AbsEta < 1.48 else negval

            return f_

        def make_both_endcap(fcn, negval=(0., 0.)):
            def f_(row, weight):
                return fcn(
                    row, weight
                ) if row.e1AbsEta >= 1.48 and row.e2AbsEta >= 1.48 else negval

            return f_

        def make_mixed(fcn, negval=(0., 0.)):
            def f_(row, weight):
                return fcn(row, weight) \
                    if (row.e1AbsEta >= 1.48 and row.e2AbsEta < 1.48) or \
                    (row.e1AbsEta < 1.48 and row.e2AbsEta >= 1.48) \
                           else negval

            return f_

        def attr_getter(attribute):
            def f(row, weight):
                return (getattr(row, attribute), weight)

            return f

        def double_attr_getter(attr1, attr2):
            def f(row, weight):
                # pair of values, one taken from each attribute
                return ((getattr(row, attr1), getattr(row, attr2)), weight)

            return f

        def charge_selector(fcn, chargeIds, negval=(0., 0.)):
            def f(row, weight):
                ok = all(getattr(row, chid) for chid in chargeIds)
                return fcn(row, weight) if ok else negval

            return f

        def mass_scaler(fcn):
            def f(row, weight):
                val, w = fcn(row, weight)
                return frfits.default_scaler(val), w

            return f

        def merge_functions(fcn_1, fcn_2):
            def f(row, weight):
                r1, w1 = fcn_1(row, weight)
                r2, w2 = fcn_2(row, weight)
                w = w1 if w1 and w2 else None
                return ((r1, r2), w)

            return f

        lead_iso = self.grid_search['']['leading_iso']
        sublead_iso = self.grid_search['']['subleading_iso']

        @memo_last
        def f_par_prob(e1Pt, e1JetPt, e2Pt, e2JetPt, tPt):
            p_e1 = (( frfits.highpt_ee_fr[lead_iso](electronJetPt=max(e1JetPt, e1Pt), electronPt=e1Pt) +\
                      frfits.highpt_ee_qcd_fr[lead_iso](electronJetPt=max(e1JetPt, e1Pt), electronPt=e1Pt) )/2)
            p_e2 = (( frfits.lowpt_ee_fr[sublead_iso](electronJetPt=max(e2JetPt, e2Pt), electronPt=e2Pt) + \
                      frfits.lowpt_ee_qcd_fr[sublead_iso](electronJetPt=max(e2JetPt, e2Pt), electronPt=e2Pt))/2)
            p_t = frfits.tau_fr(tPt)
            return (p_e1 + p_e2 * (1 - p_e1) + p_t * (1 - p_e1) * (1 - p_e2))

        def f_prob(row, weight):
            val = f_par_prob(row.e1Pt, row.e1JetPt, row.e2Pt, row.e2JetPt,
                             row.tPt)
            return val, weight

        def log_prob(row, weight):
            prob, weight = f_prob(row, weight)
            return ROOT.TMath.Log10(prob), weight

        self.hfunc['faking_prob'] = f_prob
        self.hfunc['log_prob'] = log_prob
        self.hfunc["e2_t_Mass#faking_prob"] = merge_functions(
            attr_getter('e2_t_Mass'), f_prob)
        self.hfunc["e2_t_Mass#log_prob"] = merge_functions(
            attr_getter('e2_t_Mass'), log_prob)

        self.hfunc["e*2_t_Mass#faking_prob"] = merge_functions(
            mass_scaler(attr_getter('e2_t_Mass')), f_prob)
        self.hfunc["e*2_t_Mass#log_prob"] = merge_functions(
            mass_scaler(attr_getter('e2_t_Mass')), log_prob)
        self.hfunc["e*2_t_Mass#LT"] = merge_functions(
            mass_scaler(attr_getter('e2_t_Mass')), attr_getter('LT'))
        self.hfunc["e*2_t_Mass#tPt"] = merge_functions(
            mass_scaler(attr_getter('e2_t_Mass')), attr_getter('tPt'))

        self.hfunc['pt_ratio'] = lambda row, weight: (row.e2Pt / row.e1Pt,
                                                      weight)
        self.hfunc["e*1_e2_Mass"] = mass_scaler(attr_getter('e1_e2_Mass'))
        self.hfunc["e*1_t_Mass"] = mass_scaler(attr_getter('e1_t_Mass'))
        self.hfunc["e1_e*2_Mass"] = mass_scaler(attr_getter('e1_e2_Mass'))
        self.hfunc["e*2_t_Mass"] = mass_scaler(attr_getter('e2_t_Mass'))

        #self.hfunc['evt_info'] = lambda row, weight: (array.array("f", [row.e1Pt, row.e2Pt, row.tPt, row.LT, weight] ), None)
        self.pucorrector = mcCorrectors.make_puCorrector('doublee')
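f_par_prob is decorated with @memo_last, whose definition is not shown in these examples; a plausible reading (an assumption, not the framework's actual code) is a cache that remembers only the most recent argument tuple, so the several hfuncs that call f_prob on the same row compute the probability once.

# Assumed behaviour of memo_last: remember only the last call. This is a
# sketch, not the framework's definition.
def memo_last(fcn):
    cache = {'args': None, 'result': None}
    def wrapper(*args):
        if args != cache['args']:
            cache['args'] = args
            cache['result'] = fcn(*args)
        return cache['result']
    return wrapper

@memo_last
def slow_prob(x):
    print('computing for', x)
    return x * 0.5

slow_prob(4.0)   # computes and prints once
slow_prob(4.0)   # same arguments: served from the cache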
Example #19
 def __init__(self, tree, outfile, **kwargs):
     super(ZHAnalyzeEETT, self).__init__(tree, outfile, EETauTauTree, "TT", **kwargs)
     # Hack to use S6 weights for the one 7TeV sample we use in 8TeV
     target = os.environ["megatarget"]
     self.pucorrector = mcCorrectors.make_puCorrector("doublee")
Example #20
 def __init__(self, tree, outfile, **kwargs):
     super(ZZAnalyzerEEMM, self).__init__(tree, outfile, ElecElecMuMuTree, "eemm", **kwargs)
     target = os.environ["megatarget"]
     self.pucorrector = mcCorrectors.make_puCorrector("doublemu")
Example #21
 def __init__(self, tree, outfile, **kwargs):
     super(ZHAnalyzeMMTT, self).__init__(tree, outfile, MuMuTauTauTree, 'TT', **kwargs)
     # Hack to use S6 weights for the one 7TeV sample we use in 8TeV
     target = os.environ['megatarget']
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #22
 def __init__(self, tree, outfile, **kwargs):
     super(ZZAnalyzerMMMM, self).__init__(tree, outfile, MuMuMuMuTree, 'mmmm', **kwargs)
     target = os.environ['megatarget']
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
Example #23
 def __init__(self, tree, outfile, **kwargs):
     super(ZHAnalyzeMMEM, self).__init__(tree, outfile, EMuMuMuTree, 'EM', **kwargs)
     # Hack to use S6 weights for the one 7TeV sample we use in 8TeV
     self.pucorrector = mcCorrectors.make_puCorrector('doublemu')
     target = os.environ['megatarget']
Example #24
 def __init__(self, tree, outfile, **kwargs):
     super(ZZAnalyzerEEEE, self).__init__(tree, outfile, ElecElecElecElecTree, 'eeee', **kwargs)
     target = os.environ['megatarget']
     self.pucorrector = mcCorrectors.make_puCorrector('doublee')