# Core Analyzer ttHCoreEventAna.mhtForBiasedDPhi = "mhtJetXjvec" ttHCoreEventAna.jetPt = mt2JPt ### jet pt 30: this will change ht and mht # switch off the SV and MC matching #ttHSVAna.do_mc_match = False ##------------------------------------------ ## CONTROL VARIABLES ##------------------------------------------ from CMGTools.TTHAnalysis.analyzers.ttHMT2Control import ttHMT2Control ttHMT2Control = cfg.Analyzer( ttHMT2Control, name = 'ttHMT2Control', jetPt = mt2JPt, ### this will change control variables (gamma_ and zll_) ) ##------------------------------------------ ## TOPOLOGICAL VARIABLES: minMT, MT2 ##------------------------------------------ from CMGTools.TTHAnalysis.analyzers.ttHTopoVarAnalyzer import ttHTopoVarAnalyzer ttHTopoJetAna = cfg.Analyzer( ttHTopoVarAnalyzer, name = 'ttHTopoVarAnalyzer', doOnlyDefault = True, jetPt = mt2JPt, ### this will change diffMetMht and deltaPhiMin ) from PhysicsTools.Heppy.analyzers.eventtopology.MT2Analyzer import MT2Analyzer
lepAna.miniIsolationVetoLeptons = None # use 'inclusive' to veto inclusive leptons and their footprint in all isolation cones ## will become miniIso perhaps? #lepAna.loose_muon_isoCut = lambda muon : muon.relIso03 < 10.5 #lepAna.loose_electron_isoCut = lambda electron : electron.relIso03 < 10.5 # switch off slow photon MC matching photonAna.do_mc_match = False ##------------------------------------------ ## TOPOLOGICAL VARIABLES: RAZOR ##------------------------------------------ from PhysicsTools.Heppy.analyzers.eventtopology.RazorAnalyzer import RazorAnalyzer monoXRazorAna = cfg.Analyzer( RazorAnalyzer, name = 'RazorAnalyzer', doOnlyDefault = False ) ##------------------------------------------ ## TOLOLOGIAL VARIABLES: MT2 ##------------------------------------------ from CMGTools.TTHAnalysis.analyzers.ttHTopoVarAnalyzer import ttHTopoVarAnalyzer ttHTopoJetAna = cfg.Analyzer( ttHTopoVarAnalyzer, name = 'ttHTopoVarAnalyzer', doOnlyDefault = True ) from PhysicsTools.Heppy.analyzers.eventtopology.MT2Analyzer import MT2Analyzer monoXMT2Ana = cfg.Analyzer( MT2Analyzer, name = 'MT2Analyzer', metCollection = "slimmedMETs",
##------------------------------------------ #comment forSherpa from PhysicsTools.Heppy.analyzers.gen.LHEAnalyzer import LHEAnalyzer LHEAna = LHEAnalyzer.defaultConfig from PhysicsTools.Heppy.analyzers.gen.TauDecayModeAnalyzer import TauDecayModeAnalyzer TauDecayAna = TauDecayModeAnalyzer.defaultConfig from CMGTools.TTHAnalysis.analyzers.ttHMT2Control import ttHMT2Control ttHMT2Control = cfg.Analyzer( ttHMT2Control, name='ttHMT2Control', jetPt=mt2JPt, ) ##------------------------------------------ ## NUMBER of ISR JETS ##------------------------------------------ from CMGTools.TTHAnalysis.analyzers.ttHIsrJetAnalyzer import ttHIsrJetAnalyzer ttHIsrJetAna = cfg.Analyzer( ttHIsrJetAnalyzer, name='ttHIsrJetAnalyzer', jetPt=mt2JPt, )
# if not MC, nothing to do if not self.cfg_comp.isMC: return True # do MC level analysis self.makeMCInfo(event) return True import PhysicsTools.HeppyCore.framework.config as cfg setattr( GeneratorAnalyzer, "defaultConfig", cfg.Analyzer( GeneratorAnalyzer, # BSM particles that can appear with status <= 2 and should be kept stableBSMParticleIds=[1000022], # Particles of which we want to save the pre-FSR momentum (a la status 3). # Note that for quarks and gluons the post-FSR doesn't make sense, # so those should always be in the list savePreFSRParticleIds=[1, 2, 3, 4, 5, 11, 12, 13, 14, 15, 16, 21], # Make also the list of all genParticles, for other analyzers to handle makeAllGenParticles=True, # Make also the splitted lists makeSplittedGenLists=True, allGenTaus=False, # Save LHE weights in LHEEventProduct makeLHEweights=True, # Print out debug information verbose=False, ))
        # NOTE(review): fragment — the enclosing `process` method starts before
        # this chunk; indentation reconstructed. Associates the trigger object
        # `to` with every path it fired on.
        for info in trigger_infos:
            if to.hasPathName(info.name, True):
                info.objects.append(to)
                info.objIds.add(abs(to.pdgId()))

        event.trigger_infos = trigger_infos

        if self.cfg_ana.requireTrigger:
            # reject the event when no required trigger path fired
            if not trigger_passed:
                return False

        self.counters.counter('Trigger').inc('HLT')
        return True

    def __str__(self):
        """Append the configured trigger list to the base representation."""
        tmp = super(TriggerAnalyzer, self).__str__()
        triglist = str(self.triggerList)
        return '\n'.join([tmp, triglist])


# Default configuration for TriggerAnalyzer, attached as a class attribute.
setattr(
    TriggerAnalyzer, 'defaultConfig',
    cfg.Analyzer(
        class_object=TriggerAnalyzer,
        requireTrigger=True,
        usePrescaled=False,
        addTriggerObjects=True,
        # vetoTriggers=[],
    ))
# NOTE(review): fragment — this cfg.Analyzer(...) expression is the final
# argument of a `setattr(JetAnalyzer, "defaultConfig", ...)` call whose head
# lies before this chunk.
cfg.Analyzer(
    class_object=JetAnalyzer,
    jetCol='slimmedJets',
    copyJetsByValue=False,  # Whether or not to copy the input jets or to work with references (should be 'True' if JetAnalyzer is run more than once)
    genJetCol='slimmedGenJets',
    rho=('fixedGridRhoFastjetAll', '', ''),
    jetPt=25.,
    jetEta=4.7,
    jetEtaCentral=2.4,
    jetLepDR=0.4,
    jetLepArbitration=(
        lambda jet, lepton: lepton
    ),  # you can decide which to keep in case of overlaps; e.g. if the jet is b-tagged you might want to keep the jet
    cleanSelectedLeptons=True,  # Whether to clean 'selectedLeptons' after disambiguation. Treat with care (= 'False') if running Jetanalyzer more than once
    minLepPt=10,
    lepSelCut=lambda lep: True,
    relaxJetId=False,
    doPuId=False,  # Not commissioned in 7.0.X
    doQG=False,
    checkLeptonPFOverlap=True,
    recalibrateJets=False,
    applyL2L3Residual='Data',  # if recalibrateJets, apply L2L3Residual to Data only
    recalibrationType="AK4PFchs",
    shiftJEC=0,  # set to +1 or -1 to apply +/-1 sigma shift to the nominal jet energies
    addJECShifts=False,  # if true, add "corr", "corrJECUp", and "corrJECDown" for each jet (requires uncertainties to be available!)
    smearJets=True,
    shiftJER=0,  # set to +1 or -1 to get +/-1 sigma shifts
    jecPath="",
    calculateSeparateCorrections=False,
    calculateType1METCorrection=False,
    type1METParams={
        'jetPtThreshold': 15.,
        'skipEMfractionThreshold': 0.9,
        'skipMuons': True
    },
    addJERShifts=0,  # add +/-1 sigma shifts to jets, intended to be used with shiftJER=0
    cleanJetsFromFirstPhoton=False,
    cleanJetsFromTaus=False,
    cleanJetsFromIsoTracks=False,
    alwaysCleanPhotons=False,
    do_mc_match=True,
    cleanGenJetsFromPhoton=False,
    attachNeutrinos=True,
    genNuSelection=lambda nu: True,  #FIXME: add here check for ispromptfinalstate
    collectionPostFix=""))
            # NOTE(review): fragment — verbose debug dump inside a loop that
            # starts before this chunk (Python 2 print statements);
            # indentation reconstructed.
            print 'Trigger object: pt=%.2f, eta=%.2f, phi=%.2f, collection=%s, type_ids=%s, filters=%s, paths=%s' % (
                ob.pt(), ob.eta(), ob.phi(), ob.collection(), types, filters, paths)
        if self.collToMatch:
            # report the matched trigger object (or 'None') for each lepton
            for lep in tcoll:
                mstring = 'None'
                ob = getattr(lep, 'matchedTrgObj' + self.label)
                if ob:
                    mstring = 'trigger obj with pt=%.2f, eta=%.2f, phi=%.2f, collection=%s' % (
                        ob.pt(), ob.eta(), ob.phi(), ob.collection())
                print 'Lepton pt=%.2f, eta=%.2f, phi=%.2f matched to %s' % (
                    lep.pt(), lep.eta(), lep.phi(), mstring)
        return True


# Default configuration for TriggerMatchAnalyzer, attached as a class attribute.
setattr(
    TriggerMatchAnalyzer, "defaultConfig",
    cfg.Analyzer(TriggerMatchAnalyzer,
                 name="TriggerMatchAnalyzerDefault",
                 label='DefaultTrigObjSelection',
                 processName='PAT',
                 fallbackProcessName='RECO',
                 unpackPathNames=True,
                 trgObjSelectors=[],
                 collToMatch=None,
                 collMatchSelectors=[],
                 collMatchDRCut=0.3,
                 univoqueMatching=True,
                 verbose=False))
# --- MonoX config fragment: lepton mini-isolation setup, photon tweaks,
# --- and Razor/MT2 topological-variable analyzers.
lepAna.packedCandidates = 'packedPFCandidates'
lepAna.miniIsolationPUCorr = 'rhoArea'
lepAna.miniIsolationVetoLeptons = None  # use 'inclusive' to veto inclusive leptons and their footprint in all isolation cones
## will become miniIso perhaps?
#lepAna.loose_muon_isoCut = lambda muon : muon.relIso03 < 10.5
#lepAna.loose_electron_isoCut = lambda electron : electron.relIso03 < 10.5

# switch off slow photon MC matching
photonAna.do_mc_match = False

##------------------------------------------
## TOPOLOGICAL VARIABLES: RAZOR
##------------------------------------------
from PhysicsTools.Heppy.analyzers.eventtopology.RazorAnalyzer import RazorAnalyzer
monoXRazorAna = cfg.Analyzer(RazorAnalyzer,
                             name='RazorAnalyzer',
                             doOnlyDefault=False)

##------------------------------------------
## TOPOLOGICAL VARIABLES: MT2
##------------------------------------------
from CMGTools.TTHAnalysis.analyzers.ttHTopoVarAnalyzer import ttHTopoVarAnalyzer
ttHTopoJetAna = cfg.Analyzer(ttHTopoVarAnalyzer,
                             name='ttHTopoVarAnalyzer',
                             doOnlyDefault=True)

from PhysicsTools.Heppy.analyzers.eventtopology.MT2Analyzer import MT2Analyzer
# NOTE(review): this definition is truncated in this chunk — it continues
# beyond the visible source.
monoXMT2Ana = cfg.Analyzer(
    MT2Analyzer,
    name='MT2Analyzer',
    metCollection="slimmedMETs",
########################################################## ## MONOX COMMON MODULES ARE DEFINED HERE ## ## skimming modules are configured to not cut anything ## ########################################################## import PhysicsTools.HeppyCore.framework.config as cfg from PhysicsTools.Heppy.analyzers.core.all import * from PhysicsTools.Heppy.analyzers.objects.all import * from PhysicsTools.Heppy.analyzers.gen.all import * import os from CMGTools.TTHAnalysis.analyzers.ttHhistoCounterAnalyzer import ttHhistoCounterAnalyzer histoCounter = cfg.Analyzer( ttHhistoCounterAnalyzer, name="ttHhistoCounterAnalyzer", ) PDFWeights = [] #PDFWeights = [ ("CT10",53), ("MSTW2008lo68cl",41), ("NNPDF21_100",101) ] # Find the initial events before the skim skimAnalyzer = cfg.Analyzer( SkimAnalyzerCount, name='skimAnalyzerCount', useLumiBlocks=False, ) # Pick individual events (normally not in the path) eventSelector = cfg.Analyzer( EventSelector,
# NOTE(review): fragment — this cfg.Analyzer(...) expression is the final
# argument of a `setattr(JetAnalyzer, "defaultConfig", ...)` call whose head
# lies before this chunk (an older/shorter variant than the one above).
cfg.Analyzer(
    class_object=JetAnalyzer,
    jetCol='slimmedJets',
    copyJetsByValue=False,  # Whether or not to copy the input jets or to work with references (should be 'True' if JetAnalyzer is run more than once)
    genJetCol='slimmedGenJets',
    rho=('fixedGridRhoFastjetAll', '', ''),
    jetPt=25.,
    jetEta=4.7,
    jetEtaCentral=2.4,
    jetLepDR=0.4,
    jetLepArbitration=(
        lambda jet, lepton: lepton
    ),  # you can decide which to keep in case of overlaps; e.g. if the jet is b-tagged you might want to keep the jet
    cleanSelectedLeptons=True,  # Whether to clean 'selectedLeptons' after disambiguation. Treat with care (= 'False') if running Jetanalyzer more than once
    minLepPt=10,
    lepSelCut=lambda lep: True,
    relaxJetId=False,
    doPuId=False,  # Not commissioned in 7.0.X
    doQG=False,
    recalibrateJets=False,
    recalibrationType="AK4PFchs",
    shiftJEC=0,  # set to +1 or -1 to get +/-1 sigma shifts
    smearJets=True,
    shiftJER=0,  # set to +1 or -1 to get +/-1 sigma shifts
    cleanJetsFromFirstPhoton=False,
    cleanJetsFromTaus=False,
    cleanJetsFromIsoTracks=False,
    jecPath="",
    cleanGenJetsFromPhoton=False,
    collectionPostFix=""))
# --- cpTop config fragment: single-component test setup plus the basic
# --- bookkeeping analyzers (JSON, skim counter, trigger, vertex).
selectedComponents = [comp]
comp.files = [comp.files[0]]  #10 bug on semilep
comp.splitFactor = 1
comp.fineSplitFactor = 1

############################################################################
# Analyzers
############################################################################
from PhysicsTools.Heppy.analyzers.core.JSONAnalyzer import JSONAnalyzer
from PhysicsTools.Heppy.analyzers.core.SkimAnalyzerCount import SkimAnalyzerCount
from CMGTools.cpTop.proto.analyzers.TriggerAnalyzer import TriggerAnalyzer
from PhysicsTools.Heppy.analyzers.objects.VertexAnalyzer import VertexAnalyzer
from CMGTools.cpTop.heppy.analyzers.Debugger import Debugger

# Apply the good-lumi JSON (data only).
json = cfg.Analyzer(
    JSONAnalyzer,
    name='JSONAnalyzer',
)

# Count events before any selection.
skim = cfg.Analyzer(SkimAnalyzerCount, name='SkimAnalyzerCount')

# Trigger information; not used to reject events (requireTrigger=False).
trigger = cfg.Analyzer(TriggerAnalyzer,
                       name='TriggerAnalyzer',
                       addTriggerObjects=True,
                       requireTrigger=False,
                       usePrescaled=False)

vertex = cfg.Analyzer(VertexAnalyzer,
                      name='VertexAnalyzer',
                      fixedWeight=1,
                      keepFailingEvents=False,
                      verbose=False)
dyJetsFakeAna.channel = 'tt' ### Define tau-tau specific modules tauTauAna = cfg.Analyzer( class_object = TauTauAnalyzer , name = 'TauTauAnalyzer' , pt1 = 45 , eta1 = 2.1 , iso1 = 1. , looseiso1 = 10. , pt2 = 45 , eta2 = 2.1 , iso2 = 1. , looseiso2 = 10. , # isolation = 'byIsolationMVA3newDMwLTraw', isolation = 'byCombinedIsolationDeltaBetaCorrRaw3Hits', # RIC: 9 March 2015 m_min = 10 , m_max = 99999 , dR_min = 0.5 , # triggerMap = pathsAndFilters , jetPt = 30. , jetEta = 4.7 , relaxJetId = False , verbose = False , ) tauDecayModeWeighter = cfg.Analyzer( TauDecayModeWeighter , 'TauDecayModeWeighter' , legs = ['leg1', 'leg2'],
# --- SUSY multi-lepton config fragment: loose-tau settings, extra event
# --- analyzers, and secondary-vertex analyzer insertion.
tauAna.loose_etaMax = 2.3
tauAna.loose_decayModeID = "decayModeFindingNewDMs"
tauAna.loose_tauID = "decayModeFindingNewDMs"
tauAna.loose_vetoLeptons = False  # no cleaning with leptons in production
# jetAna.cleanJetsFromTaus = True
# jetAnaScaleUp.cleanJetsFromTaus = True
# jetAnaScaleDown.cleanJetsFromTaus = True

#-------- ADDITIONAL ANALYZERS -----------
## Event Analyzer for susy multi-lepton (at the moment, it's the TTH one)
from CMGTools.TTHAnalysis.analyzers.ttHLepEventAnalyzer import ttHLepEventAnalyzer
ttHEventAna = cfg.Analyzer(
    ttHLepEventAnalyzer,
    name="ttHLepEventAnalyzer",
    minJets25=0,
)

## JetTau analyzer, to be called (for the moment) once bjetsMedium are produced
from CMGTools.TTHAnalysis.analyzers.ttHJetTauAnalyzer import ttHJetTauAnalyzer
ttHJetTauAna = cfg.Analyzer(
    ttHJetTauAnalyzer,
    name="ttHJetTauAnalyzer",
)

## Insert the SV analyzer in the sequence
susyCoreSequence.insert(susyCoreSequence.index(ttHCoreEventAna), ttHSVAna)
# Keep only well-pointing secondary vertices close to the beamline.
ttHSVAna.preselection = lambda ivf: abs(ivf.dxy.value()) < 2 and ivf.cosTheta > 0.98
# --- Config fragment: core-analyzer tweaks, a Z(mumu) skim, and photon /
# --- dielectron trigger definitions for the producer section.
ttHCoreEventAna.mhtForBiasedDPhi = "mhtJetXjvec"
ttHCoreEventAna.jetPt = mt2JPt  ### jet pt 30: this will change ht and mht

# switch off the SV and MC matching
#ttHSVAna.do_mc_match = False

##------------------------------------------
## Z skim
##------------------------------------------
from CMGTools.TTHAnalysis.analyzers.ttHmllSkimmer import ttHmllSkimmer
# Tree Producer
# Keep events with a reconstructed dimuon pair in the Z mass window.
ttHZskim = cfg.Analyzer(ttHmllSkimmer,
                        name='ttHmllSkimmer',
                        lepId=[13],
                        maxLeps=3,
                        massMin=60,
                        massMax=120,
                        doZGen=False,
                        doZReco=True)

##------------------------------------------
## PRODUCER
##------------------------------------------

## TRIGGERS DEFINITION
from CMGTools.RootTools.samples.triggers_13TeV_Spring16 import triggers_photon75, triggers_photon90, triggers_photon120, triggers_photon75ps
from CMGTools.RootTools.samples.triggers_13TeV_Spring16 import triggers_photon90ps, triggers_photon120ps, triggers_photon155, triggers_photon165_HE10, triggers_photon175
from CMGTools.RootTools.samples.triggers_13TeV_Spring16 import triggers_doubleele33, triggers_mumu_noniso

# NOTE(review): dict literal truncated in this chunk — it continues beyond the
# visible source.
triggerFlagsAna.triggerBits = {
    # signal triggers
# ggH135_0 = c.makeMCComponentFromEOS('ggH135_rawaod', 'ggH135_rawaod', '/store/group/phys_higgs/cmshtt/steggema/HPSatHLTv5/GluGluHToTauTau_M125_13TeV_powheg_pythia8/TauHPSatHLTFine/161117_103941/0000/') # ggH135_1 = c.makeMCComponentFromEOS('ggH135_rawaod', 'ggH135_rawaod', '/store/group/phys_higgs/cmshtt/steggema/HPSatHLTv5/GluGluHToTauTau_M125_13TeV_powheg_pythia8/TauHPSatHLTFine/161117_103941/0001/') ggH135_0 = c.makeMCComponentFromEOS( 'ggH135_rawaod', 'ggH135_rawaod', '/store/group/phys_tau/HLT2016/HPSatHLT/GluGluHToTauTau_M125_13TeV_powheg_pythia8/HPSatHLT/161206_134434/0000/' ) ggH135_1 = c.makeMCComponentFromEOS( 'ggH135_rawaod_2', 'ggH135_rawaod', '/store/group/phys_tau/HLT2016/HPSatHLT/GluGluHToTauTau_M125_13TeV_powheg_pythia8/HPSatHLT/161206_134434/0001/' ) selectedComponents = [ggH135_0, ggH135_1] tauHLTAna = cfg.Analyzer( TauHLTAnalyzer, name='TauHLTAnalyzer', ) tauHLTTree = cfg.Analyzer(HLTTauTreeProducer, name='HLTTauTreeProducer') sequence = cfg.Sequence([tauHLTAna, tauHLTTree]) if not production: selectedComponents = selectedComponents[:1] for comp in selectedComponents: comp.splitFactor = 1 comp.fineSplitFactor = 1 # comp.files = comp.files[:1] else: for comp in selectedComponents: comp.splitFactor = 200
# --- HToZZ4L config fragment: two-lepton (Z) selection, skim and tree
# --- producer.
import PhysicsTools.HeppyCore.framework.config as cfg
from PhysicsTools.Heppy.analyzers.core.AutoFillTreeProducer import AutoFillTreeProducer
from PhysicsTools.Heppy.analyzers.core.TriggerMatchAnalyzer import TriggerMatchAnalyzer
from CMGTools.HToZZ4L.analyzers.TwoLeptonAnalyzer import TwoLeptonAnalyzer
from CMGTools.HToZZ4L.analyzers.ThreeLeptonAnalyzer import ThreeLeptonAnalyzer
from CMGTools.HToZZ4L.analyzers.FourLeptonEventSkimmer import FourLeptonEventSkimmer
from CMGTools.HToZZ4L.analyzers.ZTagAndProbeAnalyzer import ZTagAndProbeAnalyzer
from CMGTools.HToZZ4L.analyzers.fourLeptonTree import *

twoLeptonAnalyzer = cfg.Analyzer(TwoLeptonAnalyzer,
                                 name="twoLeptonAnalyzer",
                                 mode="Z")

# Keep only events with a best isolated Z candidate.
twoLeptonEventSkimmer = cfg.Analyzer(FourLeptonEventSkimmer,
                                     name="twoLeptonEventSkimmer",
                                     required=['bestIsoZ'])

# NOTE(review): definition truncated in this chunk — the `collections` dict
# continues beyond the visible source.
twoLeptonTreeProducer = cfg.Analyzer(
    AutoFillTreeProducer,
    name='twoLeptonTreeProducer',
    vectorTree=True,
    saveTLorentzVectors=False,  # can set to True to get also the TLorentzVectors, but trees will be bigger
    globalVariables=hzz_globalVariables,  # rho, nvertices, njets
    globalObjects=hzz_globalObjects,  # met
    collections={
        #"bestIsoZ" : NTupleCollection("z", ZType, 1, help="Dilepton Candidates"),
        "bestIsoZ": NTupleCollection("z", ZTypeLite, 1, help="Dilepton Candidates"),
        #"selectedLeptons" : NTupleCollection("Lep", leptonTypeHZZ, 10, help="Leptons after the preselection"),
# --- H2TauTau config fragment: JEC re-application switches and tau-tau
# --- channel modules (MVA isolation variant).
syncntuple = False
pick_events = False

if reapplyJEC:
    if cmssw:
        # jets already re-corrected upstream in the CMSSW process
        jetAna.jetCol = 'patJetsReapplyJEC'
        dyJetsFakeAna.jetCol = 'patJetsReapplyJEC'
    else:
        # re-calibrate jets inside heppy instead
        jetAna.recalibrateJets = True

dyJetsFakeAna.channel = 'tt'

# Define tau-tau specific modules
tauP4Scaler = cfg.Analyzer(
    class_object=TauP4Scaler,
    name='TauP4Scaler',
)

# NOTE(review): definition truncated in this chunk — it continues beyond the
# visible source.
tauTauAna = cfg.Analyzer(
    class_object=TauTauAnalyzer,
    name='TauTauAnalyzer',
    pt1=40.,
    eta1=2.1,
    iso1=1.,
    looseiso1=999999999.,
    pt2=40.,
    eta2=2.1,
    iso2=1.,
    looseiso2=999999999.,
    isolation='byIsolationMVArun2v1DBoldDMwLTraw',
    m_min=10,
if isolation == "ptRel": # delay isolation cut for leptons of pt > 10, for which we do pTrel recovery lepAna.loose_muon_isoCut = lambda muon : muon.relIso03 < 0.5 or muon.pt() > 10 lepAna.loose_electron_isoCut = lambda elec : elec.relIso03 < 0.5 or elec.pt() > 10 # in the cleaning, keep the jet if the lepton fails relIso or ptRel jetAna.jetLepArbitration = lambda jet,lepton : ( lepton if (lepton.relIso03 < 0.4 or ptRelv1(lepton.p4(),jet.p4()) > 5) else jet ) ttHCoreEventAna.leptonMVAKindTTH = "SusyWithBoost" ttHCoreEventAna.leptonMVAKindSusy = "SusyWithBoost" ttHCoreEventAna.leptonMVAPathTTH = "CMGTools/TTHAnalysis/macros/leptons/trainingPHYS14leptonMVA_PHYS14eleMVA_MiniIso_ttH/weights/%s_BDTG.weights.xml" ttHCoreEventAna.leptonMVAPathSusy = "CMGTools/TTHAnalysis/macros/leptons/trainingPHYS14leptonMVA_PHYS14eleMVA_MiniIso_SusyT1/weights/%s_BDTG.weights.xml" # insert a second skimmer after the jet cleaning ttHLepSkim2 = cfg.Analyzer( ttHLepSkimmer, name='ttHLepSkimmer2', minLeptons = 2, maxLeptons = 999, ) susyCoreSequence.insert(susyCoreSequence.index(jetAna)+1, ttHLepSkim2) elif isolation == "miniIso": lepAna.loose_muon_isoCut = lambda muon : muon.miniRelIso < 0.4 lepAna.loose_electron_isoCut = lambda elec : elec.miniRelIso < 0.4 elif isolation == None: lepAna.loose_muon_isoCut = lambda muon : True lepAna.loose_electron_isoCut = lambda elec : True elif isolation == "absIso03": lepAna.loose_muon_absIso = 10.0 lepAna.loose_electron_relIso = 99.0 lepAna.loose_muon_relIso = 99.0 lepAna.loose_electron_absIso = 10.0 else:
# Flat-tree producer: one entry per selected lepton, with the PF candidates
# associated to each lepton stored as vector branches.
treeProducer = cfg.Analyzer(
    AutoFillVectorTreeProducer,
    name='treeProducer',
    vectorTree=False,
    saveTLorentzVectors=False,  # can set to True to get also the TLorentzVectors, but trees will be bigger
    globalVariables=[],  # rho, nvertices, njets
    globalObjects=[],
    collection=("selectedLeptons",
                NTupleCollection("lep",
                                 leptonTypeSusy,
                                 10,
                                 help="leptons after the preselection")),
    vector_collections=[
        ("pfCands_neutral",
         NTupleCollection("pfCand_neutral",
                          pfParticleType,
                          100,
                          help="neutral pf candidates associated")),
        ("pfCands_charged",
         NTupleCollection("pfCand_charged",
                          pfParticleType,
                          100,
                          help="charged pf candidates associated")),
        ("pfCands_photon",
         NTupleCollection("pfCand_photon",
                          pfParticleType,
                          100,
                          help="photon pf candidates associated")),
        ("pfCands_electron",
         NTupleCollection("pfCand_electron",
                          pfParticleType,
                          100,
                          help="electron pf candidates associated")),
        ("pfCands_muon",
         NTupleCollection("pfCand_muon",
                          pfParticleType,
                          100,
                          help="muon pf candidates associated")),
        ("ivf", NTupleCollection("SV", svType, 20, help="SVs from IVF")),
    ],
    defaultFloatType='F',
)
dyJetsFakeAna.channel = 'tt' ### Define tau-tau specific modules tauTauAna = cfg.Analyzer( class_object=TauTauAnalyzer, name='TauTauAnalyzer', pt1=40., eta1=2.1, iso1=1., looseiso1=999999999., pt2=40., eta2=2.1, iso2=1., looseiso2=999999999., isolation='byIsolationMVArun2v1DBoldDMwLTraw', m_min=10, m_max=99999, dR_min=0.5, jetPt=30., jetEta=4.7, relaxJetId=False, verbose=False, from_single_objects=False, ) if not cmssw: tauTauAna.from_single_objects = True l1Ana = cfg.Analyzer(class_object=L1TriggerAnalyzer,
comp.files = comp.files[0:1] # comp.files = ['/tmp/manzoni/001784E5-D649-734B-A5FF-E151DA54CC02.root'] # one file from TTJets_ext on lxplus700 # comp.fineSplitFactor = 10 # fine splitting, multicore samples = [comp] selectedComponents = samples ################################################### ### ANALYZERS ### ################################################### toSelect = [] eventSelector = cfg.Analyzer( EventSelector, name='EventSelector', toSelect=toSelect, ) jsonAna = cfg.Analyzer( JSONAnalyzer, name='JSONAnalyzer', ) skimAna = cfg.Analyzer(SkimAnalyzerCount, name='SkimAnalyzerCount') triggerAna = cfg.Analyzer( TriggerAnalyzer, name='TriggerAnalyzer', addTriggerObjects=True, requireTrigger=True,
        # NOTE(review): fragment — tail of a debug-print method that starts
        # before this chunk (Python 2 print statements); indentation
        # reconstructed.
        print 'gamma candidate had iso: ', event.selectedPhotons[
            0].chargedHadronIso()
        print 'gamma candidate neu iso: ', event.selectedPhotons[
            0].neutralHadronIso()
        print 'gamma candidate gamma iso: ', event.selectedPhotons[
            0].photonIso()
        print 'gamma idCutBased', event.selectedPhotons[0].idCutBased

    def process(self, event):
        """Build the photon collections and, on MC, match them to gen photons."""
        self.readCollections(event.input)

        #call the photons functions
        self.makePhotons(event)

        # self.printInfo(event)

        ## ===> do matching
        if not self.cfg_comp.isMC:
            return True

        self.matchPhotons(event)

        return True


# Default configuration for PhotonAnalyzer, attached as a class attribute.
setattr(
    PhotonAnalyzer, "defaultConfig",
    cfg.Analyzer(class_object=PhotonAnalyzer,
                 photons='slimmedPhotons',
                 ptMin=20,
                 etaMax=2.5,
                 gammaID="PhotonCutBasedIDLoose"))
sample.triggers += [ 'HLT_Ele135_CaloIdVT_GsfTrkIdT_v%d' % i for i in range(1, 15) ] #electron trigger sample.splitFactor = splitFactor(sample, 5e4) selectedComponents = samples ################################################### ### ANALYSERS ### ################################################### eventSelector = cfg.Analyzer(EventSelector, name='EventSelector', toSelect=[ 109, 541, 759, 836, 206, ]) lheWeightAna = cfg.Analyzer(LHEWeightAnalyzer, name="LHEWeightAnalyzer", useLumiInfo=False) jsonAna = cfg.Analyzer( JSONAnalyzer, name='JSONAnalyzer', ) skimAna = cfg.Analyzer(SkimAnalyzerCount, name='SkimAnalyzerCount')
# --- H2TauTau tau-jet-mu config fragment: sample/trigger imports and the
# --- channel analyzer.
from CMGTools.H2TauTau.proto.analyzers.LeptonWeighter import LeptonWeighter
from CMGTools.RootTools.utils.splitFactor import splitFactor
from CMGTools.H2TauTau.proto.samples.fall15.htt_common import backgrounds_mu, sm_signals, mssm_signals, data_single_muon, sync_list, WJetsHT
from CMGTools.H2TauTau.proto.samples.fall15.triggers_tauMu import mc_triggers, mc_triggerfilters
from CMGTools.H2TauTau.proto.samples.fall15.triggers_tauMu import data_triggers, data_triggerfilters
from CMGTools.H2TauTau.htt_ntuple_base_cff import genAna, vertexAna, puFileData, puFileMC, eventSelector, jsonAna, triggerAna, pileUpAna, httGenAna, NJetsAna

# Get all heppy options; set via "-o production" or "-o production=True"
# production = True run on batch, production = False (or unset) run locally
production = True

skimAna = cfg.Analyzer(SkimAnalyzerCount, name='SkimAnalyzerCount')

# NOTE(review): definition truncated in this chunk — it continues beyond the
# visible source.
tauJetMuAna = cfg.Analyzer(
    TauJetMuAnalyzer,
    name='TauJetMuAnalyzer',
    pt1=20.,
    eta1=2.1,
    iso1=0.1,
    looseiso1=0.1,
    pt2=13.,  # looser cut because of subsequente jet re-calibration - 7 GeV difference to 20 should be fine
    eta2=2.3,
    iso2=None,
    looseiso2=None,
    m_min=10,
    m_max=99999,
############### # Analyzers ############### from CMGTools.H2TauTau.heppy.analyzers.Selector import Selector def select_tau(tau): return tau.pt() >= 23 and \ abs(tau.eta()) <= 2.3 and \ abs(tau.leadChargedHadrCand().dz()) < 0.2 and \ tau.tauID('decayModeFinding') > 0.5 and \ abs(tau.charge()) == 1. and \ tau.tauID('byVVLooseIsolationMVArun2017v2DBoldDMwLT2017') sel_taus = cfg.Analyzer( Selector, 'sel_taus', output = 'sel_taus', src = 'taus', filter_func = select_tau ) from CMGTools.H2TauTau.heppy.analyzers.EventFilter import EventFilter one_tau = cfg.Analyzer( EventFilter, 'one_tau', src = 'sel_taus', filter_func = lambda x : len(x)>0 ) def select_muon(muon): return muon.pt() >= 21 and \ abs(muon.eta()) <= 2.1 and \
# --- VVResonances config fragment: analyzer imports and the basic
# --- bookkeeping analyzers (event picker, skim counter, JSON).
from PhysicsTools.Heppy.analyzers.gen.all import *
from CMGTools.VVResonances.analyzers.LeptonIDOverloader import *
from CMGTools.VVResonances.analyzers.HbbTagComputer import *
from CMGTools.VVResonances.analyzers.VVBuilder import *
from CMGTools.VVResonances.analyzers.TTBuilder import *
from CMGTools.VVResonances.analyzers.VTauBuilder import *
from CMGTools.VVResonances.analyzers.Skimmer import *
from CMGTools.VVResonances.analyzers.TopMergingAnalyzer import *
from CMGTools.VVResonances.analyzers.ObjectWeightAnalyzer import *
import os

# Pick individual events (normally not in the path)
eventSelector = cfg.Analyzer(
    EventSelector,
    name="EventSelector",
    toSelect=[]  # here put the event numbers (actual event numbers from CMSSW)
)

# Count events before any selection.
skimAnalyzer = cfg.Analyzer(
    SkimAnalyzerCount,
    name='skimAnalyzerCount',
    useLumiBlocks=False,
)

# Apply json file (if the dataset has one)
jsonAna = cfg.Analyzer(
    JSONAnalyzer,
    name="JSONAnalyzer",
)
# --- VVResonances config fragment: candidate skimmers and the diboson tree
# --- producer.
from PhysicsTools.Heppy.analyzers.core.AutoFillTreeProducer import *
from CMGTools.VVResonances.analyzers.vvTypes import *
from CMGTools.VVResonances.analyzers.Skimmer import *
import PhysicsTools.HeppyCore.framework.config as cfg

# Keep events with at least one diboson candidate of any listed topology.
vvSkimmer = cfg.Analyzer(Skimmer,
                         name='vvSkimmer',
                         required=['LNuJJ', 'JJ', 'LLJJ', 'JJNuNu', 'TopCR'])

vTauSkimmer = cfg.Analyzer(
    Skimmer,
    name='vTauSkimmer',
    required=['TauTau', 'TauJet', 'TauTauLoose', 'TauJetLoose'])

leptonSkimmer = cfg.Analyzer(Skimmer,
                             name='leptonSkimmer',
                             required=['inclusiveLeptons'])

# NOTE(review): definition truncated in this chunk — the `globalVariables`
# list continues beyond the visible source.
vvTreeProducer = cfg.Analyzer(
    AutoFillTreeProducer,
    name='vvTreeProducer',
    vectorTree=True,
    saveTLorentzVectors=False,  # can set to True to get also the TLorentzVectors, but trees will be bigger
    defaultFloatType='F',  # use Float_t for floating point
    # PDFWeights = PDFWeights,
    globalVariables=[
        NTupleVariable("rho", lambda ev: ev.rho, float, help="kt6PFJets rho"),
        NTupleVariable("rhoCN", lambda ev: ev.rhoCN, float,
########################################################## import PhysicsTools.HeppyCore.framework.config as cfg from PhysicsTools.Heppy.analyzers.core.all import * from PhysicsTools.Heppy.analyzers.objects.all import * from PhysicsTools.Heppy.analyzers.gen.all import * import os from CMGTools.TTHAnalysis.analyzers.ttHhistoCounterAnalyzer import ttHhistoCounterAnalyzer susyCounter = cfg.Analyzer( ttHhistoCounterAnalyzer, name="ttHhistoCounterAnalyzer", SMS_max_mass=3000, # maximum mass allowed in the scan SMS_mass_1='genSusyMScan1', # first scanned mass SMS_mass_2='genSusyMScan2', # second scanned mass SMS_varying_masses= [], # other mass variables that are expected to change in the tree (e.g., in T1tttt it should be set to ['genSusyMGluino','genSusyMNeutralino']) SMS_regexp_evtGenMass='genSusyM.+', bypass_trackMass_check= True # bypass check that non-scanned masses are the same in all events ) PDFWeights = [] #PDFWeights = [ ("CT10",53), ("MSTW2008lo68cl",41), ("NNPDF21_100",101) ] # Find the initial events before the skim skimAnalyzer = cfg.Analyzer( SkimAnalyzerCount, name='skimAnalyzerCount', useLumiBlocks=False, )
# NOTE(review): fragment — this cfg.Analyzer(...) expression is the final
# argument of a call (presumably `setattr(METAnalyzer, "defaultConfig", ...)`
# — confirm against the enclosing file) whose head lies before this chunk;
# hence the extra closing parenthesis at the end.
cfg.Analyzer(
    class_object=METAnalyzer,
    # metCollection = "slimmedMETsMuEGClean",
    # noPUMetCollection = "slimmedMETsMuEGClean",
    metCollection="slimmedMETs",
    noPUMetCollection="slimmedMETs",
    copyMETsByValue=False,
    recalibrate=True,
    applyJetSmearing=True,
    jetAnalyzerPostFix="",
    old74XMiniAODs=False,  # need to set to True to get proper Raw MET on plain 74X MC produced with CMSSW <= 7_4_12
    makeShiftedMETs=True,
    doTkMet=False,
    doPuppiMet=False,
    ### more on tkMET
    includeTkMetCHS=False,
    includeTkMetPVLoose=False,
    includeTkMetPVTight=False,
    includeTkMetNoPV=False,
    includeTkMetPVUsedInFit=False,
    ###
    doMetNoPU=False,  # Not existing in MiniAOD at the moment
    doMetNoMu=False,
    doMetNoEle=False,
    doMetNoPhoton=False,
    storePuppiExtra=False,
    candidates='packedPFCandidates',
    candidatesTypes='std::vector<pat::PackedCandidate>',
    dzMax=0.1,
    collectionPostFix="",
))
# http://cmslxr.fnal.gov/lxr/source/PhysicsTools/PatAlgos/python/producersLayer1/tauProducer_cfi.py setattr(TauAnalyzer,"defaultConfig",cfg.Analyzer( class_object = TauAnalyzer, # inclusive very loose hadronic tau selection inclusive_ptMin = 18, inclusive_etaMax = 9999, inclusive_dxyMax = 1000., inclusive_dzMax = 0.4, inclusive_vetoLeptons = False, inclusive_leptonVetoDR = 0.4, inclusive_decayModeID = "decayModeFindingNewDMs", # ignored if not set or "" inclusive_tauID = "decayModeFindingNewDMs", inclusive_vetoLeptonsPOG = False, # If True, the following two IDs are required inclusive_tauAntiMuonID = "", inclusive_tauAntiElectronID = "", # loose hadronic tau selection loose_ptMin = 18, loose_etaMax = 9999, loose_dxyMax = 1000., loose_dzMax = 0.2, loose_vetoLeptons = True, loose_leptonVetoDR = 0.4, loose_decayModeID = "decayModeFindingNewDMs", # ignored if not set or "" loose_tauID = "byLooseCombinedIsolationDeltaBetaCorr3Hits", loose_vetoLeptonsPOG = False, # If True, the following two IDs are required loose_tauAntiMuonID = "againstMuonLoose3", loose_tauAntiElectronID = "againstElectronLooseMVA5" ) )