def prescaleBB(BBfraction=1):
    """Prescale beam-beam (BB) crossings while keeping all other bunch
    crossing types; the resulting selector is appended to 'HltFilterSeq'.

    BBfraction: fraction of BB events to accept (default 1 = keep all).
    """
    from Configurables import LoKi__ODINFilter as ODINFilter
    from Configurables import DeterministicPrescaler, GaudiSequencer

    # ODIN bunch-crossing-type selectors: BB is type 3, everything else < 3.
    non_bb_filter = ODINFilter('ODINBXTypeFilterNonBB', Code='ODIN_BXTYP < 3')
    bb_filter = ODINFilter('ODINBXTypeFilterBB', Code='ODIN_BXTYP == 3')

    # BB branch: BB events survive only with probability BBfraction.
    bb_branch = GaudiSequencer("PrescBB")
    bb_branch.Members = [
        bb_filter,
        DeterministicPrescaler("DetPrescBB", AcceptFraction=BBfraction),
    ]

    # OR of the two branches: pass if non-BB, or if BB and prescale-accepted.
    selector = GaudiSequencer('CollTypeSelector', ModeOR=True)
    selector.Members = [non_bb_filter, bb_branch]
    GaudiSequencer('HltFilterSeq').Members.append(selector)
def muonTracksFORmuonAlignment():
    """Configure Escher to reconstruct J/psi->mumu candidates from the
    Hlt1DiMuonHighMass line for muon alignment, and return the resulting
    ParticleSelection (muons from the tightened AlignJpsi2MuMu filter).

    NOTE(review): ReviveHltTracks and ParticleSelection are not imported
    here; they must be defined elsewhere in this module.
    """
    # this still needs to be worked out
    from Configurables import Escher
    Escher().RecoSequence = [
        "Hlt", "Decoding", "AlignTr", "Vertex", "RICH", "CALO", "MUON", "PROTO"
    ]  # ????
    Escher().MoniSequence = ["Tr", "OT"]
    # if the Escher hlt filter is not set, set it here
    if not hasattr(Escher(), "HltFilterCode") or not Escher().HltFilterCode:
        Escher(
        ).HltFilterCode = "HLT_PASS_RE( 'Hlt1DiMuonHighMass*Decision' )"  # Hlt2-->Hlt1 requirement
    # revive only particles used for trigger
    print 'Hlt lines to be used: '
    print ReviveHltTracks(['Hlt1DiMuonHighMassDecision'])
    # Now create the J/psi candidates
    from Configurables import CombineParticles, FilterDesktop
    from CommonParticles.StdAllLooseMuons import StdAllLooseMuons  # requires IsMuon==1
    from CommonParticles.StdLooseJpsi2MuMu import StdLooseJpsi2MuMu  # requires (ADAMASS('J/psi')<100.*MeV)&(ADOCACHI2CUT(30,'')) && "(VFASPF(VCHI2) < 25.)"
    StdLooseJpsi2MuMu.DaughtersCuts = {"mu-": "( P> 6000*MeV)"}  # momentum cut
    ## tighten the mass window for candidates used in alignment
    AlignJpsi2MuMu = FilterDesktop(
        "AlignJpsi2MuMu",
        Inputs=["Phys/StdLooseJpsi2MuMu"],
        Code="(ADMASS('J/psi(1S)') < 35.*MeV) & (VFASPF(VCHI2) < 10.)"
    )  # tighter requirements
    from Configurables import ChargedProtoParticleMaker, ChargedProtoParticleAddMuonInfo  #####, ChargedProtoCombineDLLsAlg
    from Configurables import TrackParticleMonitor, GaudiSequencer
    # Build proto-particles, form the J/psi candidates and monitor both the
    # loose and the tightened selections; order matters here.
    recoJpsiSeq = GaudiSequencer("RecoJpsiSeq")
    recoJpsiSeq.Members = [
        ChargedProtoParticleMaker('ChargedProtoPMaker'),
        ChargedProtoParticleAddMuonInfo('ChargedProtoPAddMuon'),
        ###ChargedProtoCombineDLLsAlg('ChargedProtoPCombDLLs'),
        StdAllLooseMuons,
        StdLooseJpsi2MuMu,
        TrackParticleMonitor(
            'StdLooseJpsi2MuMuMonitor',
            InputLocation='/Event/Phys/StdLooseJpsi2MuMu/Particles',
            MinMass=3000,
            MaxMass=3190),
        AlignJpsi2MuMu,
        TrackParticleMonitor(
            'AlignJpsi2MuMuMonitor',
            InputLocation='/Event/Phys/AlignJpsi2MuMu/Particles',
            MinMass=3000,
            MaxMass=3190),
    ]
    sel = ParticleSelection(Name='MufromJpsiMuMu',
                            Location='/Event/Phys/AlignJpsi2MuMu/Particles',
                            Algorithm=recoJpsiSeq)
    return sel
def prescaleBB2(BBfraction=1):
    """Prescale physics (beam-beam) triggers while always keeping beam-gas
    events, selecting on HLT1 decisions instead of the ODIN bunch-crossing
    type; the resulting selector is appended to 'HltFilterSeq'.

    BBfraction: fraction of physics-triggered events to accept (default 1).
    """
    from Configurables import LoKi__Hybrid__HltFactory as HltFactory
    HltFactory().Modules.append("LoKiHlt.functions")
    from Configurables import LoKi__HDRFilter as HLTFilter
    from Configurables import DeterministicPrescaler, GaudiSequencer

    # Beam-gas events bypass the prescale entirely.
    beam_gas_filter = HLTFilter('Hlt1FilterNonBB',
                                Code="HLT_PASS_SUBSTR('Hlt1BeamGas')")
    # Any "physics" Hlt1 line, excluding technical/monitoring lines.
    physics_filter = HLTFilter(
        'Hlt1FilterBB',
        Code=
        "HLT_PASS_RE('Hlt1(?!ODIN)(?!L0)(?!Lumi)(?!Tell1)(?!MB)(?!NZS)(?!Velo)(?!BeamGas)(?!Incident).*Decision')"
    )

    # Physics branch: accept only the requested fraction.
    physics_branch = GaudiSequencer("PrescBB")
    physics_branch.Members = [
        physics_filter,
        DeterministicPrescaler("DetPrescBB", AcceptFraction=BBfraction),
    ]

    # OR of the two branches, hooked into the global HLT filter sequence.
    selector = GaudiSequencer('CollTypeSelector', ModeOR=True)
    selector.Members = [beam_gas_filter, physics_branch]
    GaudiSequencer('HltFilterSeq').Members.append(selector)
def ParticleRefitterSeq(inputs=[], rootInTES="/Event", scale=True): from Configurables import GaudiSequencer from Configurables import TrackScaleState as SCALER from Configurables import ParticleRefitter scaler = SCALER("Scaler", RootInTES=rootInTES) seq = GaudiSequencer("ParticleRefitterSeq") if scale: seq.Members = [scaler] else: seq.Members = [] refitter = ParticleRefitter() if isinstance(inputs, basestring): refitter.Inputs = [inputs] else: refitter.Inputs = inputs refitter.RootInTES = rootInTES print "ParticleRefitterSeq is applied to the following inputs:" for i in refitter.Inputs: print " - on of the inputs is ", i seq.Members += [refitter] return seq
# NOTE(review): this is an exact duplicate of the ParticleRefitterSeq defined
# earlier in this file; being defined later, THIS copy is the one in effect.
# One of the two definitions should be removed.
def ParticleRefitterSeq(inputs = [],rootInTES = "/Event",scale=True):
    """Build and return the 'ParticleRefitterSeq' GaudiSequencer, optionally
    prepending a TrackScaleState momentum-scale correction.

    inputs may be a single TES location string or a list of locations.
    """
    from Configurables import GaudiSequencer
    from Configurables import TrackScaleState as SCALER
    from Configurables import ParticleRefitter
    scaler = SCALER("Scaler", RootInTES = rootInTES)
    seq = GaudiSequencer("ParticleRefitterSeq")
    # Scaling, when requested, must run before the refitter.
    if scale:
        seq.Members = [ scaler ]
    else:
        seq.Members = [ ]
    refitter = ParticleRefitter()
    # Accept either a single location string or a list of locations.
    if isinstance(inputs, basestring) :
        refitter.Inputs = [inputs]
    else :
        refitter.Inputs = inputs
    refitter.RootInTES = rootInTES
    print "ParticleRefitterSeq is applied to the following inputs:"
    for i in refitter.Inputs:
        print " - on of the inputs is ", i
    seq.Members += [refitter]
    return seq
def redoMCLinks(self,init):
    """ Redo MC links.

    Only acts when the 'Simulation' and 'RedoMCLinks' properties are both
    set: kills the existing Link/Rec/Track node (if present) and reruns
    TrackAssociator, appending the resulting sequence to `init`.
    """
    if ( self.getProp("Simulation") ):
        redo = self.getProp("RedoMCLinks")
        if ( redo ):
            from Configurables import (GaudiSequencer,TESCheck,EventNodeKiller,TrackAssociator)
            mcKillSeq = GaudiSequencer("KillMCLinks")  # The sequence killing the node of it exists
            tescheck = TESCheck("DaVinciEvtCheck")  # Check for presence of node ...
            tescheck.Inputs = ["Link/Rec/Track/Best"]  #
            tescheck.Stop = False  # But don't stop
            tescheck.OutputLevel = 5  # don't print warnings
            evtnodekiller = EventNodeKiller("DaVinciEvtNodeKiller")  # kill nodes
            evtnodekiller.Nodes = ["Link/Rec/Track"]  # Kill that
            mcKillSeq.Members = [ tescheck, evtnodekiller, TrackAssociator() ]
            mcLinkSeq = GaudiSequencer("RedoMCLinks")  # The sequence redoing the links
            mcLinkSeq.IgnoreFilterPassed = True  # Run it always
            mcLinkSeq.Members = [ mcKillSeq, TrackAssociator() ]
            init.Members += [ mcLinkSeq ]
def myHackedSequence():
    """Rebuild 'RecoDecodingSeq' to decode only lite clusters: Velo lite
    clusters (full Velo cluster decoding switched off), plus TT and IT
    lite clusters.

    NOTE(review): GaudiSequencer is not imported in this function; it must
    be in scope from an import elsewhere in this file.
    """
    decodeSeq = GaudiSequencer("RecoDecodingSeq")
    from DAQSys.Decoders import DecoderDB
    from DAQSys.DecoderClass import decodersForBank
    decodeSeq.Members = []
    # Velo: lite clusters only.
    vdec = DecoderDB["DecodeVeloRawBuffer/createVeloLiteClusters"]
    vdec.DecodeToVeloClusters = False
    vdec.DecodeToVeloLiteClusters = True
    vdec.Active = True
    decodeSeq.Members += [vdec.setup()]
    # Silicon trackers: TT and IT lite clusters.
    tt = DecoderDB["RawBankToSTLiteClusterAlg/createTTLiteClusters"]
    it = DecoderDB["RawBankToSTLiteClusterAlg/createITLiteClusters"]
    decodeSeq.Members += [tt.setup(),it.setup()]
def configMonitor():
    """Configure a Herschel (HC) digit-monitoring job for 2015 raw data.

    Sets up raw-data conversion, ODIN-based event time decoding, the HC
    raw-bank decoder and digit monitor, and a top-level sequence.

    Returns (app, monitor): the LHCbApp and the HCDigitMonitor instance.
    """
    import os
    from Gaudi.Configuration import EventPersistencySvc, HistogramPersistencySvc
    from Configurables import (LHCbApp, LHCb__RawDataCnvSvc, GaudiSequencer,
                               UpdateAndReset, createODIN, ApplicationMgr )
    app = LHCbApp()
    app.DataType = '2015'
    app.EvtMax = -1
    EventPersistencySvc().CnvServices.append( LHCb__RawDataCnvSvc('RawDataCnvSvc') )
    # No histogram file from the persistency service; histograms are saved
    # via the monitoring (UpdateAndReset) mechanism instead.
    HistogramPersistencySvc().OutputFile = ''
    HistogramPersistencySvc().Warnings = False
    UpdateAndReset().saveHistograms = 1
    #UpdateAndReset().saverCycle = 3600
    from Configurables import EventClockSvc
    EventClockSvc().EventTimeDecoder = 'OdinTimeDecoder'
    appMgr = ApplicationMgr()
    # Decoder
    from Configurables import HCRawBankDecoder
    decoder = HCRawBankDecoder()
    decoder.Monitoring = True
    # Monitor: crate numbers and channel mappings for the B and F sides.
    from Configurables import HCDigitMonitor
    monitor = HCDigitMonitor()
    monitor.CrateB = 0
    monitor.CrateF = 1
    monitor.ChannelsB0 = [47, 46, 45, 44]
    monitor.ChannelsB1 = [23, 22, 21, 20]
    monitor.ChannelsB2 = [11, 10, 9, 8]
    monitor.ChannelsF1 = [23, 22, 21, 46]
    monitor.ChannelsF2 = [11, 10, 9, 8]
    # Top level sequence
    topSeq = GaudiSequencer("TopSequence")
    topSeq.Members = [createODIN(), decoder, monitor]
    appMgr.TopAlg = [topSeq]
    return app, monitor
# Test script chunk: verifies RawEventJuggler-style TCK substitution.
# NOTE(review): relies on names defined earlier in the file (rej, aseq,
# conf, test_locations). It first checks that applying the configuration
# without a TCK fails, then that "#TCK#" and generic patterns are replaced
# once a TCK and GenericReplacePatterns are set.
rej.Sequencer = aseq
try:
    rej.__apply_configuration__()
    # Reaching here means the unset-TCK guard did not fire.
    raise ValueError("Failed to protect against unset TCK")
except AttributeError:
    pass  # expected: configuration must fail without a TCK
conf.RawEventFormatConf().TCK = "0x0099"
if "#TCK#" in conf._replaceWrap(test_locations[0.0]["Bank_C"]):
    print conf._replaceWrap(test_locations[0.0]["Bank_C"])
    raise ValueError("Failed to replace TCK")
rej.TCK = "0x0099"
rej.GenericReplacePatterns = {"#two#": "Aha!"}
aseq.Members = []
rej.__apply_configuration__()
# Collect every output location the configured members write or copy to.
locs = []
for mem in aseq.Members:
    locs.append(mem.OutputRawEventLocation)
    locs = locs + [aloc for abank, aloc in mem.RawBanksToCopy.iteritems()]
# No placeholder markers may survive substitution.
for loc in locs:
    if "#" in loc:
        print locs
        raise ValueError(
            "Should have replaced the TCK and random location, but I didn't!")
print "Pass"
################################################## from Configurables import CondDB, CondDBAccessSvc CondDB().LatestGlobalTagByDataType = DataYear ################################################## from Configurables import LHCbApp LHCbApp().XMLSummary='summary.xml' #from Configurables import DataOnDemandSvc, L0SelReportsMaker, L0DecReportsMaker #DataOnDemandSvc().AlgMap["HltLikeL0/DecReports"] = L0DecReportsMaker( OutputLevel = 4 ) #DataOnDemandSvc().AlgMap["HltLikeL0/SelReports"] = L0SelReportsMaker( OutputLevel = 4 ) # User Algorithms from Configurables import GaudiSequencer userAlgos = GaudiSequencer("userAlgos") userAlgos.Members = [] #userAlgos=[] if IsMC: from Configurables import TrackSmearState as SMEAR smear = SMEAR('StateSmear') userAlgos.Members.append(smear) else: # from Configurables import CheckPV checkpv = CheckPV("CheckPV") checkpv.MinPVs = 1 userAlgos.Members.append(checkpv) from Configurables import TrackScaleState as SCALER scaler = SCALER('StateScale') userAlgos.Members.append(scaler) userAlgos.Members.append(selSeq.sequence())
# Build four selection sequences for the B2XuMuNuBu2Rho lines: opposite-sign
# (OS), same-sign (SS), sideband (SB) and wrong-sign (WS), each feeding its
# own ntuple, and register all four with DaVinci.
# NOTE(review): makeSelectionSequence*, GaudiSequencer and DaVinci come from
# imports/definitions elsewhere in this file.
seqBss2buk = makeSelectionSequence("B2XuMuNuBu2RhoLine", "Semileptonic")
seqBss2bukSS = makeSelectionSequenceSS("B2XuMuNuBu2RhoLine", "Semileptonic")
seqRhoSB = makeSelectionSequence("B2XuMuNuBu2RhoSBLine", "Semileptonic")
seqRhoWS = makeSelectionSequence("B2XuMuNuBu2RhoWSLine", "Semileptonic")
from bs2st_bu2rhomunu.options.rhomutuples import *
tuple = makeTuple("Bs2st2BuK_RhoMuX_Tuple")
tupleSS = makeTupleSS("Bs2st2BuKSS_RhoMuX_Tuple")
tupleSB = makeTuple("Bs2st2BuK_RhoSBMuX_Tuple")
tupleWS = makeTupleRhoWS("Bs2st2BuK_RhoWSMuX_Tuple")
# Each tuple reads the output of its corresponding selection sequence.
tuple.Inputs = [seqBss2buk.outputLocation()]
tupleSS.Inputs = [seqBss2bukSS.outputLocation()]
tupleSB.Inputs = [seqRhoSB.outputLocation()]
tupleWS.Inputs = [seqRhoWS.outputLocation()]
SequenceOS = GaudiSequencer('SequenceOS')
SequenceSS = GaudiSequencer('SequenceSS')
SequenceSB = GaudiSequencer('SequenceSB')
SequenceWS = GaudiSequencer('SequenceWS')
SequenceOS.Members = [seqBss2buk.sequence(), tuple]
SequenceSS.Members = [seqBss2bukSS.sequence(), tupleSS]
SequenceSB.Members = [seqRhoSB.sequence(), tupleSB]
SequenceWS.Members = [seqRhoWS.sequence(), tupleWS]
DaVinci().UserAlgorithms = [SequenceOS, SequenceSS, SequenceSB, SequenceWS]
# DaVinci options for the Bs2st -> BuK (J/psi K) analysis on 2012 MC:
# runs the OS and SS selections behind a stripping filter and books the
# corresponding ntuples into bs2st2buk_jpsik.root.
from Gaudi.Configuration import *
from Configurables import DaVinci,GaudiSequencer
DaVinci().DataType = "2012"
DaVinci().EvtMax = -1
DaVinci().TupleFile = "bs2st2buk_jpsik.root"
DaVinci().Simulation=True
# Selections, tuples and the stripping filter come from these modules.
from bs2st_bu2kmutau.options.jpsikkselection import *
from bs2st_bu2kmutau.options.jpsikktuples import *
tuple.Inputs = [ SeqBss2BuK.outputLocation() ]
tupleSS.Inputs = [ SeqBss2BuKSS.outputLocation() ]
SequenceOS = GaudiSequencer('SequenceOS')
SequenceSS = GaudiSequencer('SequenceSS')
SequenceOS.Members = [ strippingfilter, SeqBss2BuK.sequence(), tuple ]
SequenceSS.Members = [ strippingfilter, SeqBss2BuKSS.sequence(), tupleSS ]
DaVinci().UserAlgorithms = [ SequenceOS, SequenceSS ]
filter.Inputs = [inputs_template.format(stripping)] # Fill the branch templates with the appropriate particles mothers = {} daughters = {} for mother in mother_templates: mothers[mother] = mother_templates[mother].format(*tracks) for daughter in daughter_templates: daughters[daughter] = daughter_templates[daughter].format(*tracks) # Create a tuple for the mode tuple = tuple_templates.decay_tree_tuple( "Tuple{0}".format(line), decay_template.format(*tracks), mothers, daughters, # The input to the tuple is the output of the filter inputs_template.format(filter_name), mc ) # Refit the decay tree, storing refitted daughter information dtf = tuple.Lambdab.addTupleTool("TupleToolDecayTreeFitter/Fit") dtf.Verbose = True # Sequence to hold a succession of algorithms sequence = GaudiSequencer("SequenceBook{0}".format(line)) sequence.Members = [filter, tuple] DaVinci().UserAlgorithms.append(sequence)
# Gaudi test options: record events into two streams (even-numbered events
# to Stream1's recorder 'Rec1'... note the odd/even naming below) and replay
# them through a delegate output stream.
from Configurables import (ApplicationMgr, EventDataSvc, RecordOutputStream,
                           ReplayOutputStream, GaudiSequencer,
                           GaudiTesting__OddEventsFilter as OddEvents,
                           GaudiTesting__EvenEventsFilter as EvenEvents,
                           SubAlg as EmptyAlg)
# The replay stream delegates to two (empty) per-stream algorithms.
outDelegate = ReplayOutputStream()
outDelegate.OutputStreams = [EmptyAlg('Stream1'), EmptyAlg('Stream2')]
# Odd events recorded to Stream1, even events to Stream2.
oddEvtSelect = GaudiSequencer('OddEventsSelection')
oddEvtSelect.Members = [OddEvents('OddEvents'),
                        RecordOutputStream('Rec1', OutputStreamName='Stream1')]
evenEvtSelect = GaudiSequencer('EvenEventsSelection')
evenEvtSelect.Members = [EvenEvents('EvenEvents'),
                         RecordOutputStream('Rec2', OutputStreamName='Stream2')]
app = ApplicationMgr(EvtSel='NONE', EvtMax=4)
app.TopAlg = [EmptyAlg("EventInit"), evenEvtSelect, oddEvtSelect]
app.OutStream = [outDelegate]
EventDataSvc(ForceLeaves=True)
#from Gaudi.Configuration import VERBOSE
#from Configurables import MessageSvc
#MessageSvc(OutputLevel=VERBOSE)
def _configureForOnline(self):
    """Configure the Turbo-stream output for online running.

    Sets up the RawEventJuggler to produce the Turbo raw event format,
    then builds the luminosity sequence (ODIN decoding, lumi counters,
    routing-bit filters) and the nano-event fallback sequence, wiring
    everything into self.teslaSeq and the output writer.

    NOTE(review): InputCopyStream, DstConf, RawEventJuggler, GaudiSequencer,
    LumiAlgsConf and INFO must be in scope from imports elsewhere in this
    file.
    """
    # DecodeRawEvent().DataOnDemand=False
    writer=InputCopyStream( self.writerName )
    DstConf().setProp("SplitRawEventOutput", self.getProp("RawFormatVersion"))
    # Use RawEventJuggler to create the Turbo stream raw event format
    tck = "0x409f0045" # DUMMY
    TurboBanksSeq=GaudiSequencer("TurboBanksSeq")
    RawEventJuggler().TCK=tck
    RawEventJuggler().Input="Moore"
    RawEventJuggler().Output=self.getProp("RawFormatVersion")
    RawEventJuggler().Sequencer=TurboBanksSeq
    RawEventJuggler().WriterOptItemList=writer
    RawEventJuggler().KillExtraNodes=True
    RawEventJuggler().KillExtraBanks=True
    RawEventJuggler().KillExtraDirectories = True
    self.teslaSeq.Members += [TurboBanksSeq]
    # Begin Lumi configuration
    lumiSeq = GaudiSequencer("LumiSeq")
    #
    # Add ODIN decoder to LumiSeq ***
    from DAQSys.Decoders import DecoderDB
    CreateODIN=DecoderDB["createODIN"].setup()
    #********************************
    #
    # Main algorithm config
    lumiCounters = GaudiSequencer("LumiCounters")
    lumiCounters.Members+=[CreateODIN]
    lumiSeq.Members += [ lumiCounters ]
    LumiAlgsConf().LumiSequencer = lumiCounters
    LumiAlgsConf().OutputLevel = self.getProp('OutputLevel')
    LumiAlgsConf().InputType = "MDF"
    #
    # Filter out Lumi only triggers from further processing, but still write to output
    # Trigger masks changed in 2016, see LHCBPS-1486
    physFilterRequireMask = []
    lumiFilterRequireMask = []
    if self.getProp( "DataType" ) in ["2012","2015"]: # 2012 needed for nightlies tests.
        physFilterRequireMask = [ 0x0, 0x4, 0x0 ]
        lumiFilterRequireMask = [ 0x0, 0x2, 0x0 ]
    else:
        physFilterRequireMask = [ 0x0, 0x0, 0x80000000 ]
        lumiFilterRequireMask = [ 0x0, 0x0, 0x40000000 ]
    from Configurables import HltRoutingBitsFilter
    physFilter = HltRoutingBitsFilter( "PhysFilter", RequireMask = physFilterRequireMask )
    lumiFilter = HltRoutingBitsFilter( "LumiFilter", RequireMask = lumiFilterRequireMask )
    lumiSeq.Members += [ lumiFilter, physFilter ]
    lumiSeq.ModeOR = True
    #
    from Configurables import RecordStream
    FSRWriter = RecordStream( "FSROutputStreamDstWriter")
    FSRWriter.OutputLevel = INFO
    #
    # Sequence to be executed if physics sequence not called (nano events)
    notPhysSeq = GaudiSequencer("NotPhysicsSeq")
    notPhysSeq.ModeOR = True
    notPhysSeq.Members = [ physFilter ]
    writer.AcceptAlgs += ["LumiSeq","NotPhysicsSeq"]
    self.teslaSeq.Members += [lumiSeq, notPhysSeq]
#seq = makeSelectionSequenceNoBs2st("B2XuMuNuBu2RhoLine", "Semileptonic" ) line = "B2XuMuNuBu2RhoLine" stream = "AllStreams" #def makeSelectionSequenceNoBs2st( line, stream): location = "/Event/" + stream + "/Phys/" + line + "/Particles" from Configurables import LoKi__HDRFilter strippingfilter = LoKi__HDRFilter('StripPassFilter', Code="HLT_PASS('Stripping" + line + "Decision')", Location="/Event/Strip/Phys/DecReports") # AutomaticData(Location = location) from bs2st_bu2rhomunu.options.rhomutuples import * tuple = makeTupleNoBs2st("Bu2RhoMuX_Tuple") #print seq.outputLocation() #tuple.Inputs = [ seq.outputLocation() ] tuple.Inputs = [location] Sequence = GaudiSequencer('Sequence') Sequence.Members = [strippingfilter, tuple] DaVinci().UserAlgorithms = [Sequence]
def caloDigits(createADCs=False, detectors=['Ecal', 'Hcal'], ReadoutStatusConvert=False):
    """ Decoding of CaloFuture-Digits

    Builds the zero-suppression algorithms for the requested calorimeters
    (optionally wrapped with a readout-status converter) and returns them
    combined into the 'CaloFutureDigits' GaudiSequencer.

    Parameters:
        createADCs: if True, adjust each decoder's OutputType so ADCs are
            produced in addition to (or instead of) digits.
        detectors: calorimeters to configure ('Ecal' and/or 'Hcal').
            The list is only read, never mutated.
        ReadoutStatusConvert: if True, add a RawBankReadoutStatusConverter
            after each decoder.

    NOTE(review): relies on a module-level logger `_log`.
    """
    _cntx = 'Offline'
    from Configurables import (CaloFutureRawToDigits, RawBankReadoutStatusConverter, GaudiSequencer)
    conflist = []
    alglist = []
    if 'Ecal' in detectors:
        _log.debug('caloDigits : Ecal is added to the detector list')
        ecalSeq = GaudiSequencer('FutureEcalDigitsSeq', Context=_cntx)
        ecal = CaloFutureRawToDigits("FutureEcalZSup")
        from cppyy.gbl import DeCalorimeterLocation
        ecal.DetectorLocation = DeCalorimeterLocation.Ecal
        ecal.ZSupMethod = "2D"
        ecal.ZSupThreshold = 20
        ecal.ZSupNeighbour = -5
        conflist.append(ecal)
        if ReadoutStatusConvert:
            ecalCnv = RawBankReadoutStatusConverter("EcalProcStatus")
            ecalCnv.System = 'Ecal'
            ecalCnv.BankTypes = ['EcalPacked']
            ecalSeq.Members = [ecal, ecalCnv]
            alglist.append(ecalSeq)
        else:
            alglist.append(ecal)
    if 'Hcal' in detectors:
        _log.debug('caloDigits : Hcal is added to the detector list')
        hcalSeq = GaudiSequencer('FutureHcalDigitsSeq', Context=_cntx)
        hcal = CaloFutureRawToDigits("FutureHcalZSup")
        from cppyy.gbl import DeCalorimeterLocation
        hcal.DetectorLocation = DeCalorimeterLocation.Hcal
        hcal.ZSupMethod = "1D"
        hcal.ZSupThreshold = 4
        conflist.append(hcal)
        if ReadoutStatusConvert:
            hcalCnv = RawBankReadoutStatusConverter("HcalProcStatus")
            hcalCnv.System = 'Hcal'
            hcalCnv.BankTypes = ['HcalPacked']
            hcalSeq.Members = [hcal, hcalCnv]
            alglist.append(hcalSeq)
        else:
            alglist.append(hcal)
    if createADCs:
        for a in conflist:
            t = a.OutputType.upper()
            if t in ['DIGIT', 'DIGITS', 'CALOFUTUREDIGIT', 'CALOFUTUREDIGITS']:
                # Was producing digits only: produce ADCs as well.
                # BUGFIX: set/log via the algorithm `a`, not the string `t`,
                # and use a valid '%s' conversion (was a bare '%').
                a.OutputType = 'Both'
                _log.warning('Change OutputType for %s from %s to %s' %
                             (a.getFullName(), t, a.OutputType))
            elif t in ['ADC', 'ADCS', 'CALOFUTUREADC', 'CALOFUTUREADCS']:
                pass  # already producing ADCs
            elif t == 'BOTH':  # BUGFIX: equality, not identity ('is')
                pass  # already producing both
            else:
                a.OutputType = 'Adc'
                _log.warning('Change OutputType for %s from %s to %s' %
                             (a.getFullName(), t, a.OutputType))
    ## combine them into sequence
    alg = GaudiSequencer('CaloFutureDigits',
                         Context=_cntx,
                         IgnoreFilterPassed=True,
                         Members=alglist)
    return alg
def defaultHLTJPsiSelection(lines=[]):
    """Configure Escher to reconstruct J/psi->mumu candidates from HLT2
    J/psi lines for alignment, returning the resulting ParticleSelection.

    lines: optional list of HLT decision names to revive; when empty a
        default set of Hlt2 J/psi lines is used. (Read only, not mutated.)

    NOTE(review): ReviveHltTracks and ParticleSelection are not imported
    here; they must be defined elsewhere in this module.
    """
    # this still needs to be worked out
    from Configurables import Escher
    Escher().RecoSequence = [
        "Hlt", "Decoding", "AlignTr", "Vertex", "RICH", "CALO", "MUON", "PROTO"
    ]
    Escher().MoniSequence = ["Tr", "OT"]
    # if the Escher hlt filter is not set, set it here
    if not hasattr(Escher(), "HltFilterCode") or not Escher().HltFilterCode:
        Escher().HltFilterCode = "HLT_PASS_RE( 'Hlt2.*JPsi.*Decision' )"
    # revive only particles used for trigger
    print 'Hlt lines to be used: '
    print ReviveHltTracks(lines if len(lines) else [
        'Hlt2ExpressJPsiDecision', 'Hlt2DiMuonDetachedJPsiDecision',
        'Hlt2DiMuonJPsiDecision', 'Hlt2DiMuonJPsiHighPTDecision'
    ])
    # Now create the J/psi candidates
    from Configurables import CombineParticles, FilterDesktop
    from CommonParticles.StdAllLooseMuons import StdAllLooseMuons
    from CommonParticles.StdLooseJpsi2MuMu import StdLooseJpsi2MuMu
    # PID requirements on the muon daughters.
    StdLooseJpsi2MuMu.DaughtersCuts = {
        "mu-": "(PIDmu-PIDK>5.0) & (PIDmu-PIDe>8.0)"
    }
    ## tighten the mass window for candidates used in alignment
    AlignJpsi2MuMu = FilterDesktop(
        "AlignJpsi2MuMu",
        Inputs=["Phys/StdLooseJpsi2MuMu"],
        Code="(ADMASS('J/psi(1S)') < 35.*MeV) & (VFASPF(VCHI2) < 7.)"
    )  # Tight requirements on vchi2 - test 18/5/2012
    from Configurables import ChargedProtoParticleMaker, ChargedProtoParticleAddMuonInfo, ChargedProtoCombineDLLsAlg
    from Configurables import TrackParticleMonitor, GaudiSequencer
    # Proto-particle building, candidate making and monitoring; order matters.
    recoJpsiSeq = GaudiSequencer("RecoJpsiSeq")
    recoJpsiSeq.Members = [
        ChargedProtoParticleMaker('ChargedProtoPMaker'),
        ChargedProtoParticleAddMuonInfo('ChargedProtoPAddMuon'),
        ChargedProtoCombineDLLsAlg('ChargedProtoPCombDLLs'),
        StdAllLooseMuons,  # we could also get this from the DoD
        StdLooseJpsi2MuMu,  # we could also get this from the DoD
        TrackParticleMonitor(
            'StdLooseJpsi2MuMuMonitor',
            InputLocation='/Event/Phys/StdLooseJpsi2MuMu/Particles',
            MinMass=3000,
            MaxMass=3190),
        AlignJpsi2MuMu,
        TrackParticleMonitor(
            'AlignJpsi2MuMuMonitor',
            InputLocation='/Event/Phys/AlignJpsi2MuMu/Particles',
            MinMass=3000,
            MaxMass=3190),
    ]
    sel = ParticleSelection(Name='Jpsi2MuMu',
                            Location='/Event/Phys/AlignJpsi2MuMu/Particles',
                            Algorithm=recoJpsiSeq)
    return sel
def defaultMinBiasD0Selection():
    """Configure Escher to reconstruct D0->K-pi+ candidates from minimum
    bias events for alignment, returning the resulting ParticleSelection.

    NOTE(review): MinimalRichSequence and ParticleSelection are not
    imported here; they must be defined elsewhere in this module.
    """
    # this still needs to be worked out
    from Configurables import Escher
    Escher().RecoSequence = ["Decoding", "VELO", "Tr", "Vertex", "RICH"]
    Escher().MoniSequence = ["Tr", "OT"]
    # Tweak a little bit RICH
    MinimalRichSequence()
    # now create the D0->K-Pi+ candidates
    from Configurables import FilterDesktop
    from Configurables import ChargedProtoParticleMaker, ChargedProtoParticleAddRichInfo, ChargedProtoCombineDLLsAlg
    # take as much as possible from CommonParticles
    from CommonParticles.StdAllLooseKaons import StdAllLooseKaons
    from CommonParticles.StdAllLoosePions import StdAllLoosePions
    from CommonParticles.StdLooseKaons import StdLooseKaons
    from CommonParticles.StdLoosePions import StdLoosePions
    from CommonParticles.StdLooseD02HH import StdLooseD02KPi
    # remove cuts that require a PV
    StdLooseD02KPi.MotherCut = "((VFASPF(VCHI2)<10) & (ADMASS('D0')<100*MeV) & (MIPCHI2DV(PRIMARY)<100.0))"
    StdLooseKaons.Code = "((PT>800.*MeV) & (MIPCHI2DV(PRIMARY)>5.0))"
    StdLoosePions.Code = "((PT>800.*MeV) & (MIPCHI2DV(PRIMARY)>5.0))"
    # add tight PID cuts basically to ensure that we don't swap the
    # kaon and pion.
    AlignD02KPiWide = FilterDesktop("AlignD02KPiWide",
                                    Inputs = ["Phys/StdLooseD02KPi"],
                                    Code = "(ADMASS('D0') < 50.*MeV) & (VFASPF(VCHI2) < 9.)" \
                                    # " & (MINTREE((ABSID=='D0'), VFASPF(VZ))-VFASPF(VZ) > 0.09*mm )" \
                                    " & (MINTREE('K+'==ABSID, PIDK) > 0)" \
                                    " & (MINTREE('pi+'==ABSID, PIDK) < 0)" )
    # tighten the mass window for candidates used in alignment.
    AlignD02KPi = FilterDesktop("AlignD02KPi",
                                Inputs=["Phys/AlignD02KPiWide"],
                                Code="(ADMASS('D0') < 20.*MeV)")
    # create the sequence that we pass to the alignment
    from Configurables import TrackParticleMonitor, GaudiSequencer
    recoD0Seq = GaudiSequencer("RecoD0Seq", IgnoreFilterPassed=True)
    recoD0Seq.Members = [
        ChargedProtoParticleMaker('ChargedProtoPMaker'),
        ChargedProtoParticleAddRichInfo('ChargedProtoPAddRich'),
        ChargedProtoCombineDLLsAlg('ChargedProtoPCombDLLs'),
        TrackParticleMonitor(
            'StdLooseD02KPiMonitor',
            InputLocation='/Event/Phys/StdLooseD02KPi/Particles',
            MinMass=1810,
            MaxMass=1930),
        AlignD02KPiWide,
        TrackParticleMonitor(
            'AlignD02KPiWideMonitor',
            InputLocation='/Event/Phys/AlignD02KPiWide/Particles',
            MinMass=1810,
            MaxMass=1930),
        AlignD02KPi,
        TrackParticleMonitor('AlignD02KPiMonitor',
                             InputLocation='/Event/Phys/AlignD02KPi/Particles',
                             MinMass=1810,
                             MaxMass=1930)
    ]
    sel = ParticleSelection(Name='D02KPi',
                            Location='/Event/Phys/AlignD02KPi/Particles',
                            Algorithm=recoD0Seq)
    return sel
def defaultHLTDstarSelection():
    """Configure Escher to reconstruct D*->D0(K-pi+)pi candidates from the
    HLT2 express D* line for alignment, returning the resulting
    ParticleSelection.

    NOTE(review): ReviveHltTracks, MinimalRichSequence and
    ParticleSelection are not imported here; they must be defined elsewhere
    in this module.
    """
    # this still needs to be worked out
    from Configurables import Escher
    Escher().RecoSequence = ["Hlt", "Decoding", "AlignTr", "Vertex", "RICH"]
    Escher().MoniSequence = ["Tr", "OT"]
    # if the Escher hlt filter is not set, set it here
    if not hasattr(Escher(), "HltFilterCode") or not Escher().HltFilterCode:
        Escher(
        ).HltFilterCode = "HLT_PASS_RE( 'Hlt2ExpressDStar2D0PiDecision' )"
    # retrieve the trigger lines from the hlt filter code
    print 'Hlt lines to be used: '
    print ReviveHltTracks()
    # Tweak a little bit RICH
    MinimalRichSequence()
    # now create the D0->K-Pi+ candidates
    from Configurables import FilterDesktop
    from Configurables import ChargedProtoParticleMaker, ChargedProtoParticleAddRichInfo, ChargedProtoCombineDLLsAlg
    # take as much as possible from CommonParticles
    from CommonParticles.StdAllLooseKaons import StdAllLooseKaons
    from CommonParticles.StdAllLoosePions import StdAllLoosePions
    from CommonParticles.StdLooseKaons import StdLooseKaons
    from CommonParticles.StdLoosePions import StdLoosePions
    from CommonParticles.StdLooseD02HH import StdLooseD02KPi
    from CommonParticles.StdLooseDstarWithD2HH import StdLooseDstarWithD02KPi
    # remove cuts that require a PV
    StdLooseD02KPi.MotherCut = "((VFASPF(VCHI2)<10) & (ADMASS('D0')<100*MeV))"
    StdLooseKaons.Code = "ALL"
    StdLoosePions.Code = "ALL"
    # add tight PID cuts basically toensure that we don't swap the
    # kaon and pion.
    AlignD02KPiWide = FilterDesktop(
        "AlignD02KPiWide",
        Inputs=["Phys/StdLooseD02KPi"],
        Code="(ADMASS('D0') < 50.*MeV) & (VFASPF(VCHI2) < 9.)")
    # tighten the mass window for candidates used in alignment.
    AlignD02KPi = FilterDesktop("AlignD02KPi",
                                Inputs=["Phys/AlignD02KPiWide"],
                                Code="(ADMASS('D0') < 20.*MeV)")
    # create the sequence that we pass to the alignment
    from Configurables import TrackParticleMonitor, GaudiSequencer
    recoDstarWithD0Seq = GaudiSequencer("RecoDstarWithD0Seq")
    recoDstarWithD0Seq.Members = [
        ChargedProtoParticleMaker('ChargedProtoPMaker'),
        ChargedProtoParticleAddRichInfo('ChargedProtoPAddRich'),
        ChargedProtoCombineDLLsAlg('ChargedProtoPCombDLLs'),
        TrackParticleMonitor(
            'StdLooseD02KPiMonitor',
            InputLocation='/Event/Phys/StdLooseD02KPi/Particles',
            MinMass=1810,
            MaxMass=1930),
        StdLooseDstarWithD02KPi,  #apply Dstar cut
        AlignD02KPiWide,
        TrackParticleMonitor(
            'AlignD02KPiWideMonitor',
            InputLocation='/Event/Phys/AlignD02KPiWide/Particles',
            MinMass=1810,
            MaxMass=1930),
        AlignD02KPi,
        TrackParticleMonitor('AlignD02KPiMonitor',
                             InputLocation='/Event/Phys/AlignD02KPi/Particles',
                             MinMass=1810,
                             MaxMass=1930)
    ]
    sel = ParticleSelection(Name='DstarWithD02KPi',
                            Location='/Event/Phys/AlignD02KPi/Particles',
                            Algorithm=recoDstarWithD0Seq)
    return sel
def doIt():
    """Configure PrChecker, FT MC-hit associations and the PrHybridSeeding
    standalone seeding algorithm, and install the seeding into the
    reconstruction sequence.

    NOTE(review): depends on module-level names defined elsewhere in this
    file: Plotta, Eta25Cut, ConfigureManager and DataOnDemandSvc.
    NOTE(review): IdealStateCreator is imported three times below; the
    duplicates are harmless but could be removed.
    """
    #------------------------------
    #Configure PrChecker
    #------------------------------
    from Configurables import GaudiSequencer
    from Configurables import PrChecker
    from Configurables import IdealStateCreator
    GaudiSequencer("CheckPatSeq").Members += []
    prChecker = PrChecker()
    from Configurables import IdealStateCreator
    if (Plotta):
        prChecker.WriteTTrackHistos = 2
    prChecker.Eta25Cut = Eta25Cut
    prChecker.UseElectrons = False
    prChecker.TriggerNumbers = True
    GaudiSequencer("CheckPatSeq").Members +=[prChecker]
    from Configurables import MCParticle2MCHitAlg, IdealStateCreator, PrPlotFTHits
    # Define the algorithms
    FTAssoc = MCParticle2MCHitAlg( "MCP2FTMCHitAlg",
                                   MCHitPath = "MC/FT/Hits",
                                   OutputData = "/Event/MC/Particles2MCFTHits" )
    # tell the Data On Demand Service about them
    DataOnDemandSvc().AlgMap[ "/Event/Link/MC/Particles2MCFTHits" ] = FTAssoc
    DataOnDemandSvc().NodeMap[ "/Event/Link" ] = "DataObject"
    DataOnDemandSvc().NodeMap[ "/Event/Link/MC" ] = "DataObject"
    #---------------------------------
    #Configure the HitManager
    #---------------------------------
    #if (ConfigureManager):
    #    from Configurables import PrFTHitManager
    #    manager = PrFTHitManager("PrFTHitManager")
    #    manager.fracPosOffset = fracPos
    #    manager.doTuple = manager
    #    manager.HackSize1 = False
    #    manager.FixError = False
    #    manager.SizeFix = FixedSize
    #---------------------------------
    #Configure the Seeding Tracking
    #---------------------------------
    seedingSeq = GaudiSequencer("TrSeedingSeq")
    GaudiSequencer("TrBestSeq").Members = []
    GaudiSequencer("TrSeedingSeq").Members = []
    #if you do truthmatching in the pat reco
    GaudiSequencer("MCLinksUnpackSeq").Members =[]
    GaudiSequencer("RecoTrSeq").Members += [ seedingSeq ]
    from Configurables import PrHybridSeeding
    seeding = PrHybridSeeding()
    #seeding.OutputLevel = DEBUG #Uncomment this line for debug
    seeding.InputName ="" #Standalone seeding Put "Forward to get forward imput"
    seeding.MaxNHits = 12 #Force algorithm to find 12 Hits track when > 12
    seeding.DecodeData = True # Switch it off if Runned after Forward
    seeding.XOnly = False
    #N Cases
    seeding.NCases = 3
    seeding.MinXPlanes = 4
    #Clones Kill
    seeding.RemoveClonesX = True
    seeding.RemoveClones = True
    seeding.minNCommonUV = 7 #>=7
    seeding.minCommonX = [2,2,2] # N Common X (Remove Clones [Case0,Case1,Case2])
    seeding.RemoveClonesUpDown = False # should Speed Up
    seeding.minNCommonUVUpDown = 2 #>=2
    #Flag Hits
    seeding.FlagHits = True
    seeding.SizeToFlag = 12 # >=size
    seeding.RemoveFlagged = True # Case 1 and 2 will not use flagged Hits
    #If Flag Size = 11
    seeding.Flag_MaxChi2 = 0.3 # track.chi2(hit) = 0.3
    seeding.Flag_MaxX0 = 200 # <200
    #dRatio Business
    seeding.UseCubicCorrection = True # dRatio correction in the fit
    seeding.dRatio = -0.000262
    seeding.UseCorrPosition = True # dRatio( x,y)
    seeding.UseCorrSlopes = False # dRatio( bx, by)
    seeding.CConst = 2.458e8 # BackwardProjection ( To be used somewhere in the algo )
    #Recover Track ( to be fully implemented properly)
    seeding.RecoverTrack = False
    seeding.ChiDoFRecover = -1.0
    #Case 0,1,2 parameters 1st Last search
    #XZ-Search
    # 1st - Last Layer
    seeding.L0_AlphaCorr = [120.64, 510.64, 730.64 ]
    seeding.L0_tolHp = [280.0 , 540.0 , 1080.0 ]
    # ParabolaSeedHits
    seeding.x0Corr = [0.002152, 0.001534, 0.001534]
    seeding.X0SlopeChange = [500. , 500. , 500. ]
    seeding.x0Cut = [4000. , 4000., 4000. ]
    seeding.TolAtX0Cut = [12.0 , 8.0 , 8.0 ]
    seeding.ToleranceX0Up = [ 0.75 , 0.75, 0.75 ]
    seeding.X0SlopeChangeDown = [ 1500. , 2000., 1500. ]
    seeding.TolAtX0CutOpp = [3.0 , 2.0, 2.0 ]
    seeding.ToleranceX0Down = [ 0.75 , 0.75, 0.75 ]
    seeding.TolXRemaining = [1.0 , 1.0 , 1.0]
    seeding.maxParabolaSeedHits = 12 # Hits are sorted by the distance from
    # x0 = first-last projection to z=0 ; then hits sorted by xParabola - (x0+tx_1stLast*z_PlaneParabola + x0Corr*x0) and we keep the first maxParabolaSeedHits list
    seeding.maxChi2HitsX = [5.5 , 5.5 , 5.5 ]
    seeding.maxChi2DoFX = [4.0 , 5.0 , 6.0 ]
    #Add Stereo Part
    seeding.DoAsymm = True
    seeding.TriangleFix = True
    seeding.TriangleFix2ndOrder = True
    seeding.yMin = -1.0
    seeding.yMin_TrFix = -2.0
    seeding.yMax = 2700
    seeding.yMax_TrFix = 30.0
    seeding.RemoveHole = True
    #Hough Cluster Size settings Hits are sorted by y/z where y is computed from the XZ-segment processed
    seeding.TolTyOffset = [ 0.002 , 0.002 , 0.0035 ]
    seeding.TolTySlope = [ 0.0 , 0.0 , 0.015 ]
    #Once you find The UV hits you check the Line Y ?
    seeding.UseLineY = True
    #9 and 10 hits
    seeding.Chi2LowLine = [ 5.0 , 6.0 , 7.0 ] #Chi2PerDoF when 10 Hits or 9
    seeding.maxChi2Hits_less11Hit = [ 2.5 , 2.5 , 2.5 ]
    seeding.maxYatZeroLow = [ 50., 50. , 50. ]
    seeding.maxYatzRefLow = [500., 500. , 500. ]
    #11 and 12 Hits
    seeding.Chi2HighLine = [30.0, 50.0 , 80.0] #Chi2PerDoF when 11 or 12 hits Using LineChi2DoF + XZChi2DoF
    seeding.maxChi2Hits_11and12Hit =[ 5.5, 5.5, 5.5 ]
    #Make the Full Fit
    seeding.maxChi2PerDoF = [4.0, 6.0, 7.0]
    seeding.RecoverTrack = False #Get only tracks with 6 UV or 6 X
    seeding.ChiDoFRecover = -1.0 #Change it only if RecoverTrack = True
    #TruthMatching Settings (Comment all of them if you want to run the normal seeding
    if(ConfigureManager):
        from Configurables import PrFTHitManager
        #seeding.addTool(manager)
        #seeding.PrFTHitManager.SizeFix = FixedSize
        #seeding.PrFTHitManager.FixError = True
    seedingSeq.Members = [seeding]
############################## #fill summary every event ############################## from Configurables import (XMLSummary) XMLSummary().XMLSummary = "summary.xml" from Configurables import XMLSummarySvc XMLSummarySvc("CounterSummarySvc").UpdateFreq = 1 ############################## #Run the merger, this bit should soon be made a configurable, # or automatic through either LHCbApp or some other Merger() ############################## from Configurables import GaudiSequencer LumiSeq = GaudiSequencer("LumiSeq") from Configurables import FSRCleaner, LumiMergeFSR, EventAccounting LumiSeq.Members = [ EventAccounting("EventAccount"), LumiMergeFSR("MergeFSR"), FSRCleaner() ] ApplicationMgr().TopAlg += [LumiSeq] ############################################## #Debug printout, lists all cleaned directories ############################################## FSRCleaner().OutputLevel = DEBUG #FSRCleaner().Enable=False
def configureSequences(self, withMC, handleLumi, vetoHltErrorEvents):
    """Set up the top-level Brunel processing sequences.

    Builds the BrunelSequencer (init, optional lumi handling, HLT
    error veto, HLT decision filters, calo status conversion, trigger
    decoding, main reconstruction, output) and registers it with the
    ApplicationMgr.

    :param withMC:             use the MC default sequence when MainSequence is unset
    :param handleLumi:         add the luminosity-event handling sequences
    :param vetoHltErrorEvents: skip events flagged as HLT errors, writing a ProcStatus instead
    """
    brunelSeq = GaudiSequencer("BrunelSequencer")
    brunelSeq.Context = self.getProp("Context")
    ApplicationMgr().TopAlg += [ brunelSeq ]
    brunelSeq.Members += [ "ProcessPhase/Init" ]
    physicsSeq = GaudiSequencer( "PhysicsSeq" )
    # Treatment of luminosity events
    if handleLumi:
        lumiSeq = GaudiSequencer("LumiSeq")
        # Prepare the FSR
        if self.getProp("WriteFSR"):
            self.setOtherProps(LumiAlgsConf(), ["Context", "DataType", "InputType", "Simulation"])
            lumiCounters = GaudiSequencer("LumiCounters")
            lumiSeq.Members += [ lumiCounters ]
            LumiAlgsConf().LumiSequencer = lumiCounters
        # Trigger masks changed in 2016, see LHCBPS-1486
        if self.getProp( "DataType" ) in self.Run1DataTypes or self.getProp( "DataType" ) in [ "2015" ]:
            physFilterRequireMask = [ 0x0, 0x4, 0x0 ]
            lumiFilterRequireMask = [ 0x0, 0x2, 0x0 ]
        else:
            physFilterRequireMask = [ 0x0, 0x0, 0x80000000 ]
            lumiFilterRequireMask = [ 0x0, 0x0, 0x40000000 ]
        # Filter out Lumi only triggers from further processing, but still write to output
        from Configurables import HltRoutingBitsFilter
        physFilter = HltRoutingBitsFilter( "PhysFilter", RequireMask = physFilterRequireMask )
        physicsSeq.Members += [ physFilter ]
        lumiFilter = HltRoutingBitsFilter( "LumiFilter", RequireMask = lumiFilterRequireMask )
        lumiSeq.Members += [ lumiFilter, physFilter ]
        lumiSeq.ModeOR = True
        # Sequence to be executed if physics sequence not called (nano events)
        notPhysSeq = GaudiSequencer("NotPhysicsSeq")
        notPhysSeq.ModeOR = True
        notPhysSeq.Members = [ physFilter ]
        brunelSeq.Members += [ lumiSeq, notPhysSeq ]
    # Hlt decreports decoders; offline both HLT1 and HLT2 reports are decoded
    from DAQSys.Decoders import DecoderDB
    from Configurables import LoKi__HDRFilter, AddToProcStatus
    hltStages = ('Hlt1',) if self.getProp('OnlineMode') else ('Hlt1', 'Hlt2')
    hltDecoders = []
    hltErrorFilters = []
    hltFilters = []
    for stage in hltStages:
        decoder = DecoderDB["HltDecReportsDecoder/%sDecReportsDecoder" % stage].setup()
        hltDecoders += [decoder]  # decode DecReports
        # identifies events that are not of type ErrorEvent
        errorFilterCode = "HLT_PASS_RE('%s(?!ErrorEvent).*Decision')" % stage
        hltErrorFilter = LoKi__HDRFilter('%sErrorFilter' % stage,
                                         Code = errorFilterCode,
                                         Location = decoder.OutputHltDecReportsLocation)
        hltErrorFilters += [decoder, hltErrorFilter]  # and apply filter
        # Optional per-stage user filter (HltNFilterCode property)
        filterCode = self.getProp(stage + "FilterCode")
        if filterCode:
            hltFilter = LoKi__HDRFilter('%sFilter' % stage,
                                        Code = filterCode,
                                        Location = decoder.OutputHltDecReportsLocation)
            hltFilters += [decoder, hltFilter]
    # Do not process events flagged as error in Hlt, but still write procstatus
    if vetoHltErrorEvents:
        """ By Patrick Koppenburg, 16/6/2011 """
        # Make a sequence that selects HltErrorEvents
        hltErrorFilterSeq = GaudiSequencer("HltErrorFilterSeq")
        if handleLumi:
            hltErrorFilterSeq.Members = [ physFilter ]  # protect against lumi (that doesn't have decreports)
        hltErrorFilterSeq.Members += hltErrorFilters
        # Sequence to be executed if HltErrorFilter is failing to set ProcStatus
        hltErrorSeq = GaudiSequencer("HltErrorSeq", ModeOR = True, ShortCircuit = True)  # anti-logic
        addToProc = AddToProcStatus("HltErrorProc", Reason = "HltError", Subsystem = "Hlt")  # write a procstatus
        hltErrorSeq.Members += [hltErrorFilterSeq, addToProc]  # only run if hltErrorFilterSeq fails
        brunelSeq.Members += [hltErrorSeq]  # add this sequece to Brunel _before_ physseq
        physicsSeq.Members += [hltErrorFilterSeq]  # take good events in physics seq
    # Filter events based on HLT decisions if filters were specified
    if hltFilters:
        hltFilterSeq = GaudiSequencer("HltFilterSeq")
        hltFilterSeq.Members = hltFilters
        physicsSeq.Members += [hltFilterSeq]
    # Convert Calo ReadoutStatus to ProcStatus
    caloBanks = GaudiSequencer("CaloBanksHandler")
    caloDetectors = [det for det in ['Spd', 'Prs', 'Ecal', 'Hcal'] if det in self.getProp("Detectors")]
    CaloDigitConf(ReadoutStatusConvert=True, Sequence=caloBanks, Detectors=caloDetectors)
    physicsSeq.Members += [caloBanks]
    # Decode L0 (and HLT if not already done)
    trgSeq = GaudiSequencer("DecodeTriggerSeq")
    l0TrgSeq = GaudiSequencer("L0TriggerSeq")
    # HLT decreports bank does not exist in 2008/2009 data
    if self.getProp( "DataType" ) not in [ "2008", "2009" ]:
        trgSeq.Members += hltDecoders
    trgSeq.Members += [ l0TrgSeq ]
    physicsSeq.Members += [ trgSeq ]
    L0Conf().L0Sequencer = l0TrgSeq
    if self.getProp("RecL0Only"):
        # Setup L0 filtering if requested, runs L0 before Reco
        L0Conf().FilterL0FromRaw = True
        self.setOtherProps( L0Conf(), ["DataType"] )
    else:
        L0Conf().DecodeL0DU = True
    # Default main sequence depends on whether MC truth is available
    if not self.isPropertySet("MainSequence"):
        if withMC:
            mainSeq = self.DefaultMCSequence
        else:
            mainSeq = self.DefaultSequence
        self.MainSequence = mainSeq
    physicsSeq.Members += self.getProp("MainSequence")
    from Configurables import ProcessPhase
    outputPhase = ProcessPhase("Output")
    brunelSeq.Members += [ physicsSeq ]
    brunelSeq.Members += [ outputPhase ]
# DaVinci job options for the Bs* -> B+ K (K mu X) ntuples.
from Gaudi.Configuration import *
from Configurables import DaVinci, GaudiSequencer
DaVinci().DataType = "2012"
DaVinci().EvtMax = -1  # process all input events
DaVinci().TupleFile = "bs2st2buk_kmux.root"
# The wildcard imports provide seqBss2buk/seqBss2bukSS (selections) and
# tuple/tupleSS (ntuple algorithms).
# NOTE(review): 'tuple' shadows the Python builtin, but it is defined by
# the kmuxtuples options module and cannot be renamed here.
from bs2st_bu2kmutau.options.kmuxselection import *
from bs2st_bu2kmutau.options.kmuxtuples import *
# Feed each ntuple from the output of its selection sequence
tuple.Inputs = [seqBss2buk.outputLocation()]
tupleSS.Inputs = [seqBss2bukSS.outputLocation()]
# Opposite-sign and same-sign chains: selection first, then the ntuple
SequenceOS = GaudiSequencer('SequenceOS')
SequenceSS = GaudiSequencer('SequenceSS')
SequenceOS.Members = [seqBss2buk.sequence(), tuple]
SequenceSS.Members = [seqBss2bukSS.sequence(), tupleSS]
DaVinci().UserAlgorithms = [SequenceOS, SequenceSS]
def ConfigureDaVinci():
    """Configure DaVinci for the swimming of a stripping line.

    Rebuilds the requested stripping lines from the StrippingArchive,
    wraps them in a dedicated stream, kills any pre-existing stripping
    output from the input file, and sets up the (Micro)DST writer for
    the swum candidates.  Python 2 only (print statements, __import__
    with level -1).
    """
    config = Swimming()
    from Configurables import DaVinci
    from StrippingConf.Configuration import StrippingConf
    from Configurables import ProcStatusCheck
    from Configurables import EventNodeKiller, GaudiSequencer
    from PhysSelPython.Wrappers import (AutomaticData, SelectionSequence,
                                        MultiSelectionSequence)
    # Get the stripping line
    from StrippingSettings.Utils import lineBuilderConfiguration
    strippingFile = None
    if config.getProp('StrippingFile') != 'none':
        strippingFile = config.getProp('StrippingFile')
    else:
        strippingFile = config.getProp('StrippingLineGroup')
    myconfig = lineBuilderConfiguration(config.getProp('StrippingVersion'),
                                        config.getProp('StrippingLineGroup'))
    import StrippingArchive
    # Fetch the line-builder class from the archived stripping module
    mylineconf = getattr(
        __import__(
            'StrippingArchive.' + config.getProp('StrippingVersion') +
            '.Stripping' + strippingFile, globals(), locals(),
            [myconfig["BUILDERTYPE"]], -1), myconfig["BUILDERTYPE"])
    mylinedict = myconfig["CONFIG"]
    # Apply user overrides on top of the archived cut dictionary
    substitutions = config.getProp('StrippingConfigSubstitutions')
    print "mylinedict before substitutions:", mylinedict
    print "stripping config substitutions:", substitutions
    mylinedict.update(substitutions)
    print "mylineconf:", mylineconf
    print "mylinedict after substitutions:", mylinedict

    from StrippingConf.StrippingStream import StrippingStream
    stream = StrippingStream(config.getProp('StrippingStream') + "Swimming")
    allLines = mylineconf(config.getProp('StrippingLineGroup'), mylinedict).lines()
    lines = []
    # Keep only the lines whose output location matches a requested name
    #lineNames = [l.split('/')[-1] for l in config.getProp('StripCands').keys()]
    lineNames = config.getProp('StrippingLines')
    print "lineNames:", lineNames
    for l in allLines:
        for lineName in lineNames:
            if l.outputLocation().find(lineName) != -1:
                lines.append(l)
                print l.outputLocation()
    stream.appendLines(lines)
    # Define the stream
    filterBadEvents = ProcStatusCheck()
    sc = StrippingConf(Streams=[stream],
                       MaxCandidates=2000,
                       AcceptBadEvents=False,
                       BadEventSelection=filterBadEvents)
    # Define the node killer, and make sure to kill everything corresponding to
    # the stream which we want to swim
    outputs = []
    from Configurables import Swimming__PVReFitter as ReFitter
    for l in lines:
        for f in l.filterMembers():
            if hasattr(f, 'ReFitPVs') and f.ReFitPVs:
                if not config.getProp('RefitPVs'):
                    log.warning('RefitPVs is not set, but stripping line applies refitting. Refitted ' + \
                                'PVs will be used for turning-point lifetime calculations.')
                    config.setProp('RefitPVs', True)
                # Wrap the line's PV refitter in the swimming-aware one
                t = f.PVReFitters['']
                f.PVReFitters = {'': 'Swimming::PVReFitter/PVReFitter'}
                f.addTool(ReFitter, 'PVReFitter')
                f.PVReFitter.PVReFitter = t
            elif not hasattr(f, 'Output'):
                continue
            # Remove the last item so we get everything (Particle, relations,
            # decayVertices, etc...
            o = '/'.join(f.Output.split('/')[:-1])
            outputs.append(o)
    print "Outputs are", outputs
    mykiller = EventNodeKiller("killStripping")
    # Some default nodes which we will want to kill in all cases
    nodestokill = outputs + ['Strip', '/Event/Rec/Vertex/Primary']
    mykiller.Nodes = nodestokill
    deathstar = GaudiSequencer("killStrippingSeq")
    deathstar.Members = [mykiller]
    # Configure DaVinci
    DaVinci().InputType = config.getProp('InputType')
    DaVinci().DataType = config.getProp('DataType')
    DaVinci().Simulation = config.getProp('Simulation')
    DaVinci().DDDBtag = config.getProp('DDDBtag')
    DaVinci().CondDBtag = config.getProp('CondDBtag')
    try:
        DaVinci().Persistency = config.getProp('Persistency')
    except AttributeError:
        print "DaVinci doesn't have a Persistency attribute to set"
    # The sequence for the swimming has to be configured
    # by hand inserting the node killer before it
    DaVinci().appendToMainSequence([deathstar])
    DaVinci().appendToMainSequence([sc.sequence()])
    # Since the name of the output file is configured in two places in DaVinci,
    # do some splitting.
    splitName = config.getProp('OutputFile').split(os.path.extsep)
    seqName = ''
    prefix = ''
    if len(splitName) <= 2:
        seqName = splitName[0]
        # NOTE(review): this warning text looks inverted w.r.t. the branch
        # condition (it fires for a one/two-part name) — confirm intent.
        print "Warning, an output filename in three parts was specified. This does not work well, " + \
              " so 'Swimming.' will be prefixed."
        prefix = 'Swimming'
    else:
        prefix = splitName[0]
        seqName = os.path.extsep.join(splitName[1:-1])
    dstWriter = None
    print config.getProp('OutputType')
    # Offline candidate selection sequences
    sequences = []
    offCands = config.getProp('OffCands').keys()
    for i, cands in enumerate(offCands):
        data = AutomaticData(Location=cands + "/Particles")
        offSeq = SelectionSequence("OfflineCandidates_%d" % i,
                                   TopSelection=data)
        sequences.append(offSeq)
    # selection sequence for offline candidates
    muCands = config.getProp('MuDSTCands')
    for i, cands in enumerate(muCands):
        # Add extra selections for additional MuDSTCands
        data = AutomaticData(Location=cands + "/Particles")
        seq = SelectionSequence("MuDSTCands_%d" % i, TopSelection=data)
        sequences.append(seq)
    selectionSeq = MultiSelectionSequence(seqName, Sequences=sequences)
    if config.getProp('OutputType') == 'MDST':
        pack = False
        isMC = config.getProp("Simulation")
        SwimmingConf = config.getProp('MicroDSTStreamConf')
        SwimmingElements = config.getProp('MicroDSTElements')
        # Fall back to the standard stripping MicroDST configuration
        if SwimmingConf == False:
            from DSTWriters.Configuration import stripMicroDSTStreamConf
            SwimmingConf = stripMicroDSTStreamConf(pack=pack, isMC=isMC)
        if len(SwimmingElements) == 0:
            from DSTWriters.Configuration import stripMicroDSTElements
            from DSTWriters.microdstelements import CloneSwimmingReports, CloneParticleTrees, CloneTPRelations
            mdstElements = stripMicroDSTElements(pack=pack, isMC=isMC)
            # Swimming reports first; P2TP relations right after the trees
            SwimmingElements = [CloneSwimmingReports()]
            for element in mdstElements:
                SwimmingElements += [element]
                if type(element) == CloneParticleTrees:
                    SwimmingElements += [CloneTPRelations("P2TPRelations")]
        streamConf = {'default': SwimmingConf}
        elementsConf = {'default': SwimmingElements}
        try:
            from DSTWriters.__dev__.Configuration import MicroDSTWriter
        except:
            from DSTWriters.Configuration import MicroDSTWriter
        dstWriter = MicroDSTWriter('MicroDST',
                                   StreamConf=streamConf,
                                   MicroDSTElements=elementsConf,
                                   WriteFSR=config.getProp('WriteFSR'),
                                   OutputFileSuffix=prefix,
                                   SelectionSequences=[selectionSeq])
    elif config.getProp('OutputType') == 'DST':
        try:
            from DSTWriters.__dev__.streamconf import OutputStreamConf
            from DSTWriters.__dev__.Configuration import SelDSTWriter
        except ImportError:
            from DSTWriters.streamconf import OutputStreamConf
            from DSTWriters.Configuration import SelDSTWriter
        # Full DST: copy the input plus the swimming reports and relations
        streamConf = OutputStreamConf(streamType = InputCopyStream,
                                      fileExtension = '.dst',
                                      extraItems = [config.getProp('SwimmingPrefix') + '/Reports#1'] +\
                                      list(set([l + '/P2TPRelations#1' for l in config.getProp('OffCands').values()])))
        SelDSTWriterElements = {'default': []}
        SelDSTWriterConf = {'default': streamConf}
        dstWriter = SelDSTWriter('FullDST',
                                 StreamConf=SelDSTWriterConf,
                                 MicroDSTElements=SelDSTWriterElements,
                                 WriteFSR=config.getProp('WriteFSR'),
                                 OutputFileSuffix=prefix,
                                 SelectionSequences=[selectionSeq])
    DaVinci().appendToMainSequence([dstWriter.sequence()])
mothers[mother] = mothers_templates[mother].format(*tracks) for daughter in daughters_templates: daughters[daughter] = daughters_templates[daughter].format(*tracks) for mother in mc_mothers_templates: mc_mothers[mother] = mc_mothers_templates[mother].format(*tracks) for daughter in mc_daughters_templates: mc_daughters[daughter] = mc_daughters_templates[daughter].format(*tracks) # Tuple for stripped and reconstructed events t = tuple_templates.lc2pxx_tuple( "Tuple{0}".format(line), decay_template.format(*tracks), mothers, daughters, inputs_template.format(stripping), True ) # Tuple for generated events mc_t = tuple_templates.mc_decay_tree_tuple( "MCGenTuple{0}".format(line), mc_decay_template.format(*tracks), mc_mothers, mc_daughters ) # Stripped tuple will only be filled if MC tuple is sequence = GaudiSequencer("SeqBook{0}".format(line)) sequence.Members = [mc_t, t] DaVinci().UserAlgorithms.append(sequence)
# Decode the raw event and collect every on-demand decoder into a single
# 'Decoding' sequence (upgrade detectors excluded).
from Gaudi.Configuration import *
from Configurables import LHCbApp, DataOnDemandSvc
from Configurables import GaudiSequencer
from Configurables import EventClockSvc

importOptions("$STDOPTS/DecodeRawEvent.py")
LHCbApp()
EventClockSvc(EventTimeDecoder="OdinTimeDecoder")

mySeq = GaudiSequencer("Decoding")
mySeq.OutputLevel = VERBOSE


def append():
    """Copy the DataOnDemandSvc decoders into the Decoding sequence,
    skipping upgrade-detector locations (UT/FT/VL/VP) and duplicates."""
    vetoed = ('UT', 'FT', 'VL', 'VP')
    for loc, alg in DataOnDemandSvc().AlgMap.iteritems():
        if any(det in loc for det in vetoed):
            continue
        if alg not in mySeq.Members:
            mySeq.Members.append(alg)


# make sure ODIN is first, to resolve ST dependencies manually...
mySeq.Members = ["createODIN"]
append()
ApplicationMgr().TopAlg = [mySeq]
def configureSequences(self):
    """Set up the top-level Escher (alignment) processing sequences.

    Validates the SpecialData options, builds the EscherSequencer and
    its main/init phases, prepares the HLT decreports decoding and the
    alignment sequence, and configures TAlignment (Kalman or Millepede).

    Raises:
        RuntimeError: if an unknown SpecialData option is requested.
    """
    # Check for special data options
    for option in self.getProp('SpecialData'):
        if option not in self.KnownSpecialData:
            # BUGFIX: was 'RunTimeError', an undefined name that would
            # itself raise a NameError instead of reporting the option.
            raise RuntimeError("Unknown SpecialData option '%s'" % option)
    escherSeq = GaudiSequencer("EscherSequencer")
    #escherSeq.Context = self.getProp("Context")
    ApplicationMgr().TopAlg = [escherSeq]
    # Fall back to the default sequence when none was configured
    mainSeq = self.getProp("MainSequence")
    if len(mainSeq) == 0:
        self.MainSequence = self.DefaultSequence
        mainSeq = self.MainSequence
    escherSeq.Members += mainSeq
    ProcessPhase("Init").DetectorList += self.getProp("InitSequence")
    ProcessPhase("Init").Context = self.getProp("Context")
    from Configurables import RecInit, TrackSys
    log.info("Setting up alignment sequence")
    recInit = RecInit(name="EscherInit",
                      PrintFreq=self.getProp("PrintFreq"))
    GaudiSequencer("InitEscherSeq").Members += [recInit]
    # set up the HltFilterSeq: decode the HLT decreports
    from Configurables import HltCompositionMonitor
    from Configurables import LoKi__HDRFilter as HDRFilter
    hltFilterSeq = GaudiSequencer("HltFilterSeq")
    from DAQSys.Decoders import DecoderDB
    from DAQSys.DecoderClass import decodersForBank
    from itertools import chain
    hltdecs = [DecoderDB.get("HltDecReportsDecoder/Hlt1DecReportsDecoder")]
    if not self.getProp("OnlineMode"):
        ## HLT2 decreports are only used offline.
        hltdecs += [DecoderDB.get("HltDecReportsDecoder/Hlt2DecReportsDecoder")]
    hltFilterSeq.Members = [d.setup() for d in hltdecs]
    ## FIXME: These lines should go back in as soon as an easy to use filter
    ## FIXME: is available that works for HLT1 and HLT2 decreports at the same time.
    ## identifies events that are not of type Hlt1ErrorEvent or Hlt2ErrorEvent
    ## hltErrCode = "HLT_PASS_RE('Hlt1(?!ErrorEvent).*Decision') & HLT_PASS_RE('Hlt2(?!ErrorEvent).*Decision')"
    ## hltErrorFilter = HDRFilter('HltErrorFilter', Code = hltErrCode ) # the filter
    ## hltFilterSeq.Members += [ HltCompositionMonitor(), hltErrorFilter ]
    ## add more hlt filters, if requested
    ## if hasattr(self,"HltFilterCode") and len(self.getProp("HltFilterCode"))>0:
    ##     hltfilter = HDRFilter ( 'HLTFilter',
    ##                             Code = self.getProp("HltFilterCode"))
    ##     hltfilter.Preambulo += [ "from LoKiCore.functions import *" ]
    ##     hltFilterSeq.Members += [ hltfilter ]
    # in Escher we'll always use the DOD
    ApplicationMgr().ExtSvc += ["DataOnDemandSvc"]
    alignSeq = GaudiSequencer("AlignSequence")
    # if the patter reco is not run, we need the DataOnDemand svc
    # so that e.g. the track container(s) is unpacked:
    if not GaudiSequencer("RecoTrSeq").getProp("Enable"):
        DstConf(EnableUnpack=True)
    # Setup tracking sequence
    trackConf = TrackSys()
    self.setOtherProps(trackConf, [
        "SpecialData", "OutputType", "DataType", "Simulation", "GlobalCuts"
    ])
    trackConf.ExpertTracking = self.getProp("ExpertTracking")
    ta = TAlignment()
    ta.Upgrade = self.getProp("Upgrade")
    self.setOtherProps(ta, ["DatasetName", "OnlineMode", "OnlineAligWorkDir"])
    ta.Sequencer = alignSeq
    # Millepede-style alignment replaces the default sequencer/method
    if self.getProp("Millepede"):
        log.info("Using Millepede type alignment!")
        self.setProp("Incident", "GlobalMPedeFit")
        ta.Method = "Millepede"
        ta.Sequencer = GaudiSequencer("MpedeAlignSeq")
#the stream which we want to swim outputs = [] for l in lines: for f in l.filterMembers(): if not hasattr(f, 'Output'): continue # Remove the last item so we get everything (Particle, relations, # decayVertices, etc... o = '/'.join(f.Output.split('/')[:-1]) outputs.append(o) mykiller = EventNodeKiller("killStripping") #Some default nodes which we will want to kill in all cases nodestokill = outputs + ['Strip', '/Event/Rec/Vertex/Primary'] mykiller.Nodes = nodestokill deathstar = GaudiSequencer("killStrippingSeq") deathstar.Members = [mykiller] # Configure DaVinci DaVinci().InputType = 'DST' DaVinci().DataType = options.datatype DaVinci().Simulation = options.forMC DaVinci().DDDBtag = options.dddb DaVinci().CondDBtag = options.conddb # The sequence for the swimming has to be configured # by hand inserting the node killer before it DaVinci().appendToMainSequence([deathstar]) DaVinci().appendToMainSequence([sc.sequence()]) #uDST writer from DSTWriters.__dev__.Configuration import MicroDSTWriter, microDSTStreamConf from DSTWriters.__dev__.microdstelements import * SwimmingConf = microDSTStreamConf() streamConf = {'default': SwimmingConf}
MessageSvc().Format = "% F%60W%S%7W%R%T %0W%M" # database DaVinci().DDDBtag = "dddb-20150724" DaVinci().CondDBtag = "cond-20160522" # # Raw event juggler to split DAQ/RawEvent into FULL.DST format # from Configurables import GaudiSequencer, RawEventJuggler jseq=GaudiSequencer("RawEventSplitSeq") juggler=RawEventJuggler("rdstJuggler") juggler.Sequencer=jseq juggler.Input=0.3 # 2015 Online (Moore) format juggler.Output=4.2 # Reco15 format # filter out events triggered exclusively by CEP lines from Configurables import LoKi__HDRFilter as HDRFilter from DAQSys.Decoders import DecoderDB Hlt2DecReportsDecoder=DecoderDB["HltDecReportsDecoder/Hlt2DecReportsDecoder"].setup() HLTFilter2 = HDRFilter("LoKiHLT2Filter" , Code = "HLT_PASS_RE('Hlt2(?!Forward)(?!DebugEvent)(?!Lumi)(?!Transparent)(?!PassThrough)(?!LowMult).*Decision')" , Location = Hlt2DecReportsDecoder.OutputHltDecReportsLocation) otherseq=GaudiSequencer("filters") otherseq.Members=[jseq,HLTFilter2] DaVinci().EventPreFilters = [jseq,HLTFilter2] # input file importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco16_Run182594.py")
vetra.EvtMax = 1000
vetra.HistogramFile = "/PUVetoAlg_vetra63466.root"
# default settings: database tags
LHCbApp().DDDBtag = 'head-20080905'
LHCbApp().CondDBtag = 'head-20080905'
from Configurables import PuVetoAlg
# L0 pile-up veto algorithm
l0PuVeto = PuVetoAlg()
l0PuVeto.OutputLevel = 3
# if in TAE mode...
#l0PuVeto.RawEventLocation = 'Prev1/DAQ/RawEvent'
#l0PuVeto.OutputFileName = "/calib/trg/l0pus/BeamData/PUVetoAlg_l0PuVeto63466.root"
#l0PuVeto.MakePlots = True
moniL0Pu = GaudiSequencer('Moni_L0PileUp')
moniL0Pu.Members = [l0PuVeto]


def myPU():
    # Post-config hook: run only the L0 pile-up monitor in the VELO
    # monitoring sequence.
    GaudiSequencer('MoniVELOSeq').Members = [moniL0Pu]


appendPostConfigAction(myPU)
EventSelector().FirstEvent = 1
EventSelector().PrintFreq = 1
EventSelector().Input = [
    "DATAFILE='/daqarea/lhcb/data/2009/RAW/FULL/LHCb/COLLISION09/63466/063466_0000000001.raw' SVC='LHCb::MDFSelector'"
]
###############################################################################
def caloDigits ( context , enableOnDemand , createADCs=False,
                 detectors=['Ecal','Hcal','Prs','Spd'],
                 ReadoutStatusConvert=False) :
    """
    Decoding of Calo-Digits.

    Builds the per-detector digit decoding algorithms (optionally with a
    RawBank readout-status converter per detector) and returns them
    combined into one 'CaloDigits' GaudiSequencer.

    :param context:              processing context; anything other than
                                 'Offline' is overridden with a warning
    :param enableOnDemand:       register the decoders with the DataOnDemandSvc
    :param createADCs:           also produce ADCs by switching each decoder's
                                 OutputType (see below)
    :param detectors:            subset of ['Ecal','Hcal','Prs','Spd'] to decode
                                 (read-only default list, never mutated here)
    :param ReadoutStatusConvert: add a RawBankReadoutStatusConverter per detector
    """
    _cntx = 'Offline'
    if context != _cntx :
        _log.warning('CaloDigit: Context is redefined to be Offline')
    from Configurables import ( CaloZSupAlg ,
                                CaloDigitsFromRaw ,
                                RawBankReadoutStatusConverter,
                                GaudiSequencer )
    conflist=[]   # decoders only (candidates for the ADC switch below)
    alglist=[]    # what actually goes into the returned sequence
    if 'Spd' in detectors :
        _log.debug('caloDigits : Spd is added to the detector list')
        spd = getAlgo ( CaloDigitsFromRaw , "SpdFromRaw" , _cntx ,
                        "Raw/Spd/Digits" , enableOnDemand )
        conflist.append(spd)
        alglist.append(spd)
    if 'Prs' in detectors :
        _log.debug('caloDigits : Prs is added to the detector list')
        prsSeq = GaudiSequencer ('PrsDigitsSeq',Context = _cntx)
        prs = getAlgo ( CaloDigitsFromRaw , "PrsFromRaw" , _cntx ,
                        "Raw/Prs/Digits" , enableOnDemand )
        conflist.append(prs)
        if ReadoutStatusConvert :
            # decoder + readout-status conversion wrapped in a sub-sequence
            prsCnv = getAlgo ( RawBankReadoutStatusConverter, "PrsProcStatus",_cntx)
            prsCnv.System='Prs'
            prsCnv.BankTypes=['PrsPacked']
            prsSeq.Members = [prs,prsCnv]
            alglist.append(prsSeq)
        else :
            alglist.append(prs)
    if 'Ecal' in detectors :
        _log.debug('caloDigits : Ecal is added to the detector list')
        ecalSeq = GaudiSequencer ('EcalDigitsSeq',Context = _cntx)
        ecal = getAlgo ( CaloZSupAlg , "EcalZSup" , _cntx ,
                         "Raw/Ecal/Digits" , enableOnDemand )
        conflist.append(ecal)
        if ReadoutStatusConvert :
            ecalCnv = getAlgo ( RawBankReadoutStatusConverter, "EcalProcStatus",_cntx)
            ecalCnv.System='Ecal'
            ecalCnv.BankTypes=['EcalPacked']
            ecalSeq.Members = [ecal,ecalCnv]
            alglist.append(ecalSeq)
        else :
            alglist.append(ecal)
    if 'Hcal' in detectors :
        _log.debug('caloDigits : Hcal is added to the detector list')
        hcalSeq = GaudiSequencer ('HcalDigitsSeq',Context = _cntx)
        hcal = getAlgo ( CaloZSupAlg , "HcalZSup" , _cntx ,
                         "Raw/Hcal/Digits" , enableOnDemand )
        conflist.append(hcal)
        if ReadoutStatusConvert :
            hcalCnv = getAlgo ( RawBankReadoutStatusConverter, "HcalProcStatus",_cntx)
            hcalCnv.System='Hcal'
            hcalCnv.BankTypes=['HcalPacked']
            hcalSeq.Members = [hcal,hcalCnv]
            alglist.append(hcalSeq)
        else :
            alglist.append(hcal)
    if createADCs :
        # Make every digit-only decoder also produce ADCs.
        # BUGFIX: the original assigned 'OutputType' on the local *string*
        # 't' (and called t.getFullName()), leaving the configurables
        # untouched, and used a bare '%' conversion that raises ValueError.
        for a in conflist :
            t = a.OutputType.upper()
            if t in [ 'DIGIT', 'DIGITS' , 'CALODIGIT' , 'CALODIGITS' ] :
                a.OutputType = 'Both'
                _log.warning( 'Change OutputType for %s from %s to %s'
                              % ( a.getFullName() , t , a.OutputType ) )
            elif t in [ 'ADC' , 'ADCS' , 'CALOADC' , 'CALOADCS' ] :
                pass
            elif t == 'BOTH' :     # BUGFIX: was 't is "BOTH"' (identity compare)
                pass
            else :
                a.OutputType = 'Adc'
                _log.warning( 'Change OutputType for %s from %s to %s'
                              % ( a.getFullName() , t , a.OutputType ) )
    ## combine them into sequence
    alg = GaudiSequencer ('CaloDigits',
                          Context = context,
                          IgnoreFilterPassed = True,
                          Members = alglist)
    return alg
# Standard app configuration from Configurables import (ApplicationMgr, GaudiSequencer, GaudiTesting__OddEventsFilter as OddEvents, GaudiTesting__EvenEventsFilter as EvenEvents, SubAlg as EmptyAlg, OutputStream, InputCopyStream) oddEvtSelect = GaudiSequencer('OddEventsSelection') oddEvtSelect.Members = [OddEvents('OddEvents'), OutputStream('Stream1')] evenEvtSelect = GaudiSequencer('EvenEventsSelection') evenEvtSelect.Members = [EvenEvents('EvenEvents'), InputCopyStream('Stream2')] app = ApplicationMgr(EvtSel='NONE', EvtMax=4) app.TopAlg = [EmptyAlg("EventInit"), evenEvtSelect, oddEvtSelect] # Extension used to enforce deferring from Configurables import (ApplicationMgr, EventDataSvc, ReplayOutputStream, SubAlg as EmptyAlg) app = ApplicationMgr() app.AlgTypeAliases['OutputStream'] = 'RecordOutputStream' app.AlgTypeAliases['InputCopyStream'] = 'RecordOutputStream' EventDataSvc(ForceLeaves=True) outDelegate = ReplayOutputStream()
def configure(datafiles, catalogs=[], castor=True, params=None):
    """
    Configure the job: chi_b -> Upsilon gamma and Upsilon -> mu+ mu-
    selections plus the corresponding Bender MC algorithms.

    :param datafiles: input data files, handed to setData
    :param catalogs:  file catalogs, handed to setData
                      (NOTE(review): mutable default, but only read here)
    :param castor:    flag forwarded to setData
    :param params:    dict with 'nb' and 'np' entries; required —
                      the default None would raise a TypeError below
    """
    from Configurables import DaVinci  # needed for job configuration
    # from Configurables import EventSelector  # needed for job configuration
    # from Configurables import NTupleSvc
    from PhysConf.Filters import LoKi_Filters
    # Pre-filters: stripping decision plus non-empty candidate container
    fltrs = LoKi_Filters(
        STRIP_Code="""
        HLT_PASS_RE ( 'Stripping.*DiMuonHighMass.*Decision' )
        """,
        VOID_Code="""
        0 < CONTAINS ( '/Event/AllStreams/Phys/FullDSTDiMuonDiMuonHighMassLine/Particles')
        """
    )
    filters = fltrs.filters('Filters')
    filters.reverse()
    from PhysSelPython.Wrappers import AutomaticData, Selection, SelectionSequence
    #
    # dimuon in stripping DST
    #
    # DiMuLocation =
    # '/Event/Dimuon/Phys/FullDSTDiMuonDiMuonHighMassLine/Particles'
    DiMuLocation = '/Event/AllStreams/Phys/FullDSTDiMuonDiMuonHighMassLine/Particles'
    from PhysSelPython.Wrappers import AutomaticData
    DiMuData = AutomaticData(Location=DiMuLocation)
    # =========================================================================
    # Upsilon -> mumu, cuts by Giulia Manca
    # ========================================================================
    from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
    UpsAlg = FilterDesktop(
        Code="""
        ( M > 7 * GeV ) &
        DECTREE ('Meson -> mu+ mu-' ) &
        CHILDCUT( 1 , HASMUON & ISMUON ) &
        CHILDCUT( 2 , HASMUON & ISMUON ) &
        ( MINTREE ( 'mu+' == ABSID , PT ) > 1 * GeV ) &
        ( MAXTREE ( ISBASIC & HASTRACK , TRCHI2DOF ) < 4 ) &
        ( MINTREE ( ISBASIC & HASTRACK , CLONEDIST ) > 5000 ) &
        ( VFASPF ( VPCHI2 ) > 0.5/100 ) &
        ( abs ( BPV ( VZ ) ) < 0.5 * meter ) &
        ( BPV ( vrho2 ) < ( 10 * mm ) ** 2 )
        """,
        Preambulo=[
            "vrho2 = VX**2 + VY**2"
        ],
        ReFitPVs=True
    )
    UpsSel = Selection(
        'UpsSel',
        Algorithm=UpsAlg,
        RequiredSelections=[DiMuData]
    )
    # =========================================================================
    # chi_b -> Upsilon gamma
    # ========================================================================
    from GaudiConfUtils.ConfigurableGenerators import CombineParticles
    ChibCombine = CombineParticles(
        DecayDescriptor="chi_b1(1P) -> J/psi(1S) gamma",
        DaughtersCuts={
            "gamma": " ( 350 * MeV < PT ) & ( CL > 0.01 ) "
        },
        CombinationCut="""
        ( AM - AM1 ) < 3 * GeV
        """,
        MotherCut=" PALL",
        #
        # we are dealing with photons!
        #
        ParticleCombiners={
            '': 'LoKi::VertexFitter'
        }
    )
    from StandardParticles import StdLooseAllPhotons  # needed for chi_b
    ChibSel1 = Selection(
        'PreSelChib',
        Algorithm=ChibCombine,
        RequiredSelections=[UpsSel, StdLooseAllPhotons]
    )
    # Tag photons compatible with a pi0 to veto them later
    from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger
    TagAlg = Pi0Veto__Tagger(
        ExtraInfoIndex=25001,  # should be unique!
        MassWindow=20 * MeV,   # cut on delta-mass
        MassChi2=-1,           # no cut for chi2(mass)
    )
    ChibSel2 = Selection(
        'Chi_b',
        Algorithm=TagAlg,
        RequiredSelections=[ChibSel1]
    )
    Chib = SelectionSequence("ChiB", TopSelection=ChibSel2)
    # print 'OUTPUT!!!' , output_loc
    # =========================================================================
    # Upsilons
    # ========================================================================
    Ups = SelectionSequence("UpsSelSeq", TopSelection=UpsSel)
    # ========================================================================
    # Each selection is followed by its (Bender) analysis algorithm,
    # referenced here by name and instantiated below.
    from Configurables import GaudiSequencer
    myChibSeq = GaudiSequencer('MyChibSeq')
    myChibSeq.Members = [Chib.sequence()] + ["ChibAlg"]
    myUpsSeq = GaudiSequencer('MyUpsSeq')
    myUpsSeq.Members = [Ups.sequence()] + ["UpsilonAlg"]
    davinci = DaVinci(
        EventPreFilters=filters,
        DataType='2011',
        Simulation=True,
        InputType='DST',
        HistogramFile="chib_histos.root",
        TupleFile="chib_tuples.root",
        PrintFreq=1000,
        Lumi=True,
        EvtMax=-1
    )
    davinci.UserAlgorithms = [myChibSeq, myUpsSeq]
    # =========================================================================
    from Configurables import Gaudi__IODataManager as IODataManager
    IODataManager().AgeLimit = 2
    # =========================================================================
    # come back to Bender
    setData(datafiles, catalogs, castor)
    gaudi = appMgr()
    alg_chib = ChibMC(
        'ChibAlg',  # Algorithm name ,
        # input particles
        Inputs=[
            Chib.outputLocation()
        ],
        # take care about the proper particle combiner
        ParticleCombiners={'': 'LoKi::VertexFitter'}
    )
    alg_ups = UpsilonMC(
        'UpsilonAlg',  # Algorithm name ,
        # input particles
        Inputs=[
            Ups.outputLocation()
        ],
        # take care about the proper particle combiner
        ParticleCombiners={'': 'LoKi::VertexFitter'}
    )
    # Event-weight parameters shared by both algorithms
    alg_chib.nb = alg_ups.nb = params['nb']
    alg_chib.np = alg_ups.np = params['np']
    # =========================================================================
    return SUCCESS
"Tuple{0}".format(line), decay_template.format(*tracks), mothers, daughters, # The input to the tuple is the output of the filter inputs_template.format(stripping), mc ) # MCDecayTreeTuple mc_tuple = tuple_templates.mc_decay_tree_tuple( "MCGenTuple{0}".format(line), mc_decay_template.format(*tracks), mc_mothers, mc_daughters ) # Sequences for ntuple creation dec_sequence = GaudiSequencer("SeqBook{0}".format(line)) dec_sequence.Members = [tuple] mc_sequence = GaudiSequencer("SeqMCGenBook{0}".format(line)) mc_sequence.Members = [mc_tuple] # Sequence for ntuple sequences tuples_sequence = GaudiSequencer("SeqTuples{0}".format(line)) tuples_sequence.IgnoreFilterPassed = True tuples_sequence.Members = [dec_sequence, mc_sequence] DaVinci().UserAlgorithms.append(tuples_sequence)
def configure_dv() :
    """Configure DaVinci for the Xb -> Xc h stripping ntuples.

    For every (charm baryon, bachelor) pair: retrieve the stripping
    tuple, optionally substitute PIDs for the Xi_b hypothesis, and book
    DecayTreeFitter tuple tools.  On MC the stripping selection is
    re-run truth-matched in front of each tuple; on data a momentum
    scaling algorithm is added.  Python 2 only (filter/reduce usage).
    """
    global toolList
    streams = get_streams(strippingVersion)
    configs = []
    userSeq = GaudiSequencer('UserSeq')
    userSeq.IgnoreFilterPassed = True
    if isMC :
        toolList += ['TupleToolMCTruth',
                     #'TupleToolGeneration'
                     'TupleToolMCBackgroundInfo' ]
    for baryon in charmBaryons :
        for bachelor in bachelors :
            lineName = stripping_line_name(bachelor, baryon)
            dtt, line, stream, conf = \
                make_stripping_tuple(lineName, streams, toolList,
                                     lineName[:-len('Beauty2Charm')] + 'Tuple')
            dttSeq = GaudiSequencer(line.name() + 'Seq', Members = [dtt])
            if 'Xic' == baryon :
                # Re-interpret the Lb decay descriptor as Xi_b0 and add a
                # SubstitutePID step in front of the tuple.
                decayDesc = dtt.Decay
                dtt.Decay = decayDesc.replace('Lambda_b0', 'Xi_b0')
                decayDescPlus = reduce(lambda x,y : x.replace(y, ''),
                                       ('^', '[', ']', 'CC'), decayDesc)
                # Build the charge-conjugate descriptor by token swapping
                # ('!' and '#' are used as temporary placeholders).
                decayDescMinus = decayDescPlus.replace('+', '!').replace('->', '#').replace('-', '+')\
                    .replace('!', '-').replace('0', '~0').replace('#', '->')\
                    .replace('Xi_c-', 'Xi_c~-').replace('p-', 'p~-')
                subPID = SubstitutePID(baryon+bachelor+'SubstitutePID',
                                       Code = 'ALL',
                                       Substitutions = {decayDescPlus : 'Xi_b0',
                                                        decayDescMinus : 'Xi_b~0'},
                                       Inputs = dtt.Inputs,
                                       Output = dtt.Inputs[0].replace('Beauty2CharmLine',
                                                                      'Beauty2CharmLine-Subd'))
                dtt.Inputs = [subPID.Output]
                dttSeq.Members.insert(0, subPID)
            configs.append({'dtt' : dtt, 'dttSeq' : dttSeq, 'line' : line,
                            'stream' : stream, 'conf' : conf,
                            'charmBaryon' : baryon, 'bachelor' : bachelor})
            dtt.ReFitPVs = True  # Do I need this? - probably yes if you want to use vertex constraints without biasing the lifetime.
            # Configure DTF
            decayNoCaret = dtt.Decay.replace('^', '')
            baryonSymbol = baryonSymbols[baryon]
            baryonDec = baryonDescriptors[baryon]
            decayCharmCaret = decayNoCaret.replace('('+baryonDec, '^(' + baryonDec)
            dtt.addBranches( {'X_b0' : decayNoCaret,
                              'X_cplus' : decayCharmCaret } )
            bachelorDec = bachelorDescriptors[bachelor]
            otherBachelorDec = oppositeBachelors[bachelor]
            # Put the caret on the *last* occurrence of the bachelor by
            # doing the replace on the reversed string.
            decayBachelorCaret = decayNoCaret[::-1].replace(bachelorDec[::-1],
                                                            bachelorDec[::-1]+'^', 1)[::-1]
            bachelorSub = {decayBachelorCaret : otherBachelorDec}
            # One DTF per (mass-constraint, vertex-constraint) combination,
            # each with a twin applying the bachelor-swap substitution.
            for mass, massName in ([baryonDec], '_Mass'), ([], '') :
                for vtx, vtxName in (True, '_Vtx'), (False, '') :
                    tt_dtf_name = 'TupleToolDecayTreeFitter/DTF' + massName + vtxName
                    tt_dtf = dtt.X_b0.addTupleTool(tt_dtf_name)
                    tt_dtf_sub = dtt.X_b0.addTupleTool(tt_dtf_name + '_Sub')
                    tt_dtf_sub.Substitutions = bachelorSub
                    for dtf in tt_dtf, tt_dtf_sub :
                        dtf.constrainToOriginVertex = vtx
                        dtf.daughtersToConstrain = mass
                        dtf.Verbose = True
    dv.DataType = get_data_type(firstInputFile)
    if isMC :
        # Re-run the stripping algorithms truth-matched in front of each tuple
        for config in configs :
            line = config['line']
            #algs = line.filterMembers()
            algs = line._members
            # Remove TISTOS tagger and BDT filter.
            algs = algs[:-3]
            # Remove VoidFilter for nTracks < 250.
            #algs.pop(0)
            #remove_selection_criteria(algs[3], algs[5], algs[7],
            #                          algs[9]) # std particles.
            selalgs = filter(lambda x : isinstance(x, (FilterDesktop, CombineParticles)), algs)
            remove_selection_criteria(*selalgs)
            decayDesc = config['dtt'].Decay.replace('^', '')
            preamble = [ "from LoKiPhysMC.decorators import *" ,
                         "from LoKiPhysMC.functions import mcMatch" ]
            # for alg in selalgs :
            #     alg.Preambulo = preamble
            #     if hasattr(alg, 'Code') :
            #         alg.Code += ' & mcMatch({0!r})'.format(decayDesc)
            #     else :
            #         alg.MotherCut += ' & mcMatch({0!r})'.format(decayDesc)
            # filter(lambda x : isinstance(x, CombineParticles), algs)[-1].CombinationCut = '(AM<7000*MeV) & (AM>5200*MeV)'
            #remove_selection_criteria(algs[8], # Lc/Xic
            #                          algs[10]) # Lb
            combineParticles = filter(lambda x : isinstance(x, CombineParticles), algs)
            for comb in combineParticles :
                comb.Preambulo = preamble
            motherSymbol = baryonDescriptors[config['charmBaryon']]
            decayDesc = decayDesc.replace('->', '==>') # to allow for intermediate resonances.
            # Truth-match the charm-baryon combination and its daughters
            combineParticles[0].MotherCut += ' & mcMatch({0!r})'.format(decayDesc.replace('(' + motherSymbol, '^(' + motherSymbol))
            combineParticles[0].DaughtersCuts = {'p+' : 'mcMatch({0!r})'.format(decayDesc.replace('p+', '^p+')),
                                                 'K-' : 'mcMatch({0!r})'.format(decayDesc.replace('K-', '^K-')),
                                                 'pi+' : 'mcMatch({0!r})'.format(decayDesc.replace('pi+', '^pi+'))}
            bachelorSymbol = bachelorDescriptors[config['bachelor']]
            combineParticles[1].DaughtersCuts = {bachelorSymbol : 'mcMatch({0!r})'.format(decayDesc.replace(bachelorSymbol, '^' + bachelorSymbol)),
                                                 motherSymbol : 'ALL'}
            #algs = remove_selection_criteria(*algs)
            # Point the last stripping algorithm at the tuple's input and
            # prepend the whole chain to the tuple sequence.
            algs[-1].Output = config['dttSeq'].Members[0].Inputs[0]
            config['dttSeq'].Members = algs + config['dttSeq'].Members
        dv.Simulation = True
        dv.DDDBtag = dddbTag
        dv.CondDBtag = conddbTag
    else :
        # Data: apply momentum-scale calibration
        scaler = TrackScaleState()
        #scaler.CONDDBpath = '/dd/Conditions/Calibration/LHCb/MomentumScale'
        #scaler.CONDDBpath = '/dd/Conditions/Calibration/LHCb'
        userSeq.Members = [scaler]
        from Configurables import CondDB
        CondDB(LatestGlobalTagByDataType = dv.DataType)
    for config in configs :
        userSeq.Members.append(config['dttSeq'])
    dv.UserAlgorithms = [userSeq]
    dv.InputType = firstInputFile.split('.')[-1].upper()
    if 'MDST' == dv.InputType :
        dv.RootInTES = '/Event/' + configs[0]['stream'].name()
killer.ILinePersistenceSvc = svc.getFullName() killer.LineFilter.Code = stream_filter.Code assert set(killer.AlwaysKeepBanks) == set(['ODIN', 'HltRoutingBits', 'DAQ']) def decreport_pass(name, report): """Return whether the decreport would pass the filter above.""" return (bool(report.decision()) and report.executionStage() & 0x80 == 0x80 and re.match(r'^Hlt2.*(?<!TurboCalib)Decision$', name)) topSeq = GaudiSequencer("TopSequence") topSeq.Members = [ content_filter, decoder.setup(), stream_filter, copyRaw, killer, ] ApplicationMgr().TopAlg = [topSeq] gaudi = GaudiPython.AppMgr() TES = gaudi.evtsvc() RawBank = GaudiPython.gbl.LHCb.RawBank def rawbank_sizes(rawevent): """Return (name, size) for each raw bank type.""" def size(i): return sum(bank.totalSize() for bank in rawevent.banks(i))
def defaultD0Selection():
    """Build the default D0 -> K- pi+ selection used by the alignment.

    Configures the Escher reconstruction/monitoring sequences, relaxes
    the PV-dependent cuts of the CommonParticles inputs, and returns a
    ParticleSelection wrapping the 'RecoD0Seq' sequencer.
    """
    from Configurables import Escher, TAlignment
    Escher().RecoSequence = ["Hlt", "Decoding", "AlignTr", "Vertex", "RICH"]
    Escher().MoniSequence = ["Tr", "OT"]

    # Tweak a little bit RICH
    from TAlignment.ParticleSelections import MinimalRichSequence
    MinimalRichSequence()

    # Build the D0 -> K- pi+ candidates, reusing CommonParticles as much
    # as possible.
    from Configurables import FilterDesktop
    from Configurables import ChargedProtoParticleMaker, ChargedProtoParticleAddRichInfo, ChargedProtoCombineDLLsAlg
    from CommonParticles.StdAllLooseKaons import StdAllLooseKaons
    from CommonParticles.StdAllLoosePions import StdAllLoosePions
    from CommonParticles.StdLooseKaons import StdLooseKaons
    from CommonParticles.StdLoosePions import StdLoosePions
    from CommonParticles.StdLooseD02HH import StdLooseD02KPi

    # Remove the cuts that require a primary vertex.
    StdLooseD02KPi.MotherCut = "((VFASPF(VCHI2)<10) & (ADMASS('D0')<100*MeV))"
    StdLooseKaons.Code = "ALL"
    StdLoosePions.Code = "ALL"

    # Tight PID cuts, basically to ensure we don't swap kaon and pion.
    wide_code = ("(ADMASS('D0') < 50.*MeV) & (VFASPF(VCHI2) < 9.)"
                 " & (MINTREE('K+'==ABSID, PIDK) > 0)"
                 " & (MINTREE('pi+'==ABSID, PIDK) < 0)")
    wide_window = FilterDesktop("AlignD02KPiWide",
                                Inputs=["Phys/StdLooseD02KPi"],
                                Code=wide_code)
    # Tighter mass window for the candidates actually used in alignment.
    tight_window = FilterDesktop("AlignD02KPi",
                                 Inputs=["Phys/AlignD02KPiWide"],
                                 Code="(ADMASS('D0') < 20.*MeV)")

    from Configurables import TrackParticleMonitor, GaudiSequencer

    def monitor(name, location):
        # All three monitors share the same D0 mass window (MeV).
        return TrackParticleMonitor(name,
                                    InputLocation=location,
                                    MinMass=1810,
                                    MaxMass=1930)

    # The sequence handed over to the alignment.
    reco_seq = GaudiSequencer("RecoD0Seq")
    reco_seq.Members = [
        ChargedProtoParticleMaker('ChargedProtoPMaker'),
        ChargedProtoParticleAddRichInfo('ChargedProtoPAddRich'),
        ChargedProtoCombineDLLsAlg('ChargedProtoPCombDLLs'),
        monitor('StdLooseD02KPiMonitor', '/Event/Phys/StdLooseD02KPi/Particles'),
        wide_window,
        monitor('AlignD02KPiWideMonitor', '/Event/Phys/AlignD02KPiWide/Particles'),
        tight_window,
        monitor('AlignD02KPiMonitor', '/Event/Phys/AlignD02KPi/Particles'),
    ]

    from TAlignment.ParticleSelections import ParticleSelection
    return ParticleSelection(Name='D02KPi',
                             Location='/Event/Phys/AlignD02KPi/Particles',
                             Algorithm=reco_seq)
def configure ( inputdata ,        ## the list of input files
                catalogs = [] ,    ## xml-catalogs (filled by GRID)
                castor = False ,   ## use the direct access to castor/EOS ?
                params = {} ) :
    """Configure DaVinci and the GaudiPython application for the
    MC D0 -> K pi track study: build the d2kpi candidates, require one PV,
    and install gseq plus the TrackFilter as the only top-level algorithms.

    NOTE(review): `catalogs=[]` and `params={}` are mutable default
    arguments — shared across calls; harmless only if never mutated.
    """

    ## configure Track <--> MC relation table
    import LoKiPhysMC.Track2MC_Configuration
    import LoKiMC.MC

    ## import DaVinci
    from Configurables import DaVinci, GaudiSequencer
    ## delegate the actual configurtaion to DaVinci
    dv = DaVinci ( DataType   = '2011' ,
                   InputType  = 'MDST',
                   Lumi       = True,
                   Simulation = True,
                   DDDBtag    = "MC11-20111102",
                   CondDBtag  = "sim-20111111-vc-md100",
                   HistogramFile = "mcd02kpi_tracks7_histo.root",
                   TupleFile     = "mcd02kpi_tracks7_ntuple.root",
                   PrintFreq  = 1000)

    from Configurables import DecayTreeTuple, FilterDesktop, TupleToolGeometry, CombineParticles
    from Configurables import MCDecayTreeTuple, TupleToolMCTruth, MCTupleToolHierarchy
    from PhysSelPython.Wrappers import AutomaticData, Selection, SelectionSequence, DataOnDemand
    from Configurables import CheckPV

    # First using CombineParticle to create the D0
    ################################################################################
    #from StandardParticles import StdAllNoPIDsPions, StdAllNoPIDsKaons
    _pions = DataOnDemand(Location='Phys/StdAllNoPIDsPions/Particles')
    _kaons = DataOnDemand(Location='Phys/StdAllNoPIDsKaons/Particles')

    _d2kpi = CombineParticles("d2kpi")
    _d2kpi.DecayDescriptor = "[D0 -> K- pi+]cc"
    # PT in MeV; PIDK cuts select kaon-like kaons and pion-like pions.
    _d2kpi.DaughtersCuts = { "K-"  : "(PT > 500.0) & (0.0 < PIDK)",
                             "pi+" : "(PT > 500.0) & (5.0 > PIDK)",
                             "K+"  : "(PT > 500.0) & (0.0 < PIDK)",
                             "pi-" : "(PT > 500.0) & (5.0 > PIDK) " }
    _d2kpi.MotherCut = "(VFASPF(VCHI2/VDOF)<10)"
    _d2kpi.CombinationCut = "(ADAMASS('D0') < 50.0)"
    _d2kpi.Preambulo = [ "from LoKiPhysMC.decorators import *" ,
                         "from PartProp.Nodes import CC" ]
    #_d2kpi.ReFitPVs = True

    SelD2KPi = Selection( "SelD2KPi",
                          Algorithm= _d2kpi,
                          RequiredSelections=[_pions,_kaons] )

    SeqD2KPi = SelectionSequence('SeqD2KPi',TopSelection = SelD2KPi)

    # Now the CheckPV method to filter algorithms
    c = CheckPV("OnePV")
    c.MinPVs = 1

    # And a sequencer to put them together
    gseq = GaudiSequencer()
    gseq.Members = [ c, SeqD2KPi.sequence() ]

    ## define the input data
    setData ( inputdata , catalogs , castor )

    ## get/create application manager
    gaudi = appMgr()

    #
    ## modify/update the configuration:
    #

    ## (1) create the algorithm
    alg = TrackFilter( 'TrackFilter' )
    #seq = createSequencer()
    ## (2) replace the list of top level algorithm by
    #     new list, which contains only *THIS* algorithm
    gaudi.setAlgorithms( [ gseq, alg ] )

    return SUCCESS
def configureReco(self, init):
    """
    Configure Reconstruction to be redone.

    Sets up on-demand Calo reconstruction/PID (or full Calo reprocessing
    in the `init` sequence when CaloReProcessing is requested and the
    input is not an mDST), optional unpacking, the obsolete CaloHypo->MC
    link refresh for simulation, and — for MC09 data only — a rerun of
    the Muon PID plus combined-DLL remaking.

    :param init: GaudiSequencer to which reprocessing members are added.
    """

    ## CaloReco & CaloPIDs on-demand
    clusters = ['Digits', 'Clusters']
    from Configurables import CaloProcessor
    caloProc = CaloProcessor(EnableOnDemand=True,
                             OutputLevel=self.getProp("OutputLevel"))
    caloProc.RecList = clusters
    # ---
    if self.getProp('DataType') == 'Upgrade':
        caloProc.NoSpdPrs = True

    ## General unpacking
    from Configurables import DstConf
    if self.isPropertySet('EnableUnpack'):
        DstConf().setProp('EnableUnpack', self.getProp('EnableUnpack'))

    ## unpack Calo Hypos ?
    from Configurables import CaloDstUnPackConf
    unpack = CaloDstUnPackConf()
    hypos = ['Photons', 'MergedPi0s', 'SplitPhotons', 'Electrons']  # CaloHypos
    if self.isPropertySet(
            'EnableUnpack') and "Reconstruction" in self.getProp(
                'EnableUnpack'):
        unpack.setProp('Enable', True)
    else:
        caloProc.RecList += hypos  # enable caloHypos onDemand

    # Reprocess explicitely the full calo sequence in the init sequence ?
    inputtype = self.getProp('InputType').upper()
    if (self.getProp("CaloReProcessing") and inputtype != 'MDST'):
        caloProc.RecList = clusters + hypos
        caloSeq = caloProc.sequence(
        )  # apply the CaloProcessor configuration
        cSeq = GaudiSequencer('CaloReProcessing')
        cSeq.Members += [caloSeq]
        init.Members += [cSeq]
        unpack.setProp('Enable', False)
        # update CaloHypo->MC Linker
        if self.getProp('Simulation'):
            log.info(
                "CaloReprocessing : obsolete CaloHypo2MC Links is updated")
            from Configurables import (TESCheck, EventNodeKiller,
                                       CaloHypoMCTruth)
            caloMCLinks = ["Link/Rec/Calo"]
            caloMCSeq = GaudiSequencer("cleanCaloMCLinks")
            checkCaloMCLinks = TESCheck("checkCaloMCLinks")
            checkCaloMCLinks.Inputs = caloMCLinks
            checkCaloMCLinks.Stop = False
            killCaloMCLinks = EventNodeKiller("killCaloMCLinks")
            killCaloMCLinks.Nodes = caloMCLinks
            caloMCSeq.Members = [checkCaloMCLinks, killCaloMCLinks]
            init.Members += [caloMCSeq]
            update = self.getProp("UpdateCaloMCLinks")
            if update:
                redoCaloMCLinks = CaloHypoMCTruth("recreteCaloMCLinks")
                init.Members += [redoCaloMCLinks]
    else:
        caloProc.applyConf()
        # FIX(review): was `inputtype != 'MDST'`, which logged this
        # message for every non-mDST job in which reprocessing was not
        # requested. The message only makes sense when the input IS a
        # reduced (m)DST, i.e. when reprocessing was suppressed by the
        # condition above.
        if inputtype == 'MDST':
            log.info(
                "CaloReProcessing cannot be processed on reduced (m)DST data"
            )

    # For backwards compatibility with MC09, we need the following to rerun
    # the Muon Reco on old data. To be removed AS SOON as this backwards compatibility
    # is no longer needed
    if (self.getProp("DataType") == 'MC09' and inputtype != 'MDST'
            and self.getProp("AllowPIDRerunning") and inputtype != 'RDST'):

        from Configurables import DataObjectVersionFilter, MuonRec, TESCheck
        from MuonID import ConfiguredMuonIDs

        rerunPIDSeq = GaudiSequencer("ReRunMuonPID")
        init.Members += [rerunPIDSeq]

        # Check data version, to see if this is needed or not
        rerunPIDSeq.Members += [
            DataObjectVersionFilter(
                "MuonPIDVersionCheck",
                DataObjectLocation="/Event/Rec/Muon/MuonPID",
                MaxVersion=0)
        ]
        # Check raw event is available
        rerunPIDSeq.Members += [
            TESCheck("TESCheckRawEvent", Inputs=["DAQ/RawEvent"], Stop=False)
        ]
        # Run Muon PID
        cm = ConfiguredMuonIDs.ConfiguredMuonIDs(
            data=self.getProp("DataType"))
        rerunPIDSeq.Members += [MuonRec(), cm.getMuonIDSeq()]
        # If muon PID has rerun, need to re make the Combined DLLS...
        from Configurables import (ChargedProtoParticleAddMuonInfo,
                                   ChargedProtoCombineDLLsAlg)
        rerunPIDSeq.Members += [
            ChargedProtoParticleAddMuonInfo("CProtoPAddNewMuon"),
            ChargedProtoCombineDLLsAlg("CProtoPCombDLLNewMuon")
        ]

    # Compatibility with pre-2011 data, where Rec/Summary and Trigger/RawEvent are missing
    import PhysConf.CheckMissingTESData as DataCheck
    DataCheck.checkForMissingData()
def ConfigureMoore():
    """Configure Moore for the swimming pass.

    Resolves DB tags/TCK either from the run number (via the tag shelve
    database) or from explicitly supplied tags, configures the output
    writer, installs the TCK transformation and the PV-moving swimming
    algorithm, and sets up debug TisTos tooling with private decoders.
    Registers a post-config action that prepends the HLT-killing sequence.
    """
    config = Swimming()
    from Swimming.HltTransforms import getTransform
    thisTransform = getTransform(
        config.getProp('TransformName'),
        config.getProp('Hlt1Triggers') + config.getProp('Hlt2Triggers'))

    from Configurables import (HltConfigSvc, EventNodeKiller,
                               HltMoveVerticesForSwimming, Moore)

    #Global configuration
    mykiller = EventNodeKiller("killHlt")
    mykiller.Nodes = config.getProp('HltNodesToKill')
    deathstar = GaudiSequencer("killHltSeq")
    deathstar.Members = [mykiller]
    from Swimming import MooreSetup
    #
    dddb = config.getProp('DDDBtag')
    conddb = config.getProp('CondDBtag')
    tck = config.getProp('TCK')
    run = config.getProp('RunNumber')
    # Either the run number alone (tags looked up in the shelve DB), or
    # all three of DDDB/CondDB/TCK and no run number — anything else is
    # an error.
    if not dddb and not conddb and not tck and run:
        import shelve
        tag_db = os.path.expandvars(config.getProp('TagDatabase'))
        if not os.path.exists(tag_db):
            raise OSError, "Tag database file %s does not exist" % config.getProp(
                'TagDatabase')
        tag_db = shelve.open(tag_db, 'r')
        info = tag_db['info']
        tags = info[run]
        Moore().DDDBtag = tags['DDDBtag']
        Moore().CondDBtag = tags['CondDBtag']
        Moore().InitialTCK = tags['TCK']
        Swimming().TCK = tags['TCK']
    elif dddb and conddb and tck and not run:
        Moore().DDDBtag = dddb
        Moore().CondDBtag = conddb
        Moore().InitialTCK = tck
    else:
        raise TypeError, 'You must specify either the CondDB tag, DDDB tag and TCK and _not_ the run number' + \
              ' or only the run number.'
    Moore().Simulation = config.getProp('Simulation')
    Moore().DataType = config.getProp('DataType')
    Moore().outputFile = config.getProp('OutputFile')
    Moore().WriteFSR = config.getProp('WriteFSR')
    Moore().Persistency = config.getProp('Persistency')
    Moore().WriterRequires = []

    # Add extra locations to writer
    from Configurables import InputCopyStream
    writer = InputCopyStream('Writer')
    writer.ItemList = [config.getProp('SwimmingPrefix') + '/Reports#1']
    writer.OptItemList = list(
        set([
            l + '/P2TPRelations#1'
            for l in config.getProp('OffCands').values()
        ]))

    #
    # Define the TCK transformation
    #
    HltConfigSvc().ApplyTransformation = thisTransform
    from pprint import pprint
    pprint(HltConfigSvc().ApplyTransformation)

    #
    # Define the swimming algorithm
    #
    myswimmer = HltMoveVerticesForSwimming("HltMovePVs4Swimming")
    myswimmer.SwimmingDistance = 0.0
    loc = config.getProp(
        'SwimmingPrefix')  # TODO check differences with trunk more carefully
    myswimmer.Bcontainer = loc
    myswimmer.InputSelection = config.getProp('OnlinePV')
    myswimmer.OutputSelection = config.getProp('OutPVSel')
    myswimmer.OutputLevel = 4

    # Configure an extra TisTos Tool and some decoder algos to debug TisTos issues
    prefix = config.getProp('SwimmingPrefix')
    from Configurables import HltDecReportsDecoder, HltSelReportsDecoder
    # (decoder class, [(property name, TES location), ...]); the first
    # entry of each property list defines the DataOnDemand output.
    decoders = [(HltDecReportsDecoder,
                 [('OutputHltDecReportsLocation', 'Hlt/DecReports')]),
                (HltSelReportsDecoder,
                 [('OutputHltSelReportsLocation', 'Hlt/SelReports'),
                  ('HltDecReportsLocation', 'Hlt/DecReports')])]
    from Configurables import TriggerTisTos
    ToolSvc().addTool(TriggerTisTos, 'SwimmingDebugTisTos')
    ToolSvc().SwimmingDebugTisTos.TOSFracMuon = 0.0
    ToolSvc().SwimmingDebugTisTos.TOSFracTT = 0.0
    for conf, d in decoders:
        configurable = conf('Swimming' + d[0][1].split('/')[-1])
        print configurable
        # NOTE(review): bare except — presumably guarding an
        # AttributeError when the decoder exposes RawEventLocations
        # instead of InputRawEventLocation; confirm and narrow.
        try:
            configurable.InputRawEventLocation = 'PrevTrig/RawEvent'
        except:
            configurable.RawEventLocations = [
                'PrevTrig/RawEvent'
            ] + configurable.RawEventLocations
        output = None
        for prop, loc in d:
            if not output:
                output = prefix + '/' + loc
            setattr(configurable, prop, prefix + '/' + loc)
        DataOnDemandSvc().AlgMap[output] = configurable
        # Strip the 'Output' prefix to get the TisTos tool property name.
        prop = d[0][0][6:]
        print prop, output
        setattr(ToolSvc().SwimmingDebugTisTos, prop, output)

    class Deathstar(object):
        # Small holder so the sequence can be prepended after all other
        # post-config actions have run.
        def __init__(self, seq):
            self._seq = seq

        def insert(self):
            ApplicationMgr().TopAlg.insert(0, self._seq)

    d = Deathstar(deathstar)
    appendPostConfigAction(d.insert)
def applyConf(self):
    """Apply the reconstruction-sequence configuration.

    Validates SpecialData, resolves the RecoSequence phase list, then
    configures each requested phase: primary-vertex and V0 finding
    (Run 1 vs Run 2 variants), tracking, RICH, CALO, MUON, PROTO and
    SUMMARY, populating the corresponding Reco*Seq GaudiSequencers.
    """

    # Check the special data options
    for option in self.getProp("SpecialData"):
        if option not in self.KnownSpecialData:
            raise RuntimeError("Unknown SpecialData option '%s'"%option)

    # Phases
    # NOTE(review): indentation reconstructed — the Run 2 override is
    # taken to apply only when the user did not set RecoSequence
    # explicitly; confirm against the original file.
    if not self.isPropertySet("RecoSequence"):
        self.setProp("RecoSequence",self.DefaultSubDetsFieldOn)
        if self.getProp("DataType") in self.Run2DataTypes:
            self.setProp("RecoSequence",self.DefaultSubDetsFieldOnRun2)
    recoSeq = self.getProp("RecoSequence")
    if self.getProp("SkipTracking"):
        for det in self.DefaultTrackingSubdets:
            if det in recoSeq:
                recoSeq.remove(det)
    from Configurables import ProcessPhase
    ProcessPhase("Reco").DetectorList += recoSeq

    # Primary Vertex and V0 finding
    if "Vertex" in recoSeq and self.getProp("DataType") not in self.Run2DataTypes:
        from Configurables import PatPVOffline, TrackV0Finder
        pvAlg = PatPVOffline()
        if "2009" == self.getProp("DataType"):
            from PatPV import PVConf
            PVConf.VLoosePV().configureAlg()
        elif ( "veloOpen" in self.getProp("SpecialData") or
               "microBiasTrigger" in self.getProp("SpecialData") ):
            from PatPV import PVConf
            PVConf.LoosePV().configureAlg()
        elif ( not ( self.getProp("Simulation") and
                     self.getProp("DataType") in ["2008","2009","2010","MC09"] ) ):
            # Default setting uses beam spot constraint from DB, available from 2011.
            # Prior to 2011 stability of beam spot is not certain
            from PatPV import PVConf
            PVConf.StandardPV().configureAlg()
        # MC with particle gun cannot reconstruct PV, hence need to introduce one by hand
        if "pGun" in self.getProp("SpecialData"):
            from Configurables import PGPrimaryVertex
            pgPV = PGPrimaryVertex()
            GaudiSequencer("RecoVertexSeq").Members += [ pgPV ]
        else:
            GaudiSequencer("RecoVertexSeq").Members += [ pvAlg ];
        trackV0Finder = TrackV0Finder()
        GaudiSequencer("RecoVertexSeq").Members += [ trackV0Finder ]

    # for Run 2, we run a different algorithm and don't want to have
    # the V0 finder in the vertex sequence (which is now after HLT1)
    if "Vertex" in recoSeq and self.getProp("DataType") in self.Run2DataTypes:
        from Configurables import PatPV3D, PVOfflineTool, LSAdaptPV3DFitter
        pvAlg = PatPV3D("PatPV3D")
        ## this should go in a configuration file when we know what to use
        pvAlg.addTool(PVOfflineTool,"PVOfflineTool")
        pvAlg.PVOfflineTool.addTool(LSAdaptPV3DFitter, "LSAdaptPV3DFitter")
        pvAlg.PVOfflineTool.PVFitterName = "LSAdaptPV3DFitter"
        pvAlg.PVOfflineTool.LSAdaptPV3DFitter.UseFittedTracks = True
        pvAlg.PVOfflineTool.LSAdaptPV3DFitter.AddMultipleScattering = False
        pvAlg.PVOfflineTool.LSAdaptPV3DFitter.TrackErrorScaleFactor = 1.0
        pvAlg.PVOfflineTool.LSAdaptPV3DFitter.MinTracks = 4
        pvAlg.PVOfflineTool.LSAdaptPV3DFitter.trackMaxChi2 = 12.0
        pvAlg.PVOfflineTool.UseBeamSpotRCut = True
        pvAlg.PVOfflineTool.BeamSpotRCut = 0.2
        pvAlg.PVOfflineTool.BeamSpotRHighMultiplicityCut = 0.4
        pvAlg.PVOfflineTool.BeamSpotRMultiplicityTreshold = 10
        pvAlg.PVOfflineTool.InputTracks = [ "Rec/Track/FittedHLT1VeloTracks" ]
        pvAlg.OutputVerticesName = "Rec/Vertex/Primary"
        pvAlg.PrimaryVertexLocation = "Rec/Vertex/PrimaryVertices"
        # Remove all tracks that don't belong to a PV (to save space on a DST)
        from Configurables import TrackContainerCleaner, SelectTrackInVertex
        pvVeloTracksCleaner = TrackContainerCleaner("PVVeloTracksCleaner")
        pvVeloTracksCleaner.inputLocation = "Rec/Track/FittedHLT1VeloTracks"
        pvVeloTracksCleaner.selectorName = "SelectTrackInVertex"
        GaudiSequencer("RecoVertexSeq").Members += [ pvAlg, pvVeloTracksCleaner ];
        GaudiSequencer("RecoVertexSeq").IgnoreFilterPassed = True

    ## Upgrade type?
    if self.getProp("DataType") == 'Upgrade' :
        specialDataList = self.getProp("SpecialData")
        specialDataList.append("upgrade")
        self.setProp("SpecialData",specialDataList)

    # Tracking (Should make it more fine grained ??)
    DoTracking = False
    for seq in self.DefaultTrackingSubdets :
        if seq in recoSeq:
            DoTracking = True
    if DoTracking:
        trackConf = TrackSys()
        self.setOtherProps(trackConf,["SpecialData","OutputType"])
        trackConf.ExpertHistos = self.expertHistos()

    # RICH
    if "RICH" in recoSeq:
        # The main sequence
        seq = GaudiSequencer("RecoRICHSeq")
        # Create the top level Conf object
        richConf = RichRecSysConf(self.richRecConfName)
        # set some general options
        self.setOtherProps(richConf,["SpecialData","Context","OutputLevel",
                                     "Simulation","DataType","OnlineMode"])
        # Set the sequencer the RICH reco algs should be added to
        richConf.RecoSequencer = seq
        # Input Tracks (would be better to not hard code this. Get from TrackSys() or DstConf())
        # Only set if not previously set, to allow for custom studies using non standard locations
        # set at the top level options file level
        if not richConf.trackConfig().isPropertySet("InputTracksLocation") :
            richConf.trackConfig().setProp("InputTracksLocation","Rec/Track/Best")
        # Output PID Location. Again allow for pre-defined custom locations.
        if not richConf.isPropertySet("RichPIDLocation") :
            richConf.setProp("RichPIDLocation","Rec/Rich/PIDs")
        # Printout
        import GaudiKernel.ProcessJobOptions
        GaudiKernel.ProcessJobOptions.PrintOn()
        log.debug(richConf)
        GaudiKernel.ProcessJobOptions.PrintOff()

    # CALO
    if "CALO" in recoSeq:
        import GaudiKernel.ProcessJobOptions
        seq = GaudiSequencer ( 'RecoCALOSeq' )
        caloConf=CaloProcessor( Context = self.getProp('Context') ,
                                OutputLevel = self.getProp('OutputLevel') ,
                                UseTracks = True ,
                                EnableOnDemand = False ,
                                DataType = self.getProp ('DataType') )
        GaudiKernel.ProcessJobOptions.PrintOn()
        seq.Members = [caloConf.caloSequence()]
        GaudiKernel.ProcessJobOptions.PrintOff()

    # MUON
    if "MUON" in recoSeq:
        from MuonID import ConfiguredMuonIDs
        from Configurables import RawBankReadoutStatusConverter,RawBankReadoutStatusFilter
        cm=ConfiguredMuonIDs.ConfiguredMuonIDs(data=self.getProp("DataType"),
                                               specialData=self.getProp("SpecialData"))
        MuonIDSeq=cm.getMuonIDSeq()
        RawBankReadoutStatusConverter("MuonProcStatus").System="Muon"
        RawBankReadoutStatusConverter("MuonProcStatus").BankTypes=["Muon"]
        # BankType 13 is the Muon raw bank; mask 2067 selects the error
        # bits that cause rejection — presumably; confirm against the
        # RawBankReadoutStatusFilter documentation.
        RawBankReadoutStatusFilter("MuonROFilter").BankType=13
        RawBankReadoutStatusFilter("MuonROFilter").RejectionMask=2067
        GaudiSequencer("RecoMUONSeq").Members += [ "MuonRec",
                                                   "RawBankReadoutStatusConverter/MuonProcStatus",
                                                   "RawBankReadoutStatusFilter/MuonROFilter",
                                                   MuonIDSeq ]

    # PROTO
    if "PROTO" in recoSeq:
        self.setOtherProps(GlobalRecoConf(),["DataType","SpecialData",
                                             "Context","OutputLevel"])
        GlobalRecoConf().RecoSequencer = GaudiSequencer("RecoPROTOSeq")

    # SUMMARY
    if "SUMMARY" in recoSeq:
        from Configurables import RecSummaryAlg
        summary = RecSummaryAlg("RecSummary")
        # make a new list of uppered detectors
        dets = []
        for det in self.getProp("Detectors"):
            dets.append(det.upper())
        summary.Detectors = dets
        GaudiSequencer("RecoSUMMARYSeq").Members += [summary]
# Configure Stripping from Configurables import ProcStatusCheck filterBadEvents = ProcStatusCheck() sc = StrippingConf( Streams = [ MyStream ], MaxCandidates = 2000, AcceptBadEvents = False, BadEventSelection = filterBadEvents ) from Configurables import StrippingReport sr = StrippingReport(Selections = sc.selections()); MySequencer = GaudiSequencer('Sequence') MySequencer.Members = [sc.sequence(),sr] MySequencer.IgnoreFilterPassed = True DaVinci().appendToMainSequence([killer, MySequencer]) from Configurables import FilterInTrees rho_list = FilterInTrees( 'rho_list', Code = "'rho(770)0'==ABSID") rho_Sel = Selection ( "rho_Sel" , Algorithm =rho_list , RequiredSelections = [ AutomaticData(Location = location) ] ) rho_Seq = SelectionSequence("rho_Seq", TopSelection = rho_Sel)
def delphesForGauss():
    """Set up the Delphes-based fast-simulation sequence for Gauss.

    Builds the 'DelphesSeq' sequencer (SimInit, DelphesAlg, histograms,
    protoparticle making, Rich/Muon info, combined DLLs, calo protos,
    tuple, ODIN fix-up and particle-gun PV) and appends it to the
    ApplicationMgr TopAlg. Also writes the default Delphes card.
    """
    from Configurables import (GaudiSequencer, SimInit, DelphesAlg,
                               ApplicationMgr, DelphesHist, DelphesTuple,
                               DelphesProto, DelphesCaloProto, BooleInit,
                               PGPrimaryVertex)
    from Configurables import (DelphesRecoSummary, DelphesParticleId,
                               ChargedProtoCombineDLLsAlg,
                               ChargedProtoParticleAddRichInfo,
                               ChargedProtoParticleAddMuonInfo)

    # GAN-based PID configurable; one trained model per particle
    # hypothesis, read from EOS. NOTE(review): DelphesPID is constructed
    # but its sequence membership below is commented out — instantiation
    # still registers the configurable.
    DelphesPID = DelphesParticleId(
        "DelphesParticleId",
        RichGan={  ###### New Yandex models
            'mu+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_muon_tfScaler/",
            'mu-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_muon_tfScaler/",
            'p+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_proton_tfScaler/",
            'p~-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_proton_tfScaler/",
            'K+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_kaon_tfScaler/",
            'K-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_kaon_tfScaler/",
            'pi+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_pion_tfScaler/",
            'pi-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/FastFastRICH_Cramer_pion_tfScaler/",
        },
        RichGanInput='x',
        RichGanOutput='QuantileTransformerTF_3/stack',
        MuonLLGan={
            'mu+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/diamondGan-2019-04-2401_43_08.499323/",
            'mu-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/diamondGan-2019-04-2401_43_08.499323/",
            'pi+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuGanPion-2019-04-2913_16_04.022940/",
            'pi-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuGanPion-2019-04-2913_16_04.022940/",
            'K+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuGanKaon-2019-04-2913_16_43.860800/",
            'K-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuGanKaon-2019-04-2913_16_43.860800/",
            'p+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuGanProton-2019-04-3000_01_07.504122/",
            'p~-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuGanProton-2019-04-3000_01_07.504122/",
        },
        MuonLLGanInput='X_gen',
        MuonLLGanOutput='output',
        isMuonMlp={
            'mu+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuonIsMuon2019-05-03__11_11_19.978969/",
            'mu-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/MuonIsMuon2019-05-03__11_11_19.978969/",
            'pi+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/PionIsMuon2019-05-03__11_12_17.633997/",
            'pi-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/PionIsMuon2019-05-03__11_12_17.633997/",
            'K+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/KaonIsMuon2019-05-03__11_11_58.436823/",
            'K-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/KaonIsMuon2019-05-03__11_11_58.436823/",
            'p+':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/ProtonIsMuon2019-05-03__11_09_52.500469/",
            'p~-':
            "/eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/ProtonIsMuon2019-05-03__11_09_52.500469/",
        },
        isMuonMlpInput='inputds',
        isMuonMlpOutput='strided_slice',
        #OutputLevel = VERBOSE,
    )
    # Reco-summary emulation (nTracks sampled from a Brunel reference
    # histogram); also currently commented out of the sequence.
    DelphesRecStat = DelphesRecoSummary(
        'DelphesRecoSummary',
        nTracksHistogramFile=
        "root://eosuser.cern.ch//eos/lhcb/user/l/landerli/FastSimulationModels/v20190503/KS0_nTracks_Brunel.root",
        nTracksHistogramName="dataHist",
        #OutputLevel = VERBOSE,
    )

    #make the default card
    from LbDelphes.LbDelphesCardTemplates import DelphesCard
    c = DelphesCard.DelphesCard(name="delphes_card",
                                year='2012',
                                magPolarity='up',
                                efficiencies=True,
                                resolutions=True)
    # #configure the card here for expert users
    # #c.modules['ECAL'].custom_xy_binning(...)
    c.FinalizeCard()

    #PGPrimaryVertex().OutputLevel=DEBUG
    PGPrimaryVertex().OutputVerticesName = '/Event/Rec/Vertex/Primary'
    PGPrimaryVertex().InputVerticesName = '/Event/MCFast/MCVertices'

    # The card file is written to the working directory by FinalizeCard().
    DelphesAlg().LHCbDelphesCardLocation = '$PWD/' + c.name + '.tcl'
    DelphesAlg().DelphesTrackLocation = 'FinalTrackMerger/tracks'
    #DelphesAlg().OutputLevel=VERBOSE
    DelphesCaloProto().LHCbDelphesCardLocation = DelphesAlg(
    ).LHCbDelphesCardLocation
    DelphesProto().TrackLUT = "$LBDELPHESROOT/LookUpTables/lutTrack.dat"
    DelphesProto(
    ).TrackCovarianceLUT = "$LBDELPHESROOT/LookUpTables/lutCovarianceProf.dat"
    #DelphesProto().OutputLevel=VERBOSE
    #DelphesCaloProto().OutputLevel=DEBUG
    #DelphesTuple().OutputLevel=DEBUG

    # Assemble the processing sequence; member order defines execution
    # order.
    delphesSeq = GaudiSequencer("DelphesSeq")
    delphesSeq.Members = [SimInit("InitDelphes")]
    delphesSeq.Members += [DelphesAlg("DelphesAlg")]
    delphesSeq.Members += [DelphesHist("DelphesHist")]
    delphesSeq.Members += [DelphesProto("DelphesProto")]
    #delphesSeq.Members += [ DelphesRecStat ]
    #delphesSeq.Members += [ DelphesPID ]
    delphesSeq.Members += [
        ChargedProtoParticleAddRichInfo('ChargedProtoParticleAddRichInfo')
    ]
    delphesSeq.Members += [
        ChargedProtoParticleAddMuonInfo('ChargedProtoParticleAddMuonInfo')
    ]
    delphesSeq.Members += [
        ChargedProtoCombineDLLsAlg('ChargedProtoCombineDLLsAlg')
    ]
    delphesSeq.Members += [DelphesCaloProto("DelphesCaloProto")]
    delphesSeq.Members += [DelphesTuple("DelphesTuple")]
    delphesSeq.Members += [BooleInit("BooleInit", ModifyOdin=True)]
    delphesSeq.Members += [PGPrimaryVertex("PGPrimaryVertex")]
    delphesSeq.Members += [GaudiSequencer("DelphesMonitor")]
    ApplicationMgr().TopAlg += [delphesSeq]
# Build the Lb combiner from the template defined earlier in this script.
# NOTE(review): `tuple` and `filter` below are script-level variables
# defined before this chunk that shadow the Python builtins.
lb_combine = lb_combine_template.clone(lb_combine_name)
lb_combine.DecayDescriptor = lb_decay
lb_combine.DaughtersCuts = lb_daughters
lb_combine.MotherCut = lb_mother
# Output of combining Lc daughters is input to combining Lb daughters
lb_combine.Inputs = [
    "Phys/StdAllNoPIDsMuons", "Phys/{0}".format(lc_combine_name)
]

# Clone the DecayTreeTuple created above
cheat_tuple = tuple.clone("Tuple{0}".format(lb_combine_name))
cheat_tuple.Inputs = ["Phys/{0}".format(lb_combine_name)]

# Sequences for ntuple creation
dec_sequence = GaudiSequencer("SeqBook{0}".format(line))
dec_sequence.Members = [filter, tuple]
mc_sequence = GaudiSequencer("SeqMCGenBook{0}".format(line))
mc_sequence.Members = [mc_tuple]
cheat_sequence = GaudiSequencer("SeqMCCheatBook{0}".format(line))
cheat_sequence.Members = [lc_combine, lb_combine, cheat_tuple]

# Sequence for ntuple sequences; IgnoreFilterPassed makes every branch
# run even if an earlier one rejects the event.
tuples_sequence = GaudiSequencer("SeqTuples{0}".format(line))
tuples_sequence.IgnoreFilterPassed = True
tuples_sequence.Members = [dec_sequence, mc_sequence, cheat_sequence]

# Sequence for MC filter then ntuple sequences
master_sequence = GaudiSequencer("SeqMaster{0}".format(line))
master_sequence.Members = [mc_filter, tuples_sequence]

DaVinci().UserAlgorithms.append(master_sequence)
# Re-pack the raw event from the 2015 Online (Moore) layout into the
# Reco15 layout before anything else runs.
jseq = GaudiSequencer("RawEventSplitSeq")
juggler = RawEventJuggler("rdstJuggler")
juggler.Sequencer = jseq
juggler.Input = 0.3  # 2015 Online (Moore) format
juggler.Output = 4.2  # Reco15 format
#help(juggler)
#from Configurables import DaVinci
#DaVinci().prependToMainSequence( [jseq] )

# filter out events triggered exclusively by CEP lines
from Configurables import LoKi__HDRFilter as HDRFilter
from DAQSys.Decoders import DecoderDB
Hlt2DecReportsDecoder = DecoderDB[
    "HltDecReportsDecoder/Hlt2DecReportsDecoder"].setup()
HLTFilter2 = HDRFilter(
    "LoKiHLT2Filter",
    Code=
    "HLT_PASS_RE('Hlt2(?!Forward)(?!DebugEvent)(?!Lumi)(?!Transparent)(?!PassThrough)(?!LowMult).*Decision')",
    Location=Hlt2DecReportsDecoder.OutputHltDecReportsLocation)
# NOTE(review): `otherseq` is built but never used — the juggler
# sequence and the HLT2 filter are installed directly as pre-filters
# below. Kept as-is since instantiating a configurable registers it.
otherseq = GaudiSequencer("filters")
otherseq.Members = [jseq, HLTFilter2]
DaVinci().EventPreFilters = [jseq, HLTFilter2]
#DaVinci().prependToMainSequence(HLTFilter2)

# input file
importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco15a_Run164668.py")