class DsoDb (_Dso.PyDsoDb):
    """ The repository of 'rootmap' files (location, content,...) and a set
    of operations one can apply on them (load dict, query dicts,...)
    """
    def __init__(self):
        super(DsoDb, self).__init__('AthenaDsoDb')
        import AthenaCommon.Logging
        self.msg = AthenaCommon.Logging.logging.getLogger("AthenaDsoDb")
        #self.msg.setLevel(AthenaCommon.Logging.logging.VERBOSE)
        # inject the known aliases NOW
        for k, v in _aliases.iteritems():
            try:
                self.db[k] = self.db[v]
            except KeyError:
                # fix: the exception object was previously bound ('except
                # KeyError,err') but never used — drop the binding.
                self.msg.info("could not install alias [%s] -> [%s]", k, v)
        # make sure we'll be able to load dicts
        import PyCintex
        PyCintex.Cintex.Enable()
        # load reflex; the namespace moved under ROOT:: in later ROOT builds
        self._load_dict = PyCintex.loadDict
        self._load_dict('ReflexRflx')
        self._rflx = PyCintex.makeNamespace('Reflex')
        if not self._rflx:
            self._rflx = PyCintex.makeNamespace('ROOT::Reflex')
        # cache the Reflex::Type.ByName lookup and the global namespace
        self._rflx_type = self._rflx.Type.ByName
        self._gbl = PyCintex.makeNamespace('')
        return
def muon_cuts(self, cg):
    """Register the muon selection cuts on the cut-group *cg* and return it.

    The dependency (`dep=`) of each cut is the author requirement, so the
    downstream cuts are only evaluated for muons of the configured algorithm.
    """
    import PyCintex
    PyCintex.loadDictionary("muonEventDict")
    from ROOT import MuonParameters
    # author requirement depends on the configured reconstruction algorithm
    if self.muon_algo == "Muid":
        good_author = lambda mu : mu.isAuthor(MuonParameters.MuidCo)
    else:
        good_author = lambda mu : mu.isAuthor(MuonParameters.STACO) and mu.isCombinedMuon() == 1
    cg(Cut("author", good_author))
    cg(Cut("pt", lambda mu : mu.pt() > 20*GeV, dep=good_author))
    # inner-detector vs muon-spectrometer momentum agreement
    cg(Cut("msidmatch", lambda mu : abs(mu.inDetTrackParticle().pt() - mu.muonExtrapolatedTrackParticle().pt()) < 15*GeV, dep=good_author))
    cg(Cut("mspt", lambda mu : mu.muonExtrapolatedTrackParticle().pt() > 10*GeV, dep=good_author))
    cg(Cut("eta", lambda mu : abs(mu.eta()) < 2.4))
    def vx_id_err(mu):
        # parameters()[0]/error(0) w.r.t. the first vertex — presumably the
        # d0 significance; confirm against the perigee parameter ordering.
        vxp = self.vertices[0].recVertex().position()
        pavV0 = self.tool_ttv.perigeeAtVertex(mu.inDetTrackParticle(), vxp)
        return pavV0.parameters()[0]/pavV0.localErrorMatrix().error(0)
    # only evaluated when an author-matching muon AND at least one vertex exist
    cg(Cut("vx", lambda mu : abs(vx_id_err(mu)) < 10, dep=lambda mu : good_author(mu) and len(self.vertices) > 0))
    # relative track isolation in a cone of 0.2
    cg(Cut("ptcone20", lambda mu : mu.parameter(MuonParameters.ptcone20)/mu.pt() < 0.1))
    return cg
def test19ObjectIdentity(self):
    """Pointer and reference returns of one C++ object share an address;
    two distinct objects do not."""
    first = self.A.B.C.Calling()
    second = self.A.B.C.Calling()
    addr = PyCintex.addressOf
    # PyROOT objects expose no _theObject attribute (performance), so the
    # comparison is done on raw addresses instead.
    self.failUnless(addr(first.retByPointer()) == addr(first.retByReference()))
    self.failUnless(addr(first.retByPointer()) != addr(second.retByPointer()))
def loadLibs():
    """Load the egammaMVACalib libraries, for both ROOTCORE and athena setups."""
    import ROOT
    if 'AtlasVersion' in os.environ:
        # athena release: the dictionary is enough
        import PyCintex
        PyCintex.loadDictionary('egammaMVACalib')
        return
    # ROOTCORE / AnalysisRelease
    if ROOT.gROOT.GetVersion()[0] == '5':
        # libCint is only needed (and only exists) for ROOT 5
        _ = ROOT.gROOT.ProcessLine('gSystem->Load("libCint")')
    _ = ROOT.gROOT.ProcessLine(".x $ROOTCOREDIR/scripts/load_packages.C")
def initialize(self):
    """Acquire StoreGate and load the thinning DataVector dictionary."""
    self.msg.info( "Initializing %s", self.name() )
    # event store handle
    # (alternative: PyAthena.py_svc("StoreGateSvc", createIf=True))
    self.sg = PyAthena.StoreGate.pointer("StoreGateSvc")
    # dictionary for the DataVector payloads handled by this algorithm
    import PyCintex
    PyCintex.loadDict("libAthExThinningEventDict")
    from RootUtils import PyROOTFixes
    return StatusCode.Success
def test19ObjectIdentity(self):
    """Address-based identity check for pointer vs reference returns."""
    obj_a = self.A.B.C.Calling()
    obj_b = self.A.B.C.Calling()
    def address(o):
        # PyROOT objects hide _theObject for performance, so compare addresses
        return PyCintex.addressOf(o)
    # same underlying object via pointer or reference -> same address
    self.failUnless(address(obj_a.retByPointer()) == address(obj_a.retByReference()))
    # different objects -> different addresses
    self.failUnless(address(obj_a.retByPointer()) != address(obj_b.retByPointer()))
def __init__(self):
    """Bind to the C++ Ath::DsoDb singleton and cache the Reflex namespace."""
    # Reflex->PyROOT bridging must be on before anything else
    import PyCintex; PyCintex.Cintex.Enable()
    import PyUtils.RootUtils as ru
    self._cxx = ru.import_root().Ath.DsoDb.instance()
    # Reflex dictionary; the namespace name differs between ROOT builds
    PyCintex.loadDict('ReflexRflx')
    rflx = PyCintex.makeNamespace('Reflex')
    self._rflx = rflx if rflx else PyCintex.makeNamespace('ROOT::Reflex')
    return
def __init__(self):
    """Hook up the C++ DsoDb singleton and a Reflex namespace proxy."""
    # enable the Reflex->PyROOT bridge first
    import PyCintex
    PyCintex.Cintex.Enable()
    import PyUtils.RootUtils as ru
    root = ru.import_root()
    self._cxx = root.Ath.DsoDb.instance()
    # pull in the Reflex dictionary, then locate the namespace; it moved
    # under ROOT:: in later ROOT versions
    loader = PyCintex.loadDict
    loader('ReflexRflx')
    self._rflx = PyCintex.makeNamespace('Reflex')
    if not self._rflx:
        self._rflx = PyCintex.makeNamespace('ROOT::Reflex')
    return
def gen_typeregistry_dso(oname=_dflt_typereg_fname):
    '''inspect all the accessible reflex types and get their rootmap-naming.
    also associate the clid if available.
    '''
    # CLID database used for type-name -> clid association
    import CLIDComps.clidGenerator as _c
    cliddb = _c.clidGenerator(db=None)
    del _c
    import PyUtils.path as _p
    oname = _p.path(oname)
    del _p
    import PyUtils.Logging as _L
    msg = _L.logging.getLogger('typereg-dso')
    #msg.setLevel(_L.logging.INFO)
    msg.setLevel(_L.logging.VERBOSE)
    del _L
    msg.info("installing registry in [%s]...", oname)
    # FIXME: should use the Cxx one...
    #reg = DsoDb()
    reg = PyDsoDb()
    cls_names = reg.db.keys()
    msg.debug("::: loading reflex")
    import PyCintex
    PyCintex.Cintex.Enable()
    PyCintex.loadDict('libReflexRflx.so')
    rflx = PyCintex.makeNamespace('Reflex')
    if not rflx:
        # newer ROOT builds nest Reflex under ROOT::
        rflx = PyCintex.makeNamespace('ROOT::Reflex')
    rflx = rflx.Type
    assert (rflx)
    import PyCintex
    _load_lib = PyCintex.loadDict
    def _load_dict(libname, retry=10):
        # best-effort dictionary load: warn and continue on failure.
        # NOTE(review): 'retry' is never used in the visible body — confirm
        # whether retries were intended.
        msg.debug("::: loading [%s]...", libname)
        try:
            return _load_lib(libname)
        except (Exception, SystemError,), err:
            msg.warning("**error** %s", err)
        return
def __init__(self, poolfile, **args):
    """An AOD file pointer (not a string) must be given as argument"""
    # name-mangled storage of the current configuration holder
    self.__dict__.__setitem__("_TriggerConfigARA__curConf", _TrigConfHolder())
    self.__currentIOV = {}
    self.verbose = False
    if 'verbose' in args:
        self.verbose = args['verbose']
    # decorate the PyCintex/ROOT proxy classes with python-friendly helpers
    plcClass = PyCintex.makeClass("IOVPayloadContainer")
    plcClass.iter = _iter
    tiClass = PyCintex.makeClass("TrigConf::TriggerItem")
    tiClass.__str__ = lambda x: "%s (ctpid %i), ps %g" % (x.name(), x.ctpId(), x.ps)
    chClass = PyCintex.makeClass("TrigConf::HLTChain")
    chClass.__str__ = _chPrint
    # ordering/printing support for IOV types
    ROOT.IOVTime.__str__ = lambda x: "%i/%i" % (x.run(),x.event())
    ROOT.IOVTime.__lt__ = lambda s,o: s.run()<o.run() or s.run()==o.run() and s.event()<o.event()
    ROOT.IOVTime.__eq__ = lambda s,o: s.run()==o.run() and s.event()==o.event()
    ROOT.IOVTime.__le__ = lambda s,o: s<o or s==o
    ROOT.IOVRange.__str__ = lambda x: "%s - %s" % (x.start(),x.stop())
    ROOT.IOVRange.isInRange = lambda x,e: x.start()<=e and e<x.stop()
    print "Opening file(s) and creating transient metadata ..."
    # build the transient MetaData tree, either from a list of file names
    # (via a TChain) or from a single already-open file object
    if type(poolfile) == list:
        self._collection = ROOT.AthenaROOTAccess.TChainROOTAccess('MetaData')
        for file in poolfile:
            self._collection.Add(file)
        self.mdt = AthenaROOTAccess.transientTree.makeTree(self._collection,
                                                           persTreeName = 'MetaData',
                                                           dhTreeName = 'MetaDataHdr',
                                                           dhfilt = _elemFilter )
    else:
        treeNames = [k.GetName() for k in poolfile.GetListOfKeys()]
        if "MetaDataHdrDataHeader" in treeNames:
            dhTreeName = 'MetaDataHdrDataHeader' # old style
        else:
            dhTreeName = 'MetaDataHdr'
        self.mdt = AthenaROOTAccess.transientTree.makeTree(poolfile,
                                                           persTreeName = 'MetaData',
                                                           dhTreeName = dhTreeName,
                                                           dhfilt = _elemFilter )
    # abort early if any required metadata folder is missing from the file
    for key in self.__class__.__keysInUpdateOrder:
        if not hasattr(self.mdt, self.__class__.__folderName[key]):
            print "No key %s in file %s, use checkFile.py to verify" % (self.__class__.__folderName[key],poolfile)
            sys.exit(0)
def electron_cuts(self, cg):
    """Register the electron selection cuts on the cut-group *cg* and return it."""
    # container selection electrons - all standalone
    # Electron Filters:
    import PyCintex
    PyCintex.loadDictionary('egammaEnumsDict')
    # compile and load the object-quality helper macro
    gROOT.ProcessLine(".L checkOQ.C+")
    from ROOT import egammaParameters, egammaOQ
    egOQ = egammaOQ()
    egOQ.initialize()
    from robustIsEMDefs import elRobusterTight
    # authors 1 and 3 are accepted
    cg(Cut("author", lambda el : el.author() in (1,3)))
    def author(el):
        return el.author() in (1,3)
    def pass_otx(el):
        # MC uses a fixed dummy run number; data uses the actual run
        rn = 999999 if self.is_mc else self.run_number
        eta, phi = el.cluster().eta(), el.cluster().phi()
        # status 3 marks a bad OTX region — reject
        return egOQ.checkOQClusterElectron(rn, eta, phi) != 3
    cg(Cut("otx", pass_otx, dep=author))
    def cluster_pt(el):
        # transverse energy from cluster energy and eta
        return el.cluster().e()*sin(theta_from_eta(el.cluster().eta()))
    cg(Cut("pt", lambda el : cluster_pt(el) > 20*GeV, dep=author))
    def eta_in_range(eta):
        # inside acceptance, excluding the barrel/endcap crack
        return abs(eta) < 2.47 and not (1.37 <= abs(eta) <= 1.52)
    cg(Cut("eta", lambda el: eta_in_range(el.cluster().eta()), dep=author))
    cg(Cut("robusterTight", lambda el : elRobusterTight(el), dep=author))
    def vertex0_d0_sig(el):
        assert self.vertices
        assert self.vertices[0]
        assert self.vertices[0].recVertex()
        assert self.vertices[0].recVertex().position()
        assert el.trackParticle()
        # parameters()[0]/error(0) w.r.t. the first vertex — presumably the
        # d0 significance; confirm against the perigee parameter ordering.
        vxp = self.vertices[0].recVertex().position()
        pavV0 = self.tool_ttv.perigeeAtVertex(el.trackParticle(), vxp)
        return pavV0.parameters()[0]/pavV0.localErrorMatrix().error(0)
    # only evaluated when an accepted author AND at least one vertex exist
    cg(Cut("vertex_d0", lambda el : abs(vertex0_d0_sig(el)) < 10, dep=lambda el : author(el) and self.vertices))
    # calorimeter isolation in a cone of 0.3
    cg(Cut("etiso30", lambda el : el.detailValue(egammaParameters.etcone30) < 6*GeV))
    return cg
def BadChan_GetCoolChannelNameFromHWIdentifier(self, sHWid):
    """ Get channel name from HW identifier (only for text menu purpose)

    Maps the detector region (EMB/EMEC/HEC/FCAL) and the side (A when
    pos_neg()==1, C otherwise) of *sHWid* to the matching
    LArBadChannelState enum, and returns
    "<coolChannelName> (<enum value>)".  Returns "" when the identifier
    matches no known region (the original code raised a NameError in
    that case because iEnumChannel was never assigned).
    """
    self.class_LArBadChannelState = PyCintex.makeClass('LArBadChannelState')
    state = self.class_LArBadChannelState()
    isSideA = (self.onlineID.pos_neg(sHWid) == 1)
    # (region predicate, enum for A side, enum for C side); checked in the
    # same order as the original cascade — the last matching region wins.
    regions = (
        (self.onlineID.isEMBchannel,  state.EMBA,  state.EMBC),
        (self.onlineID.isEMECchannel, state.EMECA, state.EMECC),
        (self.onlineID.isHECchannel,  state.HECA,  state.HECC),
        (self.onlineID.isFCALchannel, state.FCALA, state.FCALC),
    )
    iEnumChannel = None
    for matches, enumA, enumC in regions:
        if matches(sHWid):
            iEnumChannel = enumA if isSideA else enumC
    if iEnumChannel is None:
        # unknown region: return the (previously dead) empty default
        return ""
    return state.coolChannelName(iEnumChannel) + " (" + str(iEnumChannel) + ")"
def initialize(self):
    """Acquire StoreGate and the ThinningSvc; load the thinning dictionary."""
    self.msg.info( "Initializing %s", self.name() )
    # event store handle
    # (alternative: PyAthena.py_svc("StoreGateSvc", createIf=True))
    self.sg = PyAthena.StoreGate.pointer("StoreGateSvc")
    # dictionary for the DataVector payloads handled below
    import PyCintex
    PyCintex.loadDict("libAthExThinningEventDict")
    from RootUtils import PyROOTFixes
    # thinning service, created on demand and accessed via IThinningSvc
    self.thinSvc = PyAthena.py_svc("ThinningSvc", createIf=True, iface='IThinningSvc')
    self.msg.info( "==> thinning svc...: %s", self.thinSvc.name() )
    self.filter = self.Filter
    return StatusCode.Success
def getMultiCondContainer(self, channels, attrListColl, iovs):
    """Read back a multi-channel conditions container via PersistencySvc.

    Args:
      channels     - list of channels from COOL
      attrListColl - collection of AttributeLists with string address from COOL for each channel
      iovs         - list of IOVs for each channel
    Returns the object cast to self.type; raises RuntimeError when the
    converter fails to read it.
    """
    # Print out channels and IOV
    print "Channels, IOV, and string addresses"
    i = 0
    for chan in channels:
        iov = iovs[i]
        print "added chan/iov: ", chan, iov.iovPrint()
        i += 1
    # print out collection
    #attrListColl.dump()
    # Must set dummy string address to include type name for
    # AthenaPoolCnvSvc statistics gathering
    strAddress = self.header
    # Get IOpaqueAddress
    print "Create IOpaqueAddress pointer"
    ioa = PyLCGDict.libPyROOT.MakeNullPointer('GenericAddress')
    # Get DataBucketBase pointer
    print "Create DataBucketBase pointer"
    dbb = PyLCGDict.libPyROOT.MakeNullPointer('DataBucketBase')
    # Create IOA from string address
    print "Create IOpaqueAddress for address list"
    sc = self.iaddr.createAddress( 0, 0, strAddress, ioa )
    print "Status code: ", sc
    # Create CondAttrListCollAddress and add in attribute list
    CondAttrListCollAddress = PyLCGDict.makeClass('CondAttrListCollAddress')
    collAddr = CondAttrListCollAddress(ioa)
    collAddr.setAttrListColl(attrListColl)
    # Read in object as DataObject (or DatabucketBase)
    print "Retrieve data object for IOA"
    sc = self.icnv.createObj(collAddr, dbb)
    print "Status code: ", sc
    if sc.isFailure():
        raise RuntimeError("Cannot read object")
    # Cast to correct type and return it
    print "cast data object to correct type: ", self.typeName
    result = PyLCGDict.libPyROOT.MakeNullPointer(self.type)
    self.dbCast.castObject(self.typeName, dbb, result)
    # Reset iterator to allow a new type - bug in pyroot
    #self.pyroot_typedef_bug_workaround()
    return result
def gen_typeregistry_dso(oname=_dflt_typereg_fname):
    '''inspect all the accessible reflex types and get their rootmap-naming.
    also associate the clid if available.
    '''
    # CLID database used for type-name -> clid association
    import CLIDComps.clidGenerator as _c
    cliddb = _c.clidGenerator(db=None)
    del _c
    import PyUtils.path as _p
    oname = _p.path(oname)
    del _p
    import PyUtils.Logging as _L
    msg = _L.logging.getLogger('typereg-dso')
    #msg.setLevel(_L.logging.INFO)
    msg.setLevel(_L.logging.VERBOSE)
    del _L
    msg.info("installing registry in [%s]...", oname)
    # FIXME: should use the Cxx one...
    #reg = DsoDb()
    reg = PyDsoDb()
    cls_names = reg.db.keys()
    msg.debug("::: loading reflex")
    import PyCintex
    PyCintex.Cintex.Enable()
    PyCintex.loadDict('libReflexRflx.so')
    rflx = PyCintex.makeNamespace('Reflex')
    if not rflx:
        # newer ROOT builds nest Reflex under ROOT::
        rflx = PyCintex.makeNamespace('ROOT::Reflex')
    rflx = rflx.Type
    assert(rflx)
    import PyCintex
    _load_lib = PyCintex.loadDict
    def _load_dict(libname, retry=10):
        # best-effort dictionary load: warn and continue on failure.
        # NOTE(review): 'retry' is never used in the visible body — confirm
        # whether retries were intended.
        msg.debug("::: loading [%s]...", libname)
        try:
            return _load_lib(libname)
        except (Exception, SystemError,), err:
            msg.warning("**error** %s", err)
        return
def _loadCintexDict():
    """Simple wrapper around the loading of dictionary class for the
    Chain class.
    """
    import PyCintex
    PyCintex.Cintex.Enable()
    return PyCintex.makeClass("HLT::Chain")
def __initialize(self): PyCintex.Cintex.Enable() # global name space self.gbl = gbl = PyCintex.Namespace('') # load reflex _load_dict = PyCintex.loadDict _load_dict('ReflexRflx') # Create the Reflex::Type class print "...creating Reflex::Type class..." _rflx = PyCintex.makeNamespace('Reflex') if not _rflx: _rflx = PyCintex.makeNamespace('ROOT::Reflex') _rflx_type = _rflx.Type.ByName self.rflxType = _rflx.Type return
def __initialize(self): PyCintex.Cintex.Enable() # global name space self.gbl = gbl = PyCintex.Namespace('') # load reflex _load_dict = PyCintex.loadDict _load_dict ('ReflexRflx') # Create the Reflex::Type class print "...creating Reflex::Type class..." _rflx = PyCintex.makeNamespace ('Reflex') if not _rflx: _rflx = PyCintex.makeNamespace ('ROOT::Reflex') _rflx_type = _rflx.Type.ByName self.rflxType = _rflx.Type return
def setDescription(self, descr) : self.descr = descr # extract type name and type decoder = DescriptionDecoder(descr) self.typeName = decoder.extract('<typeName>','</typeName>') self.type = PyLCGDict.makeClass(self.typeName) # extract the address header self.header = decoder.extract('<addrHeader>','</addrHeader>') self.header += 'POOLContainer_CondAttrListCollection][CLID=x' print "Type name, type, header ",self.typeName,self.type,self.header
def __init__(self, theApp):
    """Cache EventPersistencySvc under the interfaces needed for conversion."""
    persistency = theApp.service('EventPersistencySvc')
    # PersistencySvc via its IConverter interface
    self.icnv = InterfaceRDS('IConverter').cast(persistency)
    # PersistencySvc via its IAddressCreator interface
    self.iaddr = InterfaceRDS('IAddressCreator').cast(persistency)
    # helper class used for the final DataBucket cast
    self.dbCast = PyLCGDict.makeClass('DataBucketCast')
def __init__(self, t):
    """Wrap a conditions-container class and attach its iterator helpers.

    t is either the class name (a string, resolved through
    PyLCGDict.makeClass) or the class itself.
    """
    if type(t) is str:
        self.typeName = t
        t = PyLCGDict.makeClass(t)
    else:
        # NOTE(review): reads the name from t.type, not from t itself —
        # presumably the PyLCGDict class proxy exposes its C++ class via
        # '.type'; confirm t.__name__ was not what was meant here.
        self.typeName = t.type.__name__
    # Save type
    self.type = t
    # Add on iterators for the different containers
    self.type.conditionsIter = conditionsIter
    self.type.correctionsIter = correctionsIter
    self.type.coolChannelIter = coolChannelIter
    self.type.coolIOVIter = coolIOVIter
# PerfMonEvent data-class bindings: load the C++ dictionary once at import
# time and re-export the PerfMon header/data types at module level.
__version__ = "$Revision: 1.8 $"
__author__ = "Sebastien Binet <*****@*****.**>"
__all__ = [ 'CpuHdr', 'IoHdr', 'MemHdr', 'PersHdr',
            'CpuData', 'IoData', 'MemData',
            #'PersData',
            ]
import ROOT
import PyCintex
PyCintex.Cintex.Enable()                        # enable Reflex->PyROOT bridging
PyCintex.loadDictionary('libPerfMonEventDict')  # pull in the PerfMon dictionary
PerfMon = PyCintex.makeNamespace('PerfMon')
# module-level aliases into the PerfMon namespace
CpuHdr = PerfMon.CpuHdr
IoHdr = PerfMon.IoHdr
MemHdr = PerfMon.MemHdr
PersHdr = PerfMon.PersHdr
CpuData = PerfMon.CpuData
IoData = PerfMon.IoData
MemData = PerfMon.MemData
#PersData = PerfMon.PersData
def configure(self, joboptions=None, commands=None, dllname=None,
              factname=None, extra_options=None):
    """Assemble the job-options, configure the C++ application in a forked
    subprocess, then build and return the gaudi.AppMgr proxy.

    joboptions: list/tuple of job-option files to include
    commands:   extra job-option commands prepended to the configuration
    dllname/factname: forwarded to gaudi.AppMgr
    Raises RuntimeError when the forked configuration step fails.
    """
    if not (self.app is None):
        self.msg.info('C++ application already configured')
        return self.app
    self.msg.info('configuring application...')
    # stash whatever was already written into self.cfg, then start fresh
    usr_cfg = AthCfg()
    self.cfg.seek(0)
    usr_cfg << self.cfg.read()
    # reset
    self.cfg = AthCfg()
    if commands:
        self.cfg << commands + '\n'
    # common configuration
    self.cfg << """
# basic job configuration
include('AthenaCommon/Atlas.UnixStandardJob.py')
include.block('AthenaCommon/Atlas.UnixStandardJob.py')
if not (not %(run_batch)s and theApp.EventLoop == 'PyAthenaEventLoopMgr'):
    # make SIG_INT fatal
    svcMgr.CoreDumpSvc.FatalHandler = -1
""" % { 'run_batch' : self.options.run_batch }
    self.cfg << """
# user level configuration
try:
    include('$HOME/.athenarc')
except IncludeError:
    pass
"""
    # another user level configuration
    usr_cfg.seek(0)
    self.cfg << usr_cfg.read()
    if isinstance(joboptions, (list, tuple)):
        for jobo_name in joboptions:
            self.cfg.include(jobo_name)
    if not self.options.run_batch:
        self.cfg << """
theApp.EventLoop = 'PyAthenaEventLoopMgr'
svcMgr += CfgMgr.PyAthenaEventLoopMgr()
"""
    self.cfg << """
### logging and messages ---------
from AthenaCommon.Logging import *
_msg = log
_msg.setLevel(getattr(logging, '%(output_level)s'))
import AthenaCommon.Constants as Lvl
theApp.setOutputLevel(%(output_level)s)
theApp.OutputLevel = Lvl.%(output_level)s
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
svcMgr.MessageSvc.OutputLevel = Lvl.%(output_level)s
""" % dict(output_level=self.options.msg_lvl)
    self.cfg << """
from AthenaCommon.Include import Include, IncludeError, include
include.setShowIncludes(%(showincludes)s)
if %(showincludes)s:
    import AthenaCommon.Include as AthCIncMod
    AthCIncMod.marker=' -#-' # distinguish bootstrap from other jo-code
""" % dict(showincludes=self.options.showincludes)
    cfg_name = self.cfg._jobo.name.replace('.py', '.pkl')
    self.msg.info('dumping job-configuration into [%s]...', cfg_name)
    # run configuration in a forked-subprocess...
    sc = _app_configure(self.cfg, cfg_name, extra_options)
    if sc:
        err = 'could not configure application [sc=%d]' % sc
        self.msg.error(err)
        raise RuntimeError(err)
    self.msg.info('configuring application w/ [%s]', cfg_name)
    import os
    self.cfg._jobo.close()
    os.remove(self.cfg._jobo.name)
    import PyCintex
    PyCintex.Cintex.Enable()
    gbl = PyCintex.makeNamespace('')
    import GaudiPython.Bindings as gaudi
    # remove the gaudimodule exit handler as to prevent them from clobering
    import atexit
    for hdlr in reversed(atexit._exithandlers[:]):
        module_name = hdlr[0].__module__
        if ('GaudiPython' in module_name or 'gaudimodule' in module_name):
            atexit._exithandlers.remove(hdlr)
    del hdlr
    # install our own exit handler (if needed)
    import sys
    if hasattr(sys, 'ps1'):  # ie: is interactive
        atexit.register(self.exit)
    del atexit
    from . import ResourceLimits
    ResourceLimits.SetMaxLimits()
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    import PyUtils.dbsqlite as dbs
    # read back the configuration produced by the forked process
    db = dbs.open(cfg_name, 'r')
    jobo_cfg = db['jobopts']
    kw = jobo_cfg['ApplicationMgr']
    # these keys must not be forwarded to the AppMgr constructor
    for k in ('Go', 'Exit', 'AuditInitialize', 'AuditFinalize'):
        if k in kw:
            del kw[k]
    outputlevel = jobo_cfg['ApplicationMgr']['OutputLevel']
    self.app = gaudi.AppMgr(outputlevel=outputlevel,
                            selfoptions=kw,
                            dllname=dllname,
                            factname=factname)
    # open the pycomps folder
    pycomps = db.get('pycomps', None)  # just opening it should do
    if pycomps:
        import AthenaPython.Configurables as _C
        _C.PyComponents.instances = dict((p.name, p) for p in pycomps)
        #_C.PyComponents.instances = pycomps
        for p in pycomps:
            if hasattr(p, 'setup'):
                if callable(p.setup):
                    p.setup()
        setattr(self, '_pycomps', pycomps)
    import AthenaPython.PyAthena as PyAthena
    josvc = PyAthena.py_svc('JobOptionsSvc', createIf=False,
                            iface='IJobOptionsSvc')
    assert josvc is not None
    # push every remaining client property into the job-options catalogue
    for client in jobo_cfg:
        if client == 'ApplicationMgr':
            continue
        for n, v in jobo_cfg[client].iteritems():
            p = gaudi.StringProperty(n, v)
            if not josvc.addPropertyToCatalogue(client, p).isSuccess():
                self.msg.error('could not add property [%s.%s = %s]',
                               client, n, v)
            if client in ('MessageSvc', 'JobOptionsSvc'):
                svc = PyAthena.py_svc(client, iface='IProperty')
                svc.setProperty(p)
    db.close()
    import os
    if os.path.exists(cfg_name):
        os.remove(cfg_name)
    pass
    #import AthenaCommon.Debugging as dbg
    #dbg.hookDebugger()
    return self.app
def execute(self):
    """Per-event analysis: compare the reconstructed vertex/energy against
    the simulated ('scint') truth and fill the monitoring histograms.

    Returns SUCCESS (possibly skipping the event) or FAILURE on missing
    detector/calibration data.
    """
    print "Executing ReconAnaAlg", self.name()
    # reset per-event observed/expected charge accumulators (192 PMTs)
    self.obsQ = array('d', 192*[0])
    self.expQ = array('d', 192*[0])
    self.totalObsQ = 0.0
    evt = self.evtSvc()
    # SimEvent Data
    simhdr = evt['/Event/Sim/SimHeader']
    if simhdr == None:
        # fall back to looking the SimHeader up through the readout header
        roh = evt['/Event/Readout/ReadoutHeader']
        ilist = roh.findHeaders(51301)
        if ilist.size() == 1:
            simhdr = ilist[0]
    if simhdr == None:
        print "No SimHeader in this ReadOut. Skip."
        return SUCCESS
    # charge-weighted truth position from the unobservable statistics
    statshdr = simhdr.unobservableStatistics()
    stats = statshdr.stats()
    self.qEdep = stats["QEDepInGdLS"].sum() + stats["QEDepInLS"].sum()
    scintX = stats["xQESumGdLS"].sum() + stats["xQESumLS"].sum()
    scintY = stats["yQESumGdLS"].sum() + stats["yQESumLS"].sum()
    scintZ = stats["zQESumGdLS"].sum() + stats["zQESumLS"].sum()
    if self.qEdep < 0.1:
        print "Low energy deposit in LS or GdLS. Skip."
        return SUCCESS
    # normalise the charge-weighted sums to get the average position
    scintX = scintX/self.qEdep
    scintY = scintY/self.qEdep
    scintZ = scintZ/self.qEdep
    genX = stats["x_Trk1"].sum()
    genY = stats["y_Trk1"].sum()
    genZ = stats["z_Trk1"].sum()
    # Get underlying DE object
    de = self.getDet(self.target_de_name)
    if not de:
        print 'Failed to get DE', self.target_de_name
        return FAILURE
    # Get the AD coordinates of the vertexes (global -> local transform)
    Gaudi = PyCintex.makeNamespace('Gaudi')
    scintGlbPoint = Gaudi.XYZPoint(scintX, scintY, scintZ)
    scintLclPoint = de.geometry().toLocal(scintGlbPoint)
    genGlbPoint = Gaudi.XYZPoint(genX, genY, genZ)
    genLclPoint = de.geometry().toLocal(genGlbPoint)
    self.scintX = scintLclPoint.x()
    self.scintY = scintLclPoint.y()
    self.scintZ = scintLclPoint.z()
    self.genX = genLclPoint.x()
    self.genY = genLclPoint.y()
    self.genZ = genLclPoint.z()
    self.stats["file0/hists/scintX"].Fill(self.scintX / units.centimeter)
    self.stats["file0/hists/scintY"].Fill(self.scintY / units.centimeter)
    self.stats["file0/hists/scintZ"].Fill(self.scintZ / units.centimeter)
    self.stats["file0/hists/scintE"].Fill(self.qEdep)
    # skip events generated outside the liquid-scintillator volume
    radialRGen = math.sqrt(self.genX*self.genX+self.genY*self.genY)
    if radialRGen > 2000. or ROOT.TMath.Abs(self.genZ) > 2000:
        self.info("Generated vertex is beyond the LS. Skip.")
        return SUCCESS
    # CalibReadoutEvent Data
    croHdr = evt["/Event/CalibReadout/CalibReadoutHeader"]
    if croHdr == None:
        self.error("Failed to get current calib readout header")
        return FAILURE
    readout = croHdr.calibReadout()
    if readout == None:
        self.info("No calibrated readout this cycle")
        print "scintE: ", self.qEdep, " MeV"
        return SUCCESS
    if readout.channelReadout().size() == 0:
        self.info("no channel Readout")
        return SUCCESS
    svcMode = ServiceMode(croHdr.context(), 0)
    # accumulate the observed charge per PMT
    for channel in readout.channelReadout():
        #channel = channelPair.second
        pmtId = channel.pmtSensorId().fullPackedData()
        #pmtId = self.cableSvc.adPmtSensor(chanId, svcMode)
        # localId = (pmtId.ring()-1)*24 + (pmtId.column()-1)
        # ring/column are unpacked from the packed sensor id
        ring = (pmtId & 0x0000ff00) >> 8
        column = (pmtId & 0x000000ff)
        localId = (ring-1)*24 + (column-1)
        self.obsQ[localId] = channel.maxCharge()
        self.totalObsQ = self.totalObsQ + channel.maxCharge()
        self.stats["file0/hists/croPmtPeakAdcPrf"].Fill(localId, channel.maxCharge())
    if self.totalObsQ < 50:
        self.info("Low total p.e number. Skip.")
        return SUCCESS
    # RecEvent Data
    recHdr = evt["/Event/Rec/AdMLRec"]
    if recHdr == None:
        self.error("Failed to get current RecHeader")
        return FAILURE
    #recResults = recHdr.recTrigger()
    recTrigger = recHdr.recTrigger()
    if recTrigger == None:
        self.info("No recTrigger this cycle")
        return SUCCESS
    #for recPair in irange(recResults.begin(), recResults.end()):
    #recTrigger = recPair.second
    # histogram names, all suffixed with the reconstruction name
    recName = "ML"
    histRecXName = "recX_" + recName
    histRecYName = "recY_" + recName
    histRecZName = "recZ_" + recName
    histDeltaXName = "deltaX_" + recName
    histDeltaYName = "deltaY_" + recName
    histDeltaZName = "deltaZ_" + recName
    histResoRName = "resoR_" + recName
    histDriftRName = "driftR_" + recName
    histRZName = "RZ_" + recName
    prfqmuRName = "qmuR_" + recName
    prfqmuThetaName = "qmuTheta_" + recName
    prfqmuQobsName = "qmuQobs_" + recName
    prfqmuPMTName = "qmuPMT_" + recName
    histErecRrecName = "hist_ErecR_" + recName
    histErecZrecName = "hist_ErecZ_" + recName
    histChi2TestName = "Chi2Test_" + recName
    vtxQualityName = "vtxQuality_" + recName
    prfErecRrecName = "prf_ErecRrec_" + recName
    prfErecZrecName = "prf_ErecZrec_" + recName
    prfQsumRtrueName = "prf_QsumR_" + recName
    prfQsumZtrueName = "prf_QsumZ_" + recName
    prfEratioRtrueName = "prf_EratioRtrue_" + recName
    prfEratioZtrueName = "prf_EratioZtrue_" + recName
    prfRbiasRtrueName = "prf_Rbias_Rtrue_" + recName
    prfZbiasZtrueName = "prf_Zbias_Ztrue_" + recName
    histRbiasRtrueName = "hist_Rbias_Rtrue_" + recName
    histZbiasZtrueName = "hist_Zbias_Ztrue_" + recName
    if self.firstEntry:
        # Make the histograms (booked once, on the first event)
        self.stats["file0/hists/%s"%histRecXName] = TH1F(histRecXName, "recX (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histRecYName] = TH1F(histRecYName, "recY (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histRecZName] = TH1F(histRecZName, "recZ (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histDeltaXName] = TH1F(histDeltaXName, "recX - scintX (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histDeltaYName] = TH1F(histDeltaYName, "recY - scintY (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histDeltaZName] = TH1F(histDeltaZName, "recZ - scintZ (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histResoRName] = TH1F(histResoRName, "|#vec{R}_{rec} - #vec{R}_{scint}| (cm)", 50, 0, 100)
        self.stats["file0/hists/%s"%histDriftRName] = TH1F(histDriftRName, "|#vec{R}_{rec}| - |#vec{R}_{scint}| (cm)", 200, -100, 100)
        self.stats["file0/hists/%s"%histRZName] = TH2F(histRZName, "R_{rec}^{radial} v.s Z_{rec} [cm]", 250, 0, 250, 500, -250.0, 250.)
        self.stats["file0/hists/%s"%histErecRrecName] = TH2F(histErecRrecName, "E_{rec} (a.u) v.s (R_{rec}^{radial})^{2} [m]", 60, 0.0, 6.0, 1000, 0.0, 10.)
        self.stats["file0/hists/%s"%histErecZrecName] = TH2F(histErecZrecName, "E_{rec} (a.u) v.s Z_{rec} [m]", 50, -2.5, 2.5, 1000, 0.0, 10.)
        self.stats["file0/hists/%s"%prfErecRrecName] = TProfile(prfErecRrecName, "E_{rec} (a.u) v.s (R_{rec}^{radial})^{2} [m^{2}]", 60, 0.0, 6.0, 0.0, 10., "e")
        self.stats["file0/hists/%s"%prfErecZrecName] = TProfile(prfErecZrecName, "E_{rec} (a.u) v.s Z_{rec} [m]", 50, -2.5, 2.5, 0.0, 10., "e")
        self.stats["file0/hists/%s"%prfQsumZtrueName] = TProfile(prfQsumZtrueName, "totalCharge v.s Z_{true} [m]", 50, -2.5, 2.5, 0.0, 500., "e")
        self.stats["file0/hists/%s"%prfQsumRtrueName] = TProfile(prfQsumRtrueName, "totalCharge v.s (R_{true}^{radial})^{2} [m^{2}]", 60, 0, 6.0, 0.0, 500., "e")
        self.stats["file0/hists/%s"%prfEratioRtrueName] = TProfile(prfEratioRtrueName, "E_{rec}/E_{true} [a.u] v.s (R_{true}^{radial})^{2} [m]", 60, 0., 6.0, 0.0, 5., "e")
        self.stats["file0/hists/%s"%prfEratioZtrueName] = TProfile(prfEratioZtrueName, "E_{rec}/E_{true} [a.u] v.s Z_{true} [m]", 50, -2.5, 2.5, 0.0, 5., "e")
        self.stats["file0/hists/%s"%prfRbiasRtrueName] = TProfile(prfRbiasRtrueName, "R_{bias} [cm] v.s (R_{true}^{radial})^{2} [m]", 60, 0., 6.0, -100., 100., "e")
        self.stats["file0/hists/%s"%prfZbiasZtrueName] = TProfile(prfZbiasZtrueName, "Z_{bias} [cm] v.s Z_{true}^{2} [m]", 50, -2.5, 2.5, -100., 100., "e")
        self.stats["file0/hists/%s"%histRbiasRtrueName] = TH2F(histRbiasRtrueName, "R_{bias} [cm] v.s (R_{true}^{radial})^{2} [m]", 60, 0., 6.0, 400, -100., 100.)
        self.stats["file0/hists/%s"%histZbiasZtrueName] = TH2F(histZbiasZtrueName, "Z_{bias} [cm] v.s Z_{true}^{2} [m]", 50, -2.5, 2.5, 400, -100., 100.)
        self.stats["file0/hists/%s"%vtxQualityName] = TH1F(vtxQualityName, "vertex quality", 2000, 0, 4000.)
    # reconstructed vertex and energy
    recX = recTrigger.position().x()
    recY = recTrigger.position().y()
    recZ = recTrigger.position().z()
    recE = recTrigger.energy()
    vtxQuality = recTrigger.positionQuality()
    print "vtxQuality: ", vtxQuality
    # residuals of the reconstructed vertex against the scint truth
    vtxRec = ROOT.TVector3( recX, recY, recZ)
    vtxScint = ROOT.TVector3(self.scintX, self.scintY, self.scintZ)
    deltaX = recX - self.scintX
    deltaY = recY - self.scintY
    deltaZ = recZ - self.scintZ
    vtxReso = ROOT.TVector3( deltaX, deltaY, deltaZ)
    driftR = vtxRec.Mag() - vtxScint.Mag()
    resoR = vtxReso.Mag()
    radialR2 = recX*recX + recY*recY
    radialR = math.sqrt(recX*recX + recY*recY)
    radialTrueR2 = (self.scintX*self.scintX + self.scintY*self.scintY)
    radialTrueR = math.sqrt(self.scintX*self.scintX + self.scintY*self.scintY)
    self.stats["file0/hists/%s"%histRecXName].Fill(recX/units.centimeter)
    self.stats["file0/hists/%s"%histRecYName].Fill(recY/units.centimeter)
    self.stats["file0/hists/%s"%histRecZName].Fill(recZ/units.centimeter)
    self.stats["file0/hists/%s"%histDeltaXName].Fill(deltaX/units.centimeter)
    self.stats["file0/hists/%s"%histDeltaYName].Fill(deltaY/units.centimeter)
    self.stats["file0/hists/%s"%histDeltaZName].Fill(deltaZ/units.centimeter)
    self.stats["file0/hists/%s"%histResoRName].Fill(resoR/units.centimeter)
    self.stats["file0/hists/%s"%histDriftRName].Fill(driftR/units.centimeter)
    self.stats["file0/hists/%s"%histRZName].Fill(radialR/units.centimeter, recZ/units.centimeter)
    self.stats["file0/hists/%s"%histErecRrecName].Fill(radialR2/(units.meter*units.meter), recE)
    self.stats["file0/hists/%s"%histErecZrecName].Fill(recZ/units.meter, recE)
    self.stats["file0/hists/%s"%prfErecRrecName].Fill(radialR2/(units.meter*units.meter), recE)
    self.stats["file0/hists/%s"%prfErecZrecName].Fill(recZ/units.meter, recE)
    self.stats["file0/hists/%s"%prfQsumRtrueName].Fill(radialTrueR2/(units.meter*units.meter), self.totalObsQ)
    self.stats["file0/hists/%s"%prfQsumZtrueName].Fill(self.scintZ/units.meter, self.totalObsQ)
    self.stats["file0/hists/%s"%prfEratioRtrueName].Fill(radialTrueR2/(units.meter*units.meter), recE/self.qEdep)
    self.stats["file0/hists/%s"%prfEratioZtrueName].Fill(self.scintZ/units.meter, recE/self.qEdep)
    self.stats["file0/hists/%s"%prfRbiasRtrueName].Fill(radialTrueR2/(units.meter*units.meter), (radialR-radialTrueR)/units.centimeter)
    self.stats["file0/hists/%s"%prfZbiasZtrueName].Fill(self.scintZ/units.meter, (recZ-self.scintZ)/units.centimeter)
    self.stats["file0/hists/%s"%histRbiasRtrueName].Fill(radialTrueR2/(units.meter*units.meter), (radialR-radialTrueR)/units.centimeter)
    self.stats["file0/hists/%s"%histZbiasZtrueName].Fill(self.scintZ/units.meter, (recZ-self.scintZ)/units.centimeter)
    self.stats["file0/hists/%s"%vtxQualityName].Fill(vtxQuality)
    if self.firstEntry:
        # charge-ratio profiles, booked once
        self.stats["file0/hists/%s"%prfqmuRName] = TProfile(prfqmuRName, "Q_{obs}/Q_{exp} v.s R [m]", 60, 0.0, 6.0, -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%prfqmuThetaName] = TProfile(prfqmuThetaName, "Q_{obs}/Q_{exp} (#cos#theta)", 50, 0.0, 1.0, -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%prfqmuPMTName] = TProfile(prfqmuPMTName, "Q_{obs}/Q_{exp} (PMTLocalId)", 192, 0.0, 192, -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%prfqmuQobsName] = TProfile(prfqmuQobsName, "Q_{obs}/Q_{exp} (Qobs)", 50, 0.0, 50., -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%histChi2TestName] = TH1F(histChi2TestName, "#chi^{2} test", 150, 0, 300)
        self.stats["file0/hists/recPmtExpQPrf"] = TProfile("recPmtExpQPrf", "expected charge profile for each PMT", 192, 0, 192, 0.0, 50, "e")
    # expected charge per PMT for the reconstructed vertex
    self.vertex = CLHEP.Hep3Vector(recX*units.mm, recY*units.mm, recZ*units.mm)
    self.siteName = "DayaBay"
    self.detName = "AD1"
    self.siteIds = { 'DayaBay' : gbl.Site.kDayaBay,
                     'LingAo' : gbl.Site.kLingAo,
                     'Far' : gbl.Site.kFar, }
    self.detIds = { 'AD1' : gbl.DetectorId.kAD1,
                    'AD2' : gbl.DetectorId.kAD2,
                    'AD3' : gbl.DetectorId.kAD3,
                    'AD4' : gbl.DetectorId.kAD4, }
    site = self.siteIds[self.siteName]
    detector = self.detIds[self.detName]
    qtool = self.tool('IReconHelperTool', 'ExpQCalcTool')
    expq = qtool.expqcalc(site, detector, self.vertex)
    for localId in range(0, 192):
        # scale the raw expectation to the observed-charge normalisation
        # NOTE(review): magic factors (3.14*103*103*9000*0.2/4.0/3.14159625)
        # are undocumented — confirm their origin.
        expq[localId] = expq[localId]*recE*3.14*103*103*9000*0.2/4.0/3.14159625
        self.expQ[localId] = expq[localId]
        self.stats["file0/hists/recPmtExpQPrf"].Fill(localId, self.expQ[localId])
    #
    self.chi2 = 0.0
    svcmode = ServiceMode(recHdr.context(), 0)
    for localId in range(0, 192):
        ring = localId/24 + 1
        column = localId%24 + 1
        pmtId = AdPmtSensor(ring, column, recTrigger.detector().site(),
                            recTrigger.detector().detectorId())
        chanId = self.cableSvc.feeChannelId(pmtId, svcmode)
        pmtCalib = self.calibSvc.pmtCalibData(pmtId, svcmode)
        if pmtCalib == None:
            self.error("No calib data for pmt local ID:%I" %localId)
            return FAILURE
        if pmtCalib.m_status != PmtCalibData.kGood:
            continue
        pmtPos = self.pmtSvc.get(pmtId.fullPackedData()).localPosition()
        pmtNorm = self.pmtSvc.get(pmtId.fullPackedData()).localDirection()
        # distance and incidence angle from the vertex to this PMT
        distVec = ROOT.TVector3( recX - pmtPos.x(), recY - pmtPos.y(), recZ - pmtPos.z() )
        dist = distVec.Mag()
        costheta = (distVec.x()*pmtNorm.x() + \
                    distVec.y()*pmtNorm.y() + \
                    distVec.z()*pmtNorm.z() )/dist
        dist = dist/1000.
        self.stats["file0/hists/%s"%prfqmuRName].Fill(dist, self.obsQ[localId]/self.expQ[localId])
        self.stats["file0/hists/%s"%prfqmuThetaName].Fill(costheta, self.obsQ[localId]/self.expQ[localId])
        self.stats["file0/hists/%s"%prfqmuQobsName].Fill(self.obsQ[localId], self.obsQ[localId]/self.expQ[localId])
        self.stats["file0/hists/%s"%prfqmuPMTName].Fill(localId, self.obsQ[localId]/self.expQ[localId])
        # chi2 of observed vs expected charge
        self.chi2 += math.pow(self.obsQ[localId]-self.expQ[localId], 2) \
                     / self.expQ[localId]
    self.stats["file0/hists/%s"%histChi2TestName].Fill(self.chi2)
    #
    self.firstEntry = False
    return SUCCESS
def _pythonize_tfile():
    """Monkey-patch ROOT.TFile with python file-protocol methods.

    After this runs, TFile instances grow ``read``, ``seek`` and ``tell``
    methods implemented on top of the RootUtils C++ helpers, so a TFile
    can be handed to code expecting a (read-only) file-like object.
    """
    import PyCintex
    PyCintex.Cintex.Enable()
    root = import_root()

    import PyUtils.Helpers as H
    # silence the known, harmless warnings emitted while loading the dictionary
    with H.ShutUp(filters=[
        re.compile(
            'TClass::TClass:0: RuntimeWarning: no dictionary for.*'),
        re.compile('Warning in <TEnvRec::ChangeValue>: duplicate entry.*'),
        ]):
        PyCintex.loadDict("RootUtilsPyROOTDict")
        rootutils = getattr(root, "RootUtils")
        # NOTE(review): 'pybytes' looks unused, but the getattr may be needed
        # to force instantiation of PyBytes from the dictionary -- confirm
        pybytes = getattr(rootutils, "PyBytes")
        read_root_file = getattr(rootutils, "_pythonize_read_root_file")
        tell_root_file = getattr(rootutils, "_pythonize_tell_root_file")
        pass

    def read(self, size=-1):
        """read([size]) -> read at most size bytes, returned as a string.

        If the size argument is negative or omitted, read until EOF is
        reached. Notice that when in non-blocking mode, less data than what
        was requested may be returned, even if no size parameter was given.

        FIXME: probably doesn't follow python file-like conventions...
        """
        SZ = 4096

        if size >= 0:
            #size = _adjust_sz(size)
            #print "-->0",self.tell(),size
            c_buf = read_root_file(self, size)
            if c_buf and c_buf.sz:
                #print "-->1",self.tell(),c_buf.sz
                #self.seek(c_buf.sz+self.tell())
                #print "-->2",self.tell()
                buf = c_buf.buffer()
                buf.SetSize(c_buf.sz)
                return str(buf[:])
            return ''
        else:
            # negative size: drain the file in SZ-byte chunks until EOF
            size = SZ
            out = []
            while True:
                #size = _adjust_sz(size)
                c_buf = read_root_file(self, size)
                if c_buf and c_buf.sz:
                    buf = c_buf.buffer()
                    buf.SetSize(c_buf.sz)
                    out.append(str(buf[:]))
                else:
                    break
            return ''.join(out)

    root.TFile.read = read
    del read
    # Seek already has the right signature; tell goes through the C++ helper
    root.TFile.seek = root.TFile.Seek
    root.TFile.tell = lambda self: tell_root_file(self)
    ## import os
    ## def tell(self):
    ##     fd = os.dup(self.GetFd())
    ##     return os.fdopen(fd).tell()
    ## root.TFile.tell = tell
    ## del tell
    return
def BadChan_SaveBadChannelCorrectionsToDatabase(self, dbstring, dbSvc, dbFolderName, selectedTag):
    """Write the accumulated bad-channel corrections to a COOL database.

    For every cool channel with at least one MODIFIED/NEW/REMOVED entry,
    builds a LArBadChannelState payload and stores it under dbFolderName
    with the given tag. The output database/folder is created lazily on
    the first channel that actually needs storing.

    NOTE: Python 2 code (print statements, `except Exception, e`).
    """
    listKeys = self.dict_vectBadChanEntry.keys()
    listKeys.sort()

    # Create LArBadChannel object for defined BadChanEntry vector and coolChan
    self.class_LArBadChannelState = PyCintex.makeClass('LArBadChannelState')
    inst_larBadChannelState = self.class_LArBadChannelState()

    # Loop over cool channels
    bStoreNewCoolChannels = False
    bNewDBCreated = False
    for coolChan in listKeys:
        vect_BadChanEntry = PyCintex.gbl.std.vector('std::pair<HWIdentifier,LArBadChannel>')()
        listHWidKeys = [x for x in self.dict_vectBadChanEntry[coolChan].keys()]
        listHWidKeys.sort()
        iNbCorrection = 0
        for key in listHWidKeys:
            sHWid = key
            # Pick the bad-channel word according to the entry's edit status.
            # NOTE(review): if a status value matches none of INIT / MODIFIED /
            # NEW / REMOVED, 'badChan_word' keeps its value from the previous
            # iteration (or is undefined on the first one) -- verify the
            # status enum is exhaustive.
            if self.dict_vectBadChanEntry_Status[coolChan][sHWid] == STATUS_INIT:
                badChan_word = self.dict_vectBadChanEntry_Init[coolChan][sHWid][1]
            elif self.dict_vectBadChanEntry_Status[coolChan][sHWid] == STATUS_MODIFIED or self.dict_vectBadChanEntry_Status[coolChan][sHWid] == STATUS_NEW:
                iNbCorrection += 1
                badChan_word = self.dict_vectBadChanEntry[coolChan][sHWid][1]
            elif self.dict_vectBadChanEntry_Status[coolChan][sHWid] == STATUS_REMOVED:
                # removed entries count as a correction but are not re-stored
                iNbCorrection += 1
                continue
            obj_HWid = self.class_HWIdentifier()
            obj_HWid.set(sHWid)
            larBadChannel = self.class_LArBadChannel(badChan_word)
            pair_BadChanEntry = PyCintex.gbl.pair('HWIdentifier,LArBadChannel')(obj_HWid, larBadChannel)
            vect_BadChanEntry.push_back(pair_BadChanEntry)

        # if correction were made => store BadChanEntry vector
        if iNbCorrection > 0:
            for sEntry in vect_BadChanEntry:
                inst_larBadChannelState.add(sEntry, coolChan)
            bStoreNewCoolChannels = True
        else:
            # nothing changed for this cool channel: skip the storage step
            continue

        # Create object based on new LArBadChannelState (via LArBadChannelDBTools python interface)
        attrListSpec = PyCintex.gbl.coral.AttributeListSpecification()
        athenaAttrList = PyCintex.gbl.AthenaAttributeList()
        attrListSpec = self.nspace_LArBadChannelDBTools.createCoolSpec()
        athenaAttrList = self.nspace_LArBadChannelDBTools.createPayload(
            inst_larBadChannelState.coolChannel(coolChan), attrListSpec)

        # if save DB has not been created => do it
        if bNewDBCreated == False:
            import os
            try:
                dbSave = dbSvc.createDatabase(dbstring)
            except Exception, e:
                print 'Problem opening database', e
                sys.exit(-1)
            print "Opened database", dbstring
            desc = '<timeStamp>run-event</timeStamp><addrHeader><address_header service_type="71" clid="40774348" /></addrHeader><typeName>AthenaAttributeList</typeName>'

            # Create cool spec from AttributeListSpec
            coolSpec = cool.RecordSpecification()
            for iElemt in range(0, attrListSpec.size()):
                attrSpec = attrListSpec[iElemt]
                typeName = attrSpec.typeName()
                if typeName == "unsigned int":
                    coolSpec.extend(attrSpec.name(), cool.StorageType.UInt32)
                elif typeName == "blob":
                    coolSpec.extend(attrSpec.name(), cool.StorageType.Blob64k)
                else:
                    print "Undefined cool.StorageType " + typeName
            # myfolder=dbSave.createFolder(dbFolderName, coolSpec, desc, cool.FolderVersioning.SINGLE_VERSION,True)
            myfolder = dbSave.createFolder(
                dbFolderName, coolSpec, desc,
                cool.FolderVersioning.MULTI_VERSION, True)
            import string
            # IOV = [run 90, run 9999999), encoded as (run << 32)
            IOVBeginEnd = ["90", "9999999"]
            beginRun = string.atoi(IOVBeginEnd[0]) << 32
            endRun = string.atoi(IOVBeginEnd[1]) << 32
            bNewDBCreated = True

        # Create cool payload from AthenaAttributeList payload
        # (coolSpec/beginRun/endRun were set up when the DB was first created)
        coolPayload = cool.Record(coolSpec)
        for iElemt in range(0, attrListSpec.size()):
            attrSpec = attrListSpec[iElemt]
            coolPayload[attrSpec.name()] = athenaAttrList[attrSpec.name()]
        # Store cool object to folder
        myfolder.storeObject(beginRun, endRun, coolPayload, coolChan, selectedTag)
##=============================================================================== ## Name: TrigEgammaElectronIsEMCutDefs_medium.py ## ## Author: Ryan Mackenzie White ## Created: June 2014 ## ## Description: Medium trigger electron cut definitions for 2014 new tunes. ## ##=============================================================================== import PyCintex try : PyCintex.loadDictionary('ElectronPhotonSelectorToolsDict') except : pass from ROOT import egammaPID # Import a needed helper from PATCore.HelperUtils import * # Define GeV GeV = 1000.0 def TrigElectronIsEMMediumSelectorConfigDC14(theTool) : ''' This is for the Medium++ isEM definitions for the Trigger. ''' theTool = GetTool(theTool)
import AthenaPython.PyAthena as PyAthena import AthenaCommon.SystemOfUnits as Units from AthenaPython.PyAthena import StatusCode import PyCintex import ROOT import math PyCintex.loadDictionary('egammaEnumsDict') # Needed for egammaParameters from ROOT import egammaParameters from ROOT import egammaPID PyCintex.loadDict("libTrkTrackSummaryDict") PyCintex.loadDict('libegammaAnalysisUtilsDict') from ROOT import TLorentzVector class HSG2_2L2QDPDFilter(PyAthena.AthFilterAlgorithm): def __init__(self, name="HSG2_2L2QDPDFilter",**kw): kw['name'] = name super(HSG2_2L2QDPDFilter, self).__init__(**kw) self.cutDict=dict() # types are e or mu or jet self.cutDict["types"] = kw.get("types", []) # pT cuts for muons and jets, ET cut for electrons self.cutDict["pTCuts"] = kw.get("pTCuts", []) # quality cuts are LoosePP or ORLH for electrons, combined+lowpt for muons, barrel for jets self.cutDict["qualityCuts"] = kw.get("qualityCuts", []) # Container names in AOD self.cutDict["collections"] = kw.get("collections", []) # Di-lepton mass cut self.cutDict["diLeptonMassCut"] = kw.get("diLeptonMassCut", 5.*Units.GeV) # Electron-jet dR cut self.cutDict["electronJetDRCut"] = kw.get("electronJetDRCut", 0.05) # Negative value means no overlap removal
#=== folder tag suffix tagCon = "LARTimeCorrectionOflNonRunCon-00" tagRun = "LARTimeCorrectionOflRunCon-00" #=== values for the comment channel author = "dhu" comment = "Updated time constant values" #================================================== #=== #=== Code starts below here #=== #================================================== #=== set shortcut g = PyCintex.gbl PyCintex.makeClass('std::vector<float>') #=== get a logger log = CaloCondLogger.getLogger("CaloTimeConsWriter") #=== (re-)create the database db = CaloCondTools.openDb('SQLITE', 'COMP200', 'UPDATE') try: #=== creating folder specifications spec = cool.RecordSpecification() spec.extend('CaloCondBlob16M', cool.StorageType.Blob16M) #=== create the folder folderPath = CaloCondTools.getCaloPrefix( ) + "LAR/TimeCorrectionOfl/NonRunCon"
def configure(self, joboptions=None, commands=None, dllname=None,
              factname=None, extra_options=None):
    """Configure the C++ Gaudi application from user job-options.

    Collects the user configuration into a job-options payload, runs it in
    a forked sub-process (keeping this process free of configuration side
    effects), then creates the AppMgr and replays the pickled job-options
    catalogue into it via the JobOptionsSvc.

    joboptions    -- list/tuple of job-option file names to include
    commands      -- extra python commands prepended to the configuration
    dllname       -- optional DLL name forwarded to gaudi.AppMgr
    factname      -- optional factory name forwarded to gaudi.AppMgr
    extra_options -- extra options forwarded to the configuration sub-process
    Returns the configured gaudi.AppMgr instance (cached in self.app).
    """
    # already configured -> no-op, return the cached application
    if not (self.app is None):
        self.msg.info('C++ application already configured')
        return self.app

    self.msg.info('configuring application...')

    # stash whatever was already written into self.cfg, then rebuild it
    # with the boilerplate first and the user part appended afterwards
    usr_cfg = AthCfg()
    self.cfg.seek(0)
    usr_cfg << self.cfg.read()

    # reset
    self.cfg = AthCfg()
    if commands:
        self.cfg << commands+'\n'

    # common configuration
    self.cfg << """
    # basic job configuration
    include('AthenaCommon/Atlas.UnixStandardJob.py')
    include.block('AthenaCommon/Atlas.UnixStandardJob.py')
    if not (not %(run_batch)s and
            theApp.EventLoop == 'PyAthenaEventLoopMgr'):
        # make SIG_INT fatal
        svcMgr.CoreDumpSvc.FatalHandler = -1
    """ % {'run_batch' : self.options.run_batch}

    self.cfg << """
    # user level configuration
    try:
        include('$HOME/.athenarc')
    except IncludeError:
        pass
    """

    # another user level configuration
    usr_cfg.seek(0)
    self.cfg << usr_cfg.read()

    if isinstance(joboptions, (list,tuple)):
        for jobo_name in joboptions:
            self.cfg.include(jobo_name)

    # interactive mode runs the python event loop manager
    if not self.options.run_batch:
        self.cfg << """
        theApp.EventLoop = 'PyAthenaEventLoopMgr'
        svcMgr += CfgMgr.PyAthenaEventLoopMgr()
        """

    self.cfg << """
    ### logging and messages ---------
    from AthenaCommon.Logging import *
    _msg = log
    _msg.setLevel(getattr(logging, '%(output_level)s'))

    import AthenaCommon.Constants as Lvl
    theApp.setOutputLevel(%(output_level)s)
    theApp.OutputLevel = Lvl.%(output_level)s

    from AthenaCommon.AppMgr import ServiceMgr as svcMgr
    svcMgr.MessageSvc.OutputLevel = Lvl.%(output_level)s
    """ % dict(output_level=self.options.msg_lvl)

    self.cfg << """
    from AthenaCommon.Include import Include, IncludeError, include
    include.setShowIncludes(%(showincludes)s)
    if %(showincludes)s:
        import AthenaCommon.Include as AthCIncMod
        AthCIncMod.marker=' -#-' # distinguish bootstrap from other jo-code
    """ % dict(showincludes=self.options.showincludes)

    # the pickled catalogue lands next to the job-options file
    cfg_name = self.cfg._jobo.name.replace('.py','.pkl')
    self.msg.info('dumping job-configuration into [%s]...',
                  cfg_name)

    # run configuration in a forked-subprocess...
    sc = _app_configure(self.cfg, cfg_name, extra_options)
    if sc:
        err = 'could not configure application [sc=%d]' % sc
        self.msg.error(err)
        raise RuntimeError(err)
    self.msg.info('configuring application w/ [%s]', cfg_name)

    # the job-options file itself is no longer needed
    import os
    self.cfg._jobo.close()
    os.remove(self.cfg._jobo.name)

    import PyCintex
    PyCintex.Cintex.Enable()
    gbl = PyCintex.makeNamespace('')
    import GaudiPython.Bindings as gaudi

    # remove the gaudimodule exit handlers as to prevent them from clobbering
    import atexit
    for hdlr in reversed(atexit._exithandlers[:]):
        module_name = hdlr[0].__module__
        if ('GaudiPython' in module_name or
            'gaudimodule' in module_name):
            atexit._exithandlers.remove(hdlr)
    # NOTE(review): 'del hdlr' raises NameError if the handler list was
    # empty (loop body never ran) -- presumably it never is in practice
    del hdlr

    # install our own exit handler (if needed)
    import sys
    if hasattr(sys, 'ps1'): # ie: is interactive
        atexit.register(self.exit)
    del atexit

    from . import ResourceLimits
    ResourceLimits.SetMaxLimits()

    # cPickle if available (python2), plain pickle otherwise
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    import PyUtils.dbsqlite as dbs
    db = dbs.open(cfg_name, 'r')
    jobo_cfg = db['jobopts']

    kw = jobo_cfg['ApplicationMgr']
    # these keys must not be replayed into the fresh AppMgr
    for k in ('Go', 'Exit', 'AuditInitialize', 'AuditFinalize'):
        if k in kw:
            del kw[k]
    outputlevel = jobo_cfg['ApplicationMgr']['OutputLevel']

    self.app = gaudi.AppMgr(outputlevel=outputlevel,
                            selfoptions=kw,
                            dllname=dllname,
                            factname=factname)

    # open the pycomps folder
    pycomps = db.get('pycomps', None) # just opening it should do
    if pycomps:
        import AthenaPython.Configurables as _C
        _C.PyComponents.instances = dict((p.name, p) for p in pycomps)
        #_C.PyComponents.instances = pycomps
        for p in pycomps:
            if hasattr(p, 'setup'):
                if callable(p.setup):
                    p.setup()
        setattr(self, '_pycomps', pycomps)

    import AthenaPython.PyAthena as PyAthena
    josvc = PyAthena.py_svc('JobOptionsSvc', createIf=False,
                            iface='IJobOptionsSvc')
    assert josvc is not None

    # replay every client's properties into the job-options catalogue
    for client in jobo_cfg:
        if client == 'ApplicationMgr':
            continue
        for n,v in jobo_cfg[client].iteritems():
            p = gaudi.StringProperty(n, v)
            if not josvc.addPropertyToCatalogue(client, p).isSuccess():
                self.msg.error(
                    'could not add property [%s.%s = %s]',
                    client, n, v
                    )
            # these services are already alive: set the property directly too
            if client in ('MessageSvc', 'JobOptionsSvc'):
                svc = PyAthena.py_svc(client, iface='IProperty')
                svc.setProperty(p)
    db.close()

    # clean up the pickled configuration
    import os
    if os.path.exists(cfg_name):
        os.remove(cfg_name)
    pass

    #import AthenaCommon.Debugging as dbg
    #dbg.hookDebugger()

    return self.app
# ToolSvc += mycaloisolationtool #import MCTruthClassifier.MCTruthClassifierBase #print MCTruthClassifier.MCTruthClassifierBase.MCTruthClassifier #MCTruthClassifier.MCTruthClassifierBase.MCTruthClassifier.TrackParticleContainerName = "GSFTrackParticleCandidate" #MCTruthClassifier.MCTruthClassifierBase.MCTruthClassifier.TrackParticleTruthCollection = "GSFTrackParticleTruthCollection" #include ("RecExCommon/ContainerRemapping.py") print "ServiceMgr after:" print ServiceMgr print "end ServiceMgr after" import PyCintex PyCintex.loadDictionary('ElectronPhotonSelectorTools') from ROOT import egammaPID from ElectronPhotonSelectorTools.ConfiguredAsgElectronIsEMSelectors import ConfiguredAsgElectronIsEMSelector electronSelector = ConfiguredAsgElectronIsEMSelector( "myIsEmSelector", egammaPID.ElectronIDMediumPP, OutputLevel=DEBUG) ToolSvc += electronSelector from ElectronPhotonSelectorTools.ConfiguredAsgPhotonIsEMSelectors import ConfiguredAsgPhotonIsEMSelector photonSelector = ConfiguredAsgPhotonIsEMSelector("myPhotonSelector", egammaPID.PhotonIDTightAR, OutputLevel=DEBUG) ToolSvc += photonSelector # Add top algorithms to be run from simpleStudy.simpleStudyConf import TestAlg
def setUp(self):
    """Load the reflex test dictionary and cache the namespaces used below."""
    # Touching the class is enough to force the dictionary library to load.
    PyCintex.makeClass('A::B::C::MyClass')
    ns = PyCintex.makeNamespace
    self.A = ns('A')
    self.std = ns('std')
    self.gbl = ns('')
#usage is: #from PathResolver import PathResolver #PathResolver.FindCalibFile("blah") import PyCintex PyCintex.loadDict('libPathResolverDict') FindCalibFile = PyCintex.gbl.PathResolverFindCalibFile FindCalibDirectory = PyCintex.gbl.PathResolverFindCalibDirectory
def setUp(self):
    """Pre-load the reflex dictionary and bind the namespaces the tests use."""
    # Side effect only: instantiating the class pulls in the dictionary.
    PyCintex.makeClass('A::B::C::MyClass')
    for attr, ns_name in (('A', 'A'), ('std', 'std'), ('gbl', '')):
        setattr(self, attr, PyCintex.makeNamespace(ns_name))
def test25STLIterator(self):
    """The dictionary must provide vector<MyA> and both of its iterators."""
    for cls_name in ('std::vector<MyA>',
                     'std::vector<MyA>::iterator',
                     'std::vector<MyA>::reverse_iterator'):
        self.failUnless(PyCintex.makeClass(cls_name))
def initialize_jets(self):
    """Load the jet utility dictionary and bind the calo-quality helpers."""
    import PyCintex
    PyCintex.loadDictionary("JetUtils")
    from ROOT import JetCaloHelper, JetCaloQualityUtils
    # Bind the helpers once; the lambdas just forward the jet argument.
    em_fraction = JetCaloHelper.jetEMFraction
    hec_fraction = JetCaloQualityUtils.hecF
    self.jet_emf = lambda j: em_fraction(j)
    self.jet_hecF = lambda j: hec_fraction(j)
from AthenaCommon.Logging import logging
mlog = logging.getLogger( 'TrigConfigCheckInPool.py' ) ## get the logger
mlog.level=1

# chain over the MetaData trees of the POOL files that get Add()-ed below
MetaDataTree = ROOT.AthenaROOTAccess.TChainROOTAccess('MetaData')

def _iter(self) :
    """Generator over the payloads of an IOVPayloadContainer.

    Walks the container with its C++ begin()/end() iterators, yielding
    each dereferenced payload.
    """
    sequential = self.begin()
    end = self.end()
    while sequential != end :
        yield sequential.__deref__()
        sequential.__preinc__()
    # FIX: no 'raise StopIteration' here -- falling off the end of a
    # generator already terminates iteration, and an explicit raise
    # becomes a RuntimeError under PEP 479 (python >= 3.7).

_plcClass = PyCintex.makeClass("IOVPayloadContainer")
_plcClass.iter = _iter

# human-readable printing for IOV types
ROOT.IOVTime.__str__ = lambda x: "%i/%i" % (x.run(),x.event())
ROOT.IOVRange.__str__ = lambda x: "%s - %s" % (x.start(),x.stop())

def checkPoolFileForRunLevel(poolfilename):
    # COOL folder name aliases used when scanning the file's metadata
    folderName = {
        "L1M"  : "_TRIGGER_LVL1_Menu",
        "HLTM" : "_TRIGGER_HLT_Menu",
        "L1K"  : "_TRIGGER_LVL1_Lvl1ConfigKey",
        "HLTK" : "_TRIGGER_HLT_HltConfigKeys",
        "L1PS" : "_TRIGGER_LVL1_Prescales"
        }
    MetaDataTree.Add(poolfilename+'/MetaData')
def __init__(self, t ) :
    """Wrap a reflex type; a plain-string name is resolved via PyLCGDict."""
    # print "type is string: ",t
    # Resolve a type handed in by name (exact 'type(t) is str' check kept
    # deliberately: subclasses of str are passed through untouched).
    self.type = PyLCGDict.makeClass(t) if type(t) is str else t
rflx = rflx.Type assert(rflx) import PyCintex _load_lib = PyCintex.loadDict def _load_dict(libname,retry=10): msg.debug("::: loading [%s]...", libname) try: return _load_lib(libname) except (Exception,SystemError,), err: msg.warning("**error** %s", err) return # we need to pre-load these guys as HepPDT is missing a linkopts # against HepPID. see bug #46551 hep_pid = PyCintex.loadDict('libHepPID.so') hep_pdt = PyCintex.loadDict('libHepPDT.so') from PyUtils.Decorators import forking import os dict_libs = reduce(set.union, [set(v) for v in reg.db.values()]) dict_libs = [os.path.basename(l) for l in dict_libs] _veto_libs = [ 'libG4EventGraphicsDict.so', # freaking statics ! ] dict_libs = [l for l in dict_libs if l not in _veto_libs] msg.debug("::: loading dict-libraries...") @forking
def _pythonize_tfile():
    """Monkey-patch ROOT.TFile with python file-protocol methods.

    Gives TFile instances ``read``, ``seek`` and ``tell`` methods (backed
    by the RootUtils C++ helpers) so a TFile can be used where a read-only
    file-like object is expected.
    """
    import PyCintex; PyCintex.Cintex.Enable()
    root = import_root()
    import PyUtils.Helpers as H
    # suppress known, harmless warnings while the dictionary loads
    with H.ShutUp(filters=[
        re.compile(
            'TClass::TClass:0: RuntimeWarning: no dictionary for.*'),
        re.compile(
            'Warning in <TEnvRec::ChangeValue>: duplicate entry.*'
            ),
        ]):
        PyCintex.loadDict("RootUtilsPyROOTDict")
        rootutils = getattr(root, "RootUtils")
        # NOTE(review): 'pybytes' looks unused, but the getattr may be needed
        # to force instantiation of PyBytes from the dictionary -- confirm
        pybytes = getattr(rootutils, "PyBytes")
        read_root_file = getattr(rootutils, "_pythonize_read_root_file")
        tell_root_file = getattr(rootutils, "_pythonize_tell_root_file")
        pass

    def read(self, size=-1):
        """read([size]) -> read at most size bytes, returned as a string.

        If the size argument is negative or omitted, read until EOF is
        reached. Notice that when in non-blocking mode, less data than what
        was requested may be returned, even if no size parameter was given.

        FIXME: probably doesn't follow python file-like conventions...
        """
        SZ = 4096

        if size>=0:
            #size = _adjust_sz(size)
            #print "-->0",self.tell(),size
            c_buf = read_root_file(self, size)
            if c_buf and c_buf.sz:
                #print "-->1",self.tell(),c_buf.sz
                #self.seek(c_buf.sz+self.tell())
                #print "-->2",self.tell()
                buf = c_buf.buffer()
                buf.SetSize(c_buf.sz)
                return str(buf[:])
            return ''
        else:
            # negative size: drain the file in SZ-byte chunks until EOF
            size = SZ
            out = []
            while True:
                #size = _adjust_sz(size)
                c_buf = read_root_file(self, size)
                if c_buf and c_buf.sz:
                    buf = c_buf.buffer()
                    buf.SetSize(c_buf.sz)
                    out.append(str(buf[:]))
                else:
                    break
            return ''.join(out)

    root.TFile.read = read
    del read
    # Seek already has the right signature; tell goes through the C++ helper
    root.TFile.seek = root.TFile.Seek
    root.TFile.tell = lambda self: tell_root_file(self)
    ## import os
    ## def tell(self):
    ##     fd = os.dup(self.GetFd())
    ##     return os.fdopen(fd).tell()
    ## root.TFile.tell = tell
    ## del tell
    return
""" python -c "import urllib;print urllib.urlopen('http://www.cern.ch/~frankm').read()" """ import sys, time, platform import PyCintex as Dictionary lib_prefix = '' if platform.system()=='Linux': lib_prefix = 'lib' gbl = Dictionary.makeNamespace('') std = gbl.std Dictionary.loadDict(lib_prefix+'LHCbStatusDict') LHCbStatus=gbl.LHCbStatus srv=LHCbStatus.Server() s = srv.info() s.hlt.subfarm(0)='OT' print s.hlt.subfarm(0)
# Python facade over the TrigConf C++ trigger-configuration classes.
__version__ = '1.0.0'
__author__ = 'Joerg Stelzer <*****@*****.**>'
__all__ = ['TrigConf', 'l1menuloader', 'hltmenuloader']

import ROOT, PyCintex
PyCintex.Cintex.Enable()
# load the reflex dictionaries for the L1/HLT data and storage classes
PyCintex.loadDictionary("libTrigConfL1DataDict")
PyCintex.loadDictionary("libTrigConfHLTDataDict")
PyCintex.loadDictionary("libTrigConfStorageDict")
TrigConf = PyCintex.makeNamespace('TrigConf')

# modify a few functions: expose the vector-returning variants under the
# plain accessor names
TrigConf.Menu.items = TrigConf.Menu.itemsV
TrigConf.HLTFrame.chains = TrigConf.HLTFrame.chainsV
TrigConf.HLTFrame.sequences = TrigConf.HLTFrame.sequencesV

#from TrigConfOffline.menuloader import *
# re-export the menu loaders at module scope (see __all__)
from TrigConfOffline import menuloader
hltmenuloader = menuloader.hltmenuloader
l1menuloader = menuloader.l1menuloader
e_coll = jobproperties.HSG2.llqqElectronCollection() staco_coll = jobproperties.HSG2.llqqStacoMuonCollection() muons_coll = jobproperties.HSG2.llqqMuonsCollection() calo_coll = jobproperties.HSG2.llqqCaloMuonCollection() jet_coll = jobproperties.HSG2.llqqJetCollection() diLeptonMassCut = jobproperties.HSG2.llqqDiLeptonMassCut() electronJetDRCut = jobproperties.HSG2.llqqElectronJetDRCut() from AthenaCommon.Logging import logging msg = logging.getLogger( "NTUP_2L2QHSG2_Filter" ) # AthElectronLikelihoodTool with PhysicsAnalysis/ElectronPhotonID/ElectronPhotonSelectorTools/data/ElectronLikelihoodPdfs.root # and LikeEnum::Loose by following https://twiki.cern.ch/twiki/bin/viewauth/AtlasProtected/HiggsZZllllSummer2013#More_information import ROOT import PyCintex PyCintex.loadDict('libElectronPhotonSelectorToolsDict') from AthenaCommon.AppMgr import ToolSvc if not hasattr(ToolSvc, "AthElectronLikelihoodTool_VeryLoose"): from ElectronPhotonSelectorTools.ElectronPhotonSelectorToolsConf import AthElectronLikelihoodTool ToolSvc += AthElectronLikelihoodTool( "AthElectronLikelihoodTool_VeryLoose", inputPDFFileName = "ElectronPhotonSelectorTools/ElectronLikelihoodPdfs.root", cutLikelihoodEnum = ROOT.LikeEnum.VeryLoose, useUserData = False , forceRecalculateImpactParameter = True) msg.info("AthElectronLikelihoodTool/AthElectronLikelihoodTool_VeryLoose is added") if not hasattr(ToolSvc, "AthElectronLikelihoodTool_Loose"): from ElectronPhotonSelectorTools.ElectronPhotonSelectorToolsConf import AthElectronLikelihoodTool ToolSvc += AthElectronLikelihoodTool( "AthElectronLikelihoodTool_Loose", inputPDFFileName = "ElectronPhotonSelectorTools/ElectronLikelihoodPdfs.root", cutLikelihoodEnum = ROOT.LikeEnum.Loose, useUserData = False ,
# main jobOption include("RecExCommon/RecExCommon_topOptions.py") from GaudiSvc.GaudiSvcConf import THistSvc ServiceMgr += THistSvc() ServiceMgr.THistSvc.Output += ["file1 DATAFILE='output.root' OPT='RECREATE'"] #topSequence += TrigDecChecker import PyUtils.RootUtils as ru ROOT = ru.import_root() import PyCintex PyCintex.loadDictionary('ElectronPhotonSelectorToolsDict') PyCintex.loadDictionary('egammaEnumsDict') from ROOT import LikeEnum from ROOT import egammaPID from ROOT import egammaParameters #from TrigEgammaHypo.TrigEGammaPIDdefs import SelectionDefElectron from ElectronPhotonSelectorTools.TrigEGammaPIDdefs import SelectionDefElectron from TrigEgammaValidation.TrigEgammaValidationConf import NavZeeTPCounts zee = NavZeeTPCounts() #zee.Chains = ['e24_medium1_iloose','e24_medium_iloose','e28_tight1_L2Star_idperf','e28_tight1_idperf'] #zee.Chains = ['e24_medium1_L1EM20V','e24_medium_L1EM20V','e24_medium1_iloose','e24_medium_iloose','e28_tight1_iloose_L2StarA','e28_tight_iloose_L2Star','e28_tight_iloose','e0_perf_L1EM15VH','e60_medium1'] #zee.IsEMloose = SelectionDefElectron.ElectronLoose1 #zee.IsEMloose1 = SelectionDefElectron.ElectronLoose1
import sys, PyCintex as Dict SCR = Dict.makeNamespace('SCR') gbl = Dict.makeNamespace('') Context = SCR.PasteboardContext Display = SCR.Display Window = SCR.Window Pasteboard = SCR.Pasteboard # # ASCII = 0x00 NORMAL = 0x00 BOLD = 0x01 UNDERLINE = 0x02 INVERSE = 0x04 FLASH = 0x08 ATTRIBUTES = 0x0F GRAPHIC = 0x10 FONT_SUPP = 0x20 FONT_USER = 0x30 MODIFIED = 0x40 ON = 1 OFF = 0 SEQUENTIAL_WINDOW = 0 DETACHED_WINDOW = 1 PULLDOWN_WINDOW = 2 # # # CTRL_A = 0x101 CTRL_D = 0x104 CTRL_X = 0x118
def init(self):
    """Open the A4 output stream and wire up all dictionaries, helper
    lambdas and Athena tools this algorithm needs at run time."""
    self.a4 = OutputStream(open(self.file_name, "w"), "AOD2A4", Event, EventStreamInfo)

    import PyCintex
    PyCintex.loadDict("egammaAnalysisTools")
    self.tool_ciwt = PyAthena.py_tool("CaloIsolationWrapperTool", iface="ICaloIsolationWrapperTool")
    assert bool(self.tool_ciwt)
    PyCintex.loadDictionary("TrigMuonEvent")
    PyCintex.loadDictionary("TrigObjectMatching")
    self.tmefih = PyCintex.makeClass("TrigMatch::TrigMuonEFInfoHelper")
    from ROOT import vector

    # jet calorimeter-quality helpers
    PyCintex.loadDictionary("JetUtils")
    from ROOT import JetCaloHelper, JetCaloQualityUtils, Long, CaloSampling
    self.jet_emf = lambda jet : JetCaloHelper.jetEMFraction(jet)
    self.jet_hecF = lambda jet : JetCaloQualityUtils.hecF(jet)
    ### smax needed for jet cleaning
    #### FIX THIS: don't know either getNumberOfSamplings() or Unknown
    #### UPDATE: getNumberOfSamplings just returns Unknown!
    self.jet_smax = Long(CaloSampling.getNumberOfSamplings())
    self.jet_fmax = lambda jet : JetCaloQualityUtils.fracSamplingMax(jet, Long(CaloSampling.Unknown))
    self.jet_time = lambda jet : JetCaloQualityUtils.jetTimeCells(jet)
    self.jet_quality_lar = lambda jet : JetCaloQualityUtils.jetQualityLAr(jet)
    self.jet_quality_hec = lambda jet : JetCaloQualityUtils.jetQualityHEC(jet)
    self.jet_bad = lambda jet : JetCaloQualityUtils.isBad(jet, False)
    self.jet_ugly = lambda jet : JetCaloQualityUtils.isUgly(jet, False)

    # egamma / muon / MET enums and helpers
    PyCintex.loadDictionary("egammaEnumsDict")
    PyCintex.loadDictionary("muonEventDict")
    PyCintex.loadDictionary("egammaAnalysisUtils")
    PyCintex.loadDictionary("MissingETEvent")
    from ROOT import MuonParameters, egammaParameters, egammaPID
    from ROOT import ElectronMCChargeCorrector
    self.MuonParameters = MuonParameters
    self.egammaParameters = egammaParameters
    self.egammaPID = egammaPID
    self.empp_helper = PyCintex.makeClass("isEMPlusPlusHelper")()

    # 2010 data needs the interpreted object-quality checker macro
    if self.year == 2010:
        gROOT.ProcessLine(".L checkOQ.C++")
        from ROOT import egammaOQ
        self.egOQ = egammaOQ()
        self.egOQ.initialize()

    # Athena tools: track-to-vertex, trigger decision/matching, HF overlap
    # removal and muon timing
    self.tool_ttv = PyAthena.py_tool("Reco::TrackToVertex",
                                     iface="Reco::ITrackToVertex")
    self.tool_tdt = PyAthena.py_tool('Trig::TrigDecisionTool/TrigDecisionTool')
    self.tool_tmt = PyAthena.py_tool("TrigMatchTool/TrigMatchTool")
    self.tool_hfor= PyAthena.py_tool("HforTool",iface="IHforTool")
    self.tool_timing = PyAthena.py_tool("Rec::MuonCombinedTimingTool/MuonCombinedTimingTool", iface="Rec::IMuonCombinedTimingTool")
    PyCintex.loadDictionary("TrkSpaceTimePoint")
import sys, platform, PyCintex as Dict if platform.system() == 'Linux': Dict.loadDictionary('libOnlineKernelDict.so') Dict.loadDictionary('libUPIDict.so') else: Dict.loadDictionary('OnlineKernelDict.dll') Dict.loadDictionary('UPIDict.dll') CPP = Dict.makeNamespace('CPP') gbl = Dict.makeNamespace('') #import os #l=os.popen("cat /proc/"+str(os.getpid())+"/maps | grep UPIR").readlines() #ll=os.popen('nm -C -D '+l[0].split(' ')[-1]) #for i in ll: # if i.find('upic_refresh')>=0: print i[:-1] EventType = 2 # graphics attributes NORMAL = 0x00 BOLD = 0x01 UNDERLINE = 0x02 INVERSE = 0x04 FLASH = 0x08 ATTRIBUTES = 0x0F ASCII = 0x00 GRAPHIC = 0x10 FONT_SUPP = 0x20 FONT_USER = 0x30 FONTS = 0x30
from PrimaryDPDMaker.PrimaryDPDFlags_EGammaStream import primEGammaDPD from PrimaryDPDMaker.ElectronFilter import ElectronFilter # pre-pend an external electron filter # filter just for robust loose, leave all the rest blank, # to be checked by ourselves later # ---- Load the egammaPID and egammaParameters information # This is needed to always be up-to-date with the egamma # IsEM selections and also the author selections import PyCintex PyCintex.loadDictionary('egammaEnumsDict') from ROOT import egammaPID from ROOT import egammaParameters # Import the needed algorithms and tools from AnalysisUtils.AnalysisUtilsConf import ElectronIDSelector from D2PDMaker.D2PDMakerConf import D2PDElectronSelector ToolSvc += ElectronIDSelector( "RobustLooseElectronFilterSelector", isEM = "RobustLoose" ) theJob += D2PDElectronSelector( "RobustLooseElectronFilter", OutputLevel = INFO, inputCollection = 'ElectronAODCollection', outputCollection = 'RobustLooseElectronCollection',
# mcROOTFileName = localMCFile,
# mcROOTHistName = "mu_mc10b"
# )

# add the fudge factors
#include ( "gmsbFudgeFactors/gmsbFudgeFactors.py" )
#topSequence += theGmsbFudgeFactors
#theGmsbFudgeFactors.WhichFudgeFactors = 200

# add the selection
include("gmsbTools/gmsbTools_jobOptions.py")
include("SUSYPhotonJetCleaningTool/SUSYPhotonJetCleaningTool_jobOptions.py")

# egamma PID enums
import PyCintex
PyCintex.loadDictionary('egammaEnumsDict')
from ROOT import egammaPID

#if not 'RANDSEED' in dir():
#    RANDSEED = 0
#print "random seed", RANDSEED

# selection-tool configuration for MC (no smearing)
gmsbSelectionTool.IsMC = True
# FIX: the 'SmearMC = False' assignment was duplicated; keep a single one
gmsbSelectionTool.SmearMC = False
gmsbSelectionTool.ElectronPt = 25 * GeV
gmsbSelectionTool.PhotonPt = 100 * GeV
gmsbSelectionTool.MuonPt = 25 * GeV
#gmsbSelectionTool.RandomSeed = RANDSEED
#gmsbSelectionTool.MCEtconeShift = 0.0;
def test25STLIterator(self):
    """vector<MyA> plus its forward and reverse iterators must resolve."""
    vec_cls = PyCintex.makeClass('std::vector<MyA>')
    self.failUnless(vec_cls)
    fwd_it = PyCintex.makeClass('std::vector<MyA>::iterator')
    self.failUnless(fwd_it)
    rev_it = PyCintex.makeClass('std::vector<MyA>::reverse_iterator')
    self.failUnless(rev_it)