class DsoDb (_Dso.PyDsoDb):
    """
    The repository of 'rootmap' files (location, content,...) and a set of
    operations one can apply on them (load dict, query dicts,...)
    """
    def __init__(self):
        """Build the DSO database, register known aliases and bootstrap Reflex.

        Side effects: enables Cintex and loads the 'ReflexRflx' dictionary.
        """
        super (DsoDb, self).__init__('AthenaDsoDb')
        import AthenaCommon.Logging
        self.msg = AthenaCommon.Logging.logging.getLogger("AthenaDsoDb")
        #self.msg.setLevel(AthenaCommon.Logging.logging.VERBOSE)
        # inject the known aliases NOW: each alias name points at the same
        # rootmap entry as its target.  A missing target is not fatal.
        for k, v in _aliases.iteritems():
            try:
                self.db[k] = self.db[v]
            except KeyError:
                # was `except KeyError,err:` — deprecated comma syntax and
                # the caught exception object was never used.
                self.msg.info("could not install alias [%s] -> [%s]", k, v)
        # make sure we'll be able to load dicts
        import PyCintex
        PyCintex.Cintex.Enable()
        # load reflex; fall back to the ROOT::Reflex namespace on older/newer
        # ROOT layouts where plain 'Reflex' does not resolve.
        self._load_dict = PyCintex.loadDict
        self._load_dict('ReflexRflx')
        self._rflx = PyCintex.makeNamespace('Reflex')
        if not self._rflx:
            self._rflx = PyCintex.makeNamespace('ROOT::Reflex')
        # cached helpers: type-by-name lookup and the global namespace
        self._rflx_type = self._rflx.Type.ByName
        self._gbl = PyCintex.makeNamespace('')
        return
def __init__(self):
    """Bind the C++ Ath::DsoDb singleton and make Reflex available."""
    # bring in Cintex and switch it on
    import PyCintex
    PyCintex.Cintex.Enable()
    # grab ROOT through the project helper and keep a handle on the
    # C++ side singleton
    import PyUtils.RootUtils as ru
    self._cxx = ru.import_root().Ath.DsoDb.instance()
    # load the Reflex dictionary; some ROOT versions expose the
    # namespace as 'ROOT::Reflex' instead of plain 'Reflex'
    PyCintex.loadDict('ReflexRflx')
    self._rflx = (PyCintex.makeNamespace('Reflex')
                  or PyCintex.makeNamespace('ROOT::Reflex'))
    return
def __init__(self):
    """Initialize: enable Cintex, cache the C++ DsoDb, load Reflex."""
    import PyCintex
    import PyUtils.RootUtils as ru
    # Cintex must be enabled before any dictionary is loaded
    PyCintex.Cintex.Enable()
    root_mod = ru.import_root()
    self._cxx = root_mod.Ath.DsoDb.instance()
    # pull in the Reflex dictionary library
    loader = PyCintex.loadDict
    loader('ReflexRflx')
    # try the plain namespace first, then the ROOT-scoped one
    ns = PyCintex.makeNamespace('Reflex')
    if not ns:
        ns = PyCintex.makeNamespace('ROOT::Reflex')
    self._rflx = ns
    return
def gen_typeregistry_dso(oname=_dflt_typereg_fname):
    '''inspect all the accessible reflex types and get their rootmap-naming.
    also associate the clid if available.
    '''
    # NOTE(review): this chunk appears truncated — cliddb and cls_names are
    # prepared here but never used in the visible portion of the function.
    # CLID database (db=None -> default generator lookup)
    import CLIDComps.clidGenerator as _c
    cliddb = _c.clidGenerator(db=None)
    del _c
    # normalize the output file name into a path object
    import PyUtils.path as _p
    oname = _p.path(oname)
    del _p
    import PyUtils.Logging as _L
    msg = _L.logging.getLogger('typereg-dso')
    #msg.setLevel(_L.logging.INFO)
    msg.setLevel(_L.logging.VERBOSE)
    del _L
    msg.info("installing registry in [%s]...", oname)
    # FIXME: should use the Cxx one...
    #reg = DsoDb()
    reg = PyDsoDb()
    # all class names known to the rootmap database
    cls_names = reg.db.keys()
    msg.debug("::: loading reflex")
    import PyCintex
    PyCintex.Cintex.Enable()
    PyCintex.loadDict('libReflexRflx.so')
    # fall back to ROOT::Reflex when the plain namespace is absent
    rflx = PyCintex.makeNamespace('Reflex')
    if not rflx:
        rflx = PyCintex.makeNamespace('ROOT::Reflex')
    rflx = rflx.Type
    assert (rflx)
    import PyCintex
    _load_lib = PyCintex.loadDict
    def _load_dict(libname, retry=10):
        # helper: load a dictionary library, logging (not raising) on failure
        # NOTE(review): the `retry` parameter is unused in the visible code
        msg.debug("::: loading [%s]...", libname)
        try:
            return _load_lib(libname)
        except ( Exception, SystemError, ), err:
            msg.warning("**error** %s", err)
        return
def gen_typeregistry_dso(oname=_dflt_typereg_fname):
    '''inspect all the accessible reflex types and get their rootmap-naming.
    also associate the clid if available.
    '''
    # NOTE(review): visible portion seems truncated — cliddb and cls_names
    # are built but not consumed here.
    # class-id database for name -> CLID association
    import CLIDComps.clidGenerator as _c
    cliddb = _c.clidGenerator(db=None)
    del _c
    # wrap the output name in a path object
    import PyUtils.path as _p
    oname = _p.path(oname)
    del _p
    import PyUtils.Logging as _L
    msg = _L.logging.getLogger('typereg-dso')
    #msg.setLevel(_L.logging.INFO)
    msg.setLevel(_L.logging.VERBOSE)
    del _L
    msg.info("installing registry in [%s]...", oname)
    # FIXME: should use the Cxx one...
    #reg = DsoDb()
    reg = PyDsoDb()
    cls_names = reg.db.keys()
    msg.debug("::: loading reflex")
    import PyCintex
    PyCintex.Cintex.Enable()
    PyCintex.loadDict('libReflexRflx.so')
    # plain 'Reflex' first, then the ROOT-scoped variant
    rflx = PyCintex.makeNamespace('Reflex')
    if not rflx:
        rflx = PyCintex.makeNamespace('ROOT::Reflex')
    rflx = rflx.Type
    assert(rflx)
    import PyCintex
    _load_lib = PyCintex.loadDict
    def _load_dict(libname,retry=10):
        # best-effort dictionary load: failures are logged, never raised
        # NOTE(review): `retry` is not used in the visible code
        msg.debug("::: loading [%s]...", libname)
        try:
            return _load_lib(libname)
        except (Exception,SystemError,), err:
            msg.warning("**error** %s", err)
        return
def __initialize(self):
    """Enable Cintex, cache the global namespace and the Reflex Type class.

    Side effects: loads the 'ReflexRflx' dictionary library.
    """
    PyCintex.Cintex.Enable()
    # global name space
    self.gbl = PyCintex.Namespace('')
    # load reflex
    _load_dict = PyCintex.loadDict
    _load_dict('ReflexRflx')
    # Create the Reflex::Type class
    print("...creating Reflex::Type class...")
    _rflx = PyCintex.makeNamespace('Reflex')
    if not _rflx:
        # newer/older ROOT layouts scope Reflex under ROOT::
        _rflx = PyCintex.makeNamespace('ROOT::Reflex')
    # (removed two dead stores from the original: a `gbl` local alias and
    # `_rflx_type = _rflx.Type.ByName`, neither of which was ever read)
    self.rflxType = _rflx.Type
    return
def __initialize(self):
    """Set up Cintex/Reflex access: global namespace and Reflex::Type."""
    PyCintex.Cintex.Enable()
    # keep the global (unnamed) namespace around, both on self and locally
    self.gbl = gbl = PyCintex.Namespace('')
    # pull in the Reflex dictionary
    loader = PyCintex.loadDict
    loader('ReflexRflx')
    # materialize the Reflex::Type class
    print("...creating Reflex::Type class...")
    ns = PyCintex.makeNamespace('Reflex')
    if not ns:
        ns = PyCintex.makeNamespace('ROOT::Reflex')
    _rflx = ns
    _rflx_type = _rflx.Type.ByName
    self.rflxType = _rflx.Type
    return
__version__ = '1.0.0'
__author__ = 'Joerg Stelzer <*****@*****.**>'
__all__ = ['TrigConf', 'l1menuloader', 'hltmenuloader']

import ROOT, PyCintex

# dictionaries must be loaded with Cintex enabled
PyCintex.Cintex.Enable()
for _dict_lib in ("libTrigConfL1DataDict",
                  "libTrigConfHLTDataDict",
                  "libTrigConfStorageDict"):
    PyCintex.loadDictionary(_dict_lib)

TrigConf = PyCintex.makeNamespace('TrigConf')

# patch a few accessors so the vector-returning variants are the default
TrigConf.Menu.items = TrigConf.Menu.itemsV
TrigConf.HLTFrame.chains = TrigConf.HLTFrame.chainsV
TrigConf.HLTFrame.sequences = TrigConf.HLTFrame.sequencesV

#from TrigConfOffline.menuloader import *
from TrigConfOffline import menuloader
hltmenuloader = menuloader.hltmenuloader
l1menuloader = menuloader.l1menuloader
# SCR (screen) bindings: convenience aliases and display constants.
import sys, PyCintex as Dict
SCR = Dict.makeNamespace('SCR')
gbl = Dict.makeNamespace('')
# re-export the main SCR classes at module level
Context = SCR.PasteboardContext
Display = SCR.Display
Window = SCR.Window
Pasteboard = SCR.Pasteboard
# rendition/attribute bit masks (presumably matching the C++ SCR package
# definitions — TODO confirm against the SCR headers)
ASCII = 0x00
NORMAL = 0x00
BOLD = 0x01
UNDERLINE = 0x02
INVERSE = 0x04
FLASH = 0x08
ATTRIBUTES = 0x0F        # mask selecting the attribute bits
GRAPHIC = 0x10
FONT_SUPP = 0x20
FONT_USER = 0x30
MODIFIED = 0x40
# on/off toggles
ON = 1
OFF = 0
# window kinds
SEQUENTIAL_WINDOW = 0
DETACHED_WINDOW = 1
PULLDOWN_WINDOW = 2
# key codes (0x100 | ctrl-letter)
CTRL_A = 0x101
CTRL_D = 0x104
CTRL_X = 0x118
def configure(self, joboptions=None, commands=None, dllname=None,
              factname=None, extra_options=None):
    """Configure the C++ Gaudi application.

    Builds the job-option configuration (common boilerplate + user commands
    + job-option files), runs the configuration in a forked subprocess which
    pickles the result, then instantiates the AppMgr from that pickle and
    replays all properties into the JobOptionsSvc catalogue.

    Returns the (cached) application handle; repeated calls are no-ops.
    """
    if not (self.app is None):
        self.msg.info('C++ application already configured')
        return self.app
    self.msg.info('configuring application...')
    # stash whatever the user already pushed into self.cfg
    usr_cfg = AthCfg()
    self.cfg.seek(0)
    usr_cfg << self.cfg.read()
    # reset
    self.cfg = AthCfg()
    if commands:
        self.cfg << commands + '\n'
    # common configuration
    self.cfg << """
# basic job configuration
include('AthenaCommon/Atlas.UnixStandardJob.py')
include.block('AthenaCommon/Atlas.UnixStandardJob.py')
if not (not %(run_batch)s and theApp.EventLoop == 'PyAthenaEventLoopMgr'):
    # make SIG_INT fatal
    svcMgr.CoreDumpSvc.FatalHandler = -1
""" % { 'run_batch': self.options.run_batch }
    self.cfg << """
# user level configuration
try:
    include('$HOME/.athenarc')
except IncludeError:
    pass
"""
    # another user level configuration
    usr_cfg.seek(0)
    self.cfg << usr_cfg.read()
    if isinstance(joboptions, (list, tuple)):
        for jobo_name in joboptions:
            self.cfg.include(jobo_name)
    if not self.options.run_batch:
        # interactive mode: use the python event loop manager
        self.cfg << """
theApp.EventLoop = 'PyAthenaEventLoopMgr'
svcMgr += CfgMgr.PyAthenaEventLoopMgr()
"""
    self.cfg << """
### logging and messages ---------
from AthenaCommon.Logging import *
_msg = log
_msg.setLevel(getattr(logging, '%(output_level)s'))
import AthenaCommon.Constants as Lvl
theApp.setOutputLevel(%(output_level)s)
theApp.OutputLevel = Lvl.%(output_level)s
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
svcMgr.MessageSvc.OutputLevel = Lvl.%(output_level)s
""" % dict(output_level=self.options.msg_lvl)
    self.cfg << """
from AthenaCommon.Include import Include, IncludeError, include
include.setShowIncludes(%(showincludes)s)
if %(showincludes)s:
    import AthenaCommon.Include as AthCIncMod
    AthCIncMod.marker=' -#-' # distinguish bootstrap from other jo-code
""" % dict(showincludes=self.options.showincludes)
    # the pickled configuration lives next to the temporary job-option file
    cfg_name = self.cfg._jobo.name.replace('.py', '.pkl')
    self.msg.info('dumping job-configuration into [%s]...', cfg_name)
    # run configuration in a forked-subprocess...
    sc = _app_configure(self.cfg, cfg_name, extra_options)
    if sc:
        err = 'could not configure application [sc=%d]' % sc
        self.msg.error(err)
        raise RuntimeError(err)
    self.msg.info('configuring application w/ [%s]', cfg_name)
    import os
    self.cfg._jobo.close()
    os.remove(self.cfg._jobo.name)
    import PyCintex
    PyCintex.Cintex.Enable()
    gbl = PyCintex.makeNamespace('')
    import GaudiPython.Bindings as gaudi
    # remove the gaudimodule exit handler as to prevent them from clobering
    import atexit
    for hdlr in reversed(atexit._exithandlers[:]):
        module_name = hdlr[0].__module__
        if ('GaudiPython' in module_name or 'gaudimodule' in module_name):
            atexit._exithandlers.remove(hdlr)
    del hdlr
    # install our own exit handler (if needed)
    import sys
    if hasattr(sys, 'ps1'):
        # ie: is interactive
        atexit.register(self.exit)
    del atexit
    from . import ResourceLimits
    ResourceLimits.SetMaxLimits()
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    import PyUtils.dbsqlite as dbs
    db = dbs.open(cfg_name, 'r')
    jobo_cfg = db['jobopts']
    kw = jobo_cfg['ApplicationMgr']
    # these keys would trigger actions inside AppMgr(); strip them
    for k in ('Go', 'Exit', 'AuditInitialize', 'AuditFinalize'):
        if k in kw:
            del kw[k]
    outputlevel = jobo_cfg['ApplicationMgr']['OutputLevel']
    self.app = gaudi.AppMgr(outputlevel=outputlevel,
                            selfoptions=kw,
                            dllname=dllname,
                            factname=factname)
    # open the pycomps folder
    pycomps = db.get('pycomps', None)  # just opening it should do
    if pycomps:
        import AthenaPython.Configurables as _C
        _C.PyComponents.instances = dict((p.name, p) for p in pycomps)
        #_C.PyComponents.instances = pycomps
        for p in pycomps:
            if hasattr(p, 'setup'):
                if callable(p.setup):
                    p.setup()
        setattr(self, '_pycomps', pycomps)
    import AthenaPython.PyAthena as PyAthena
    josvc = PyAthena.py_svc('JobOptionsSvc', createIf=False,
                            iface='IJobOptionsSvc')
    assert josvc is not None
    # replay every recorded property into the catalogue
    for client in jobo_cfg:
        if client == 'ApplicationMgr':
            continue
        for n, v in jobo_cfg[client].iteritems():
            p = gaudi.StringProperty(n, v)
            if not josvc.addPropertyToCatalogue(client, p).isSuccess():
                self.msg.error('could not add property [%s.%s = %s]',
                               client, n, v)
            # these two services are already alive: set the property directly
            if client in ('MessageSvc', 'JobOptionsSvc'):
                svc = PyAthena.py_svc(client, iface='IProperty')
                svc.setProperty(p)
    db.close()
    import os
    if os.path.exists(cfg_name):
        os.remove(cfg_name)
    pass
    #import AthenaCommon.Debugging as dbg
    #dbg.hookDebugger()
    return self.app
def configure(self, joboptions=None, commands=None, dllname=None,
              factname=None, extra_options=None):
    """Configure the C++ Gaudi application (idempotent).

    Assembles the job-option stream, configures it in a forked subprocess
    (result pickled to disk), then builds the AppMgr from the pickle and
    feeds every property back into the JobOptionsSvc catalogue.
    Returns the application handle.
    """
    if not (self.app is None):
        self.msg.info('C++ application already configured')
        return self.app
    self.msg.info('configuring application...')
    # keep a copy of what has been pushed into self.cfg so far
    usr_cfg = AthCfg()
    self.cfg.seek(0)
    usr_cfg << self.cfg.read()
    # reset
    self.cfg = AthCfg()
    if commands:
        self.cfg << commands+'\n'
    # common configuration
    self.cfg << """
# basic job configuration
include('AthenaCommon/Atlas.UnixStandardJob.py')
include.block('AthenaCommon/Atlas.UnixStandardJob.py')
if not (not %(run_batch)s and theApp.EventLoop == 'PyAthenaEventLoopMgr'):
    # make SIG_INT fatal
    svcMgr.CoreDumpSvc.FatalHandler = -1
""" % {'run_batch' : self.options.run_batch}
    self.cfg << """
# user level configuration
try:
    include('$HOME/.athenarc')
except IncludeError:
    pass
"""
    # another user level configuration
    usr_cfg.seek(0)
    self.cfg << usr_cfg.read()
    if isinstance(joboptions, (list,tuple)):
        for jobo_name in joboptions:
            self.cfg.include(jobo_name)
    if not self.options.run_batch:
        # interactive session: switch to the python event loop manager
        self.cfg << """
theApp.EventLoop = 'PyAthenaEventLoopMgr'
svcMgr += CfgMgr.PyAthenaEventLoopMgr()
"""
    self.cfg << """
### logging and messages ---------
from AthenaCommon.Logging import *
_msg = log
_msg.setLevel(getattr(logging, '%(output_level)s'))
import AthenaCommon.Constants as Lvl
theApp.setOutputLevel(%(output_level)s)
theApp.OutputLevel = Lvl.%(output_level)s
from AthenaCommon.AppMgr import ServiceMgr as svcMgr
svcMgr.MessageSvc.OutputLevel = Lvl.%(output_level)s
""" % dict(output_level=self.options.msg_lvl)
    self.cfg << """
from AthenaCommon.Include import Include, IncludeError, include
include.setShowIncludes(%(showincludes)s)
if %(showincludes)s:
    import AthenaCommon.Include as AthCIncMod
    AthCIncMod.marker=' -#-' # distinguish bootstrap from other jo-code
""" % dict(showincludes=self.options.showincludes)
    # pickle file written by the forked configuration step
    cfg_name = self.cfg._jobo.name.replace('.py','.pkl')
    self.msg.info('dumping job-configuration into [%s]...', cfg_name)
    # run configuration in a forked-subprocess...
    sc = _app_configure(self.cfg, cfg_name, extra_options)
    if sc:
        err = 'could not configure application [sc=%d]' % sc
        self.msg.error(err)
        raise RuntimeError(err)
    self.msg.info('configuring application w/ [%s]', cfg_name)
    import os
    self.cfg._jobo.close()
    os.remove(self.cfg._jobo.name)
    import PyCintex
    PyCintex.Cintex.Enable()
    gbl = PyCintex.makeNamespace('')
    import GaudiPython.Bindings as gaudi
    # remove the gaudimodule exit handler as to prevent them from clobering
    import atexit
    for hdlr in reversed(atexit._exithandlers[:]):
        module_name = hdlr[0].__module__
        if ('GaudiPython' in module_name or 'gaudimodule' in module_name):
            atexit._exithandlers.remove(hdlr)
    del hdlr
    # install our own exit handler (if needed)
    import sys
    if hasattr(sys, 'ps1'):
        # ie: is interactive
        atexit.register(self.exit)
    del atexit
    from . import ResourceLimits
    ResourceLimits.SetMaxLimits()
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    import PyUtils.dbsqlite as dbs
    db = dbs.open(cfg_name, 'r')
    jobo_cfg = db['jobopts']
    kw = jobo_cfg['ApplicationMgr']
    # drop the action-triggering keys before handing the dict to AppMgr()
    for k in ('Go', 'Exit', 'AuditInitialize', 'AuditFinalize'):
        if k in kw:
            del kw[k]
    outputlevel = jobo_cfg['ApplicationMgr']['OutputLevel']
    self.app = gaudi.AppMgr(outputlevel=outputlevel,
                            selfoptions=kw,
                            dllname=dllname,
                            factname=factname)
    # open the pycomps folder
    pycomps = db.get('pycomps', None)  # just opening it should do
    if pycomps:
        import AthenaPython.Configurables as _C
        _C.PyComponents.instances = dict((p.name, p) for p in pycomps)
        #_C.PyComponents.instances = pycomps
        for p in pycomps:
            if hasattr(p, 'setup'):
                if callable(p.setup):
                    p.setup()
        setattr(self, '_pycomps', pycomps)
    import AthenaPython.PyAthena as PyAthena
    josvc = PyAthena.py_svc('JobOptionsSvc', createIf=False,
                            iface='IJobOptionsSvc')
    assert josvc is not None
    # push every recorded property back into the catalogue
    for client in jobo_cfg:
        if client == 'ApplicationMgr':
            continue
        for n,v in jobo_cfg[client].iteritems():
            p = gaudi.StringProperty(n, v)
            if not josvc.addPropertyToCatalogue(client, p).isSuccess():
                self.msg.error( 'could not add property [%s.%s = %s]',
                                client, n, v )
            # already-instantiated services need the property set directly
            if client in ('MessageSvc', 'JobOptionsSvc'):
                svc = PyAthena.py_svc(client, iface='IProperty')
                svc.setProperty(p)
    db.close()
    import os
    if os.path.exists(cfg_name):
        os.remove(cfg_name)
    pass
    #import AthenaCommon.Debugging as dbg
    #dbg.hookDebugger()
    return self.app
# UPI bindings: load the platform-specific dictionaries and expose the
# CPP/global namespaces plus the display-attribute constants.
import sys
import platform
import PyCintex as Dict

# pick the shared-library names for this platform, then load them
if platform.system() == 'Linux':
    _dict_libs = ('libOnlineKernelDict.so', 'libUPIDict.so')
else:
    _dict_libs = ('OnlineKernelDict.dll', 'UPIDict.dll')
for _lib in _dict_libs:
    Dict.loadDictionary(_lib)

CPP = Dict.makeNamespace('CPP')
gbl = Dict.makeNamespace('')

EventType = 2

# graphics attributes
NORMAL = 0x00
BOLD = 0x01
UNDERLINE = 0x02
INVERSE = 0x04
FLASH = 0x08
ATTRIBUTES = 0x0F
ASCII = 0x00
GRAPHIC = 0x10
FONT_SUPP = 0x20
FONT_USER = 0x30
FONTS = 0x30
def setUp(self):
    """Load the test dictionary and cache the namespaces used by the tests."""
    # touching the class forces its Reflex dictionary to be loaded
    PyCintex.makeClass('A::B::C::MyClass')
    for attr, ns_name in (('A', 'A'), ('std', 'std'), ('gbl', '')):
        setattr(self, attr, PyCintex.makeNamespace(ns_name))
# DIM bindings: load the dictionary, export the DIM classes and a couple
# of logging helpers.
import os, sys, string, platform
import Online.Utils as Utils
import PyCintex as PyLCGDict
# shared libraries carry a 'lib' prefix on Linux only
lib_prefix = ''
if platform.system() == 'Linux':
    lib_prefix = 'lib'
PyLCGDict.loadDict(lib_prefix + 'DIMDict')
DIM = PyLCGDict.makeNamespace('DIM')
gbl = PyLCGDict.makeNamespace('')
#----enable tab completion---------------------------------------------------------------
# best-effort: readline may be missing on some platforms
try:
    import rlcompleter, readline
    readline.parse_and_bind("tab: complete")
except:
    pass
log = Utils.log
error = Utils.error
# trivial callback used where a no-op member function is needed
def void_call(self):
    return 1
#CmdInfo = gbl.CmndInfo
# NOTE(review): the first assignment below is immediately shadowed by the
# second — only DIM.Info survives.  The gbl.DimInfo lookup is kept as-is
# in case it has a load-time side effect; confirm and drop if not.
Info = gbl.DimInfo
Info = DIM.Info
Timer = gbl.DimTimer
Client = gbl.DimClient
def execute(self):
    """Per-event analysis: compare reconstructed vertex/energy with MC truth.

    Flow: read SimHeader truth -> convert truth vertex to AD-local
    coordinates -> collect calibrated PMT charges -> read the AdMLRec
    reconstruction -> fill resolution/bias histograms and a charge-based
    chi^2.  Histograms are lazily created on the first processed event.
    """
    print "Executing ReconAnaAlg",self.name()
    # reset per-event observed/expected charge arrays (192 AD PMTs)
    self.obsQ = array('d', 192*[0])
    self.expQ = array('d', 192*[0])
    self.totalObsQ = 0.0
    evt = self.evtSvc()
    # SimEvent Data
    simhdr = evt['/Event/Sim/SimHeader']
    if simhdr == None:
        # fall back: dig the SimHeader out of the readout (51301 = header
        # type id — TODO confirm meaning)
        roh = evt['/Event/Readout/ReadoutHeader']
        ilist = roh.findHeaders(51301)
        if ilist.size() == 1:
            simhdr = ilist[0]
    if simhdr == None:
        print "No SimHeader in this ReadOut. Skip."
        return SUCCESS
    statshdr = simhdr.unobservableStatistics()
    stats = statshdr.stats()
    # truth: quenched energy deposit and QE-weighted centroid in the
    # scintillator volumes
    self.qEdep = stats["QEDepInGdLS"].sum() + stats["QEDepInLS"].sum()
    scintX = stats["xQESumGdLS"].sum() + stats["xQESumLS"].sum()
    scintY = stats["yQESumGdLS"].sum() + stats["yQESumLS"].sum()
    scintZ = stats["zQESumGdLS"].sum() + stats["zQESumLS"].sum()
    if self.qEdep < 0.1:
        print "Low energy deposit in LS or GdLS. Skip."
        return SUCCESS
    # normalize the weighted sums into a centroid position
    scintX = scintX/self.qEdep
    scintY = scintY/self.qEdep
    scintZ = scintZ/self.qEdep
    # generator-level vertex of the first track
    genX = stats["x_Trk1"].sum()
    genY = stats["y_Trk1"].sum()
    genZ = stats["z_Trk1"].sum()
    # Get underlying DE object
    de = self.getDet(self.target_de_name)
    if not de:
        print 'Failed to get DE', self.target_de_name
        return FAILURE
    # Get the AD coordinates of the vertexes
    Gaudi = PyCintex.makeNamespace('Gaudi')
    scintGlbPoint = Gaudi.XYZPoint(scintX, scintY, scintZ)
    scintLclPoint = de.geometry().toLocal(scintGlbPoint)
    genGlbPoint = Gaudi.XYZPoint(genX, genY, genZ)
    genLclPoint = de.geometry().toLocal(genGlbPoint)
    self.scintX = scintLclPoint.x()
    self.scintY = scintLclPoint.y()
    self.scintZ = scintLclPoint.z()
    self.genX = genLclPoint.x()
    self.genY = genLclPoint.y()
    self.genZ = genLclPoint.z()
    self.stats["file0/hists/scintX"].Fill(self.scintX / units.centimeter)
    self.stats["file0/hists/scintY"].Fill(self.scintY / units.centimeter)
    self.stats["file0/hists/scintZ"].Fill(self.scintZ / units.centimeter)
    self.stats["file0/hists/scintE"].Fill(self.qEdep)
    # keep only vertices inside the LS region (2 m radius / half-height —
    # TODO confirm units are mm)
    radialRGen = math.sqrt(self.genX*self.genX+self.genY*self.genY)
    if radialRGen > 2000. or ROOT.TMath.Abs(self.genZ) > 2000:
        self.info("Generated vertex is beyond the LS. Skip.")
        return SUCCESS
    # CalibReadoutEvent Data
    croHdr = evt["/Event/CalibReadout/CalibReadoutHeader"]
    if croHdr == None:
        self.error("Failed to get current calib readout header")
        return FAILURE
    readout = croHdr.calibReadout()
    if readout == None:
        self.info("No calibrated readout this cycle")
        print "scintE: ", self.qEdep, " MeV"
        return SUCCESS
    if readout.channelReadout().size() == 0:
        self.info("no channel Readout")
        return SUCCESS
    svcMode = ServiceMode(croHdr.context(), 0)
    # accumulate observed charge per PMT
    for channel in readout.channelReadout():
        #channel = channelPair.second
        pmtId = channel.pmtSensorId().fullPackedData()
        #pmtId = self.cableSvc.adPmtSensor(chanId, svcMode)
        # localId = (pmtId.ring()-1)*24 + (pmtId.column()-1)
        # ring/column are bit-packed in the sensor id
        ring = (pmtId & 0x0000ff00)>>8
        column = (pmtId & 0x000000ff)
        localId = (ring-1)*24 + (column-1)
        self.obsQ[localId] = channel.maxCharge()
        self.totalObsQ = self.totalObsQ + channel.maxCharge()
        self.stats["file0/hists/croPmtPeakAdcPrf"].Fill(localId, channel.maxCharge())
    if self.totalObsQ < 50:
        self.info("Low total p.e number. Skip.")
        return SUCCESS
    # RecEvent Data
    recHdr = evt["/Event/Rec/AdMLRec"]
    if recHdr == None:
        self.error("Failed to get current RecHeader")
        return FAILURE
    #recResults = recHdr.recTrigger()
    recTrigger = recHdr.recTrigger()
    if recTrigger == None:
        self.info("No recTrigger this cycle")
        return SUCCESS
    #for recPair in irange(recResults.begin(), recResults.end()):
    #recTrigger = recPair.second
    # histogram names, all suffixed by the reconstruction algorithm tag
    recName = "ML"
    histRecXName = "recX_" + recName
    histRecYName = "recY_" + recName
    histRecZName = "recZ_" + recName
    histDeltaXName = "deltaX_" + recName
    histDeltaYName = "deltaY_" + recName
    histDeltaZName = "deltaZ_" + recName
    histResoRName = "resoR_" + recName
    histDriftRName = "driftR_" + recName
    histRZName = "RZ_" + recName
    prfqmuRName = "qmuR_" + recName
    prfqmuThetaName = "qmuTheta_" + recName
    prfqmuQobsName = "qmuQobs_" + recName
    prfqmuPMTName = "qmuPMT_" + recName
    histErecRrecName = "hist_ErecR_" + recName
    histErecZrecName = "hist_ErecZ_" + recName
    histChi2TestName = "Chi2Test_" + recName
    vtxQualityName = "vtxQuality_" + recName
    prfErecRrecName = "prf_ErecRrec_" + recName
    prfErecZrecName = "prf_ErecZrec_" + recName
    prfQsumRtrueName = "prf_QsumR_" + recName
    prfQsumZtrueName = "prf_QsumZ_" + recName
    prfEratioRtrueName = "prf_EratioRtrue_" + recName
    prfEratioZtrueName = "prf_EratioZtrue_" + recName
    prfRbiasRtrueName = "prf_Rbias_Rtrue_" + recName
    prfZbiasZtrueName = "prf_Zbias_Ztrue_" + recName
    histRbiasRtrueName = "hist_Rbias_Rtrue_" + recName
    histZbiasZtrueName = "hist_Zbias_Ztrue_" + recName
    if self.firstEntry:
        # Make the histograms
        self.stats["file0/hists/%s"%histRecXName] = TH1F(histRecXName, "recX (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histRecYName] = TH1F(histRecYName, "recY (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histRecZName] = TH1F(histRecZName, "recZ (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histDeltaXName] = TH1F(histDeltaXName, "recX - scintX (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histDeltaYName] = TH1F(histDeltaYName, "recY - scintY (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histDeltaZName] = TH1F(histDeltaZName, "recZ - scintZ (cm)", 500, -250, 250)
        self.stats["file0/hists/%s"%histResoRName] = TH1F(histResoRName, "|#vec{R}_{rec} - #vec{R}_{scint}| (cm)", 50, 0, 100)
        self.stats["file0/hists/%s"%histDriftRName] = TH1F(histDriftRName, "|#vec{R}_{rec}| - |#vec{R}_{scint}| (cm)", 200, -100, 100)
        self.stats["file0/hists/%s"%histRZName] = TH2F(histRZName, "R_{rec}^{radial} v.s Z_{rec} [cm]", 250, 0, 250, 500, -250.0, 250.)
        self.stats["file0/hists/%s"%histErecRrecName] = TH2F(histErecRrecName, "E_{rec} (a.u) v.s (R_{rec}^{radial})^{2} [m]", 60, 0.0, 6.0, 1000, 0.0, 10.)
        self.stats["file0/hists/%s"%histErecZrecName] = TH2F(histErecZrecName, "E_{rec} (a.u) v.s Z_{rec} [m]", 50, -2.5, 2.5, 1000, 0.0, 10.)
        self.stats["file0/hists/%s"%prfErecRrecName] = TProfile(prfErecRrecName, "E_{rec} (a.u) v.s (R_{rec}^{radial})^{2} [m^{2}]", 60, 0.0, 6.0, 0.0, 10., "e")
        self.stats["file0/hists/%s"%prfErecZrecName] = TProfile(prfErecZrecName, "E_{rec} (a.u) v.s Z_{rec} [m]", 50, -2.5, 2.5, 0.0, 10., "e")
        self.stats["file0/hists/%s"%prfQsumZtrueName] = TProfile(prfQsumZtrueName, "totalCharge v.s Z_{true} [m]", 50, -2.5, 2.5, 0.0, 500., "e")
        self.stats["file0/hists/%s"%prfQsumRtrueName] = TProfile(prfQsumRtrueName, "totalCharge v.s (R_{true}^{radial})^{2} [m^{2}]", 60, 0, 6.0, 0.0, 500., "e")
        self.stats["file0/hists/%s"%prfEratioRtrueName] = TProfile(prfEratioRtrueName, "E_{rec}/E_{true} [a.u] v.s (R_{true}^{radial})^{2} [m]", 60, 0., 6.0, 0.0, 5., "e")
        self.stats["file0/hists/%s"%prfEratioZtrueName] = TProfile(prfEratioZtrueName, "E_{rec}/E_{true} [a.u] v.s Z_{true} [m]", 50, -2.5, 2.5, 0.0, 5., "e")
        self.stats["file0/hists/%s"%prfRbiasRtrueName] = TProfile(prfRbiasRtrueName, "R_{bias} [cm] v.s (R_{true}^{radial})^{2} [m]", 60, 0., 6.0, -100., 100., "e")
        self.stats["file0/hists/%s"%prfZbiasZtrueName] = TProfile(prfZbiasZtrueName, "Z_{bias} [cm] v.s Z_{true}^{2} [m]", 50, -2.5, 2.5, -100., 100., "e")
        self.stats["file0/hists/%s"%histRbiasRtrueName] = TH2F(histRbiasRtrueName, "R_{bias} [cm] v.s (R_{true}^{radial})^{2} [m]", 60, 0., 6.0, 400, -100., 100.)
        self.stats["file0/hists/%s"%histZbiasZtrueName] = TH2F(histZbiasZtrueName, "Z_{bias} [cm] v.s Z_{true}^{2} [m]", 50, -2.5, 2.5, 400, -100., 100.)
        self.stats["file0/hists/%s"%vtxQualityName] = TH1F(vtxQualityName, "vertex quality", 2000, 0, 4000.)
    # reconstructed vertex/energy and comparison quantities
    recX = recTrigger.position().x()
    recY = recTrigger.position().y()
    recZ = recTrigger.position().z()
    recE = recTrigger.energy()
    vtxQuality = recTrigger.positionQuality()
    print "vtxQuality: ", vtxQuality
    vtxRec = ROOT.TVector3( recX, recY, recZ)
    vtxScint = ROOT.TVector3(self.scintX, self.scintY, self.scintZ)
    deltaX = recX - self.scintX
    deltaY = recY - self.scintY
    deltaZ = recZ - self.scintZ
    vtxReso = ROOT.TVector3( deltaX, deltaY, deltaZ)
    driftR = vtxRec.Mag() - vtxScint.Mag()
    resoR = vtxReso.Mag()
    radialR2 = recX*recX + recY*recY
    radialR = math.sqrt(recX*recX + recY*recY)
    radialTrueR2 = (self.scintX*self.scintX + self.scintY*self.scintY)
    radialTrueR = math.sqrt(self.scintX*self.scintX + self.scintY*self.scintY)
    self.stats["file0/hists/%s"%histRecXName].Fill(recX/units.centimeter)
    self.stats["file0/hists/%s"%histRecYName].Fill(recY/units.centimeter)
    self.stats["file0/hists/%s"%histRecZName].Fill(recZ/units.centimeter)
    self.stats["file0/hists/%s"%histDeltaXName].Fill(deltaX/units.centimeter)
    self.stats["file0/hists/%s"%histDeltaYName].Fill(deltaY/units.centimeter)
    self.stats["file0/hists/%s"%histDeltaZName].Fill(deltaZ/units.centimeter)
    self.stats["file0/hists/%s"%histResoRName].Fill(resoR/units.centimeter)
    self.stats["file0/hists/%s"%histDriftRName].Fill(driftR/units.centimeter)
    self.stats["file0/hists/%s"%histRZName].Fill(radialR/units.centimeter, recZ/units.centimeter)
    self.stats["file0/hists/%s"%histErecRrecName].Fill(radialR2/(units.meter*units.meter), recE)
    self.stats["file0/hists/%s"%histErecZrecName].Fill(recZ/units.meter, recE)
    self.stats["file0/hists/%s"%prfErecRrecName].Fill(radialR2/(units.meter*units.meter), recE)
    self.stats["file0/hists/%s"%prfErecZrecName].Fill(recZ/units.meter, recE)
    self.stats["file0/hists/%s"%prfQsumRtrueName].Fill(radialTrueR2/(units.meter*units.meter), self.totalObsQ)
    self.stats["file0/hists/%s"%prfQsumZtrueName].Fill(self.scintZ/units.meter, self.totalObsQ)
    self.stats["file0/hists/%s"%prfEratioRtrueName].Fill(radialTrueR2/(units.meter*units.meter), recE/self.qEdep)
    self.stats["file0/hists/%s"%prfEratioZtrueName].Fill(self.scintZ/units.meter, recE/self.qEdep)
    self.stats["file0/hists/%s"%prfRbiasRtrueName].Fill(radialTrueR2/(units.meter*units.meter), (radialR-radialTrueR)/units.centimeter)
    self.stats["file0/hists/%s"%prfZbiasZtrueName].Fill(self.scintZ/units.meter, (recZ-self.scintZ)/units.centimeter)
    self.stats["file0/hists/%s"%histRbiasRtrueName].Fill(radialTrueR2/(units.meter*units.meter), (radialR-radialTrueR)/units.centimeter)
    self.stats["file0/hists/%s"%histZbiasZtrueName].Fill(self.scintZ/units.meter, (recZ-self.scintZ)/units.centimeter)
    self.stats["file0/hists/%s"%vtxQualityName].Fill(vtxQuality)
    if self.firstEntry:
        # charge-ratio profiles and chi2 histograms, created once
        self.stats["file0/hists/%s"%prfqmuRName] = TProfile(prfqmuRName, "Q_{obs}/Q_{exp} v.s R [m]", 60, 0.0, 6.0, -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%prfqmuThetaName]=TProfile(prfqmuThetaName, "Q_{obs}/Q_{exp} (#cos#theta)", 50, 0.0, 1.0, -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%prfqmuPMTName] = TProfile(prfqmuPMTName, "Q_{obs}/Q_{exp} (PMTLocalId)", 192, 0.0, 192, -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%prfqmuQobsName]=TProfile(prfqmuQobsName, "Q_{obs}/Q_{exp} (Qobs)", 50, 0.0, 50., -1.0, 500.0, "e")
        self.stats["file0/hists/%s"%histChi2TestName] = TH1F(histChi2TestName, "#chi^{2} test", 150, 0, 300)
        self.stats["file0/hists/recPmtExpQPrf"]=TProfile("recPmtExpQPrf", "expected charge profile for each PMT", 192, 0, 192, 0.0, 50, "e")
    # expected charge per PMT from the reconstructed vertex/energy
    self.vertex = CLHEP.Hep3Vector(recX*units.mm, recY*units.mm, recZ*units.mm)
    self.siteName = "DayaBay"
    self.detName = "AD1"
    self.siteIds = {
        'DayaBay' : gbl.Site.kDayaBay,
        'LingAo' : gbl.Site.kLingAo,
        'Far' : gbl.Site.kFar,
        }
    self.detIds = {
        'AD1' : gbl.DetectorId.kAD1,
        'AD2' : gbl.DetectorId.kAD2,
        'AD3' : gbl.DetectorId.kAD3,
        'AD4' : gbl.DetectorId.kAD4,
        }
    site = self.siteIds[self.siteName]
    detector = self.detIds[self.detName]
    qtool = self.tool('IReconHelperTool', 'ExpQCalcTool')
    expq = qtool.expqcalc(site, detector, self.vertex)
    for localId in range(0, 192):
        # NOTE(review): magic normalization constants; 3.14159625 looks
        # like a typo for pi (3.14159265) — confirm before changing
        expq[localId] = expq[localId]*recE*3.14*103*103*9000*0.2/4.0/3.14159625
        self.expQ[localId] = expq[localId]
        self.stats["file0/hists/recPmtExpQPrf"].Fill(localId, self.expQ[localId])
    #
    # chi^2 between observed and expected charges over all good PMTs
    self.chi2 = 0.0
    svcmode = ServiceMode(recHdr.context(), 0)
    for localId in range(0, 192):
        ring = localId/24 + 1
        column = localId%24 + 1
        pmtId = AdPmtSensor(ring, column, recTrigger.detector().site(), recTrigger.detector().detectorId())
        chanId = self.cableSvc.feeChannelId(pmtId, svcmode)
        pmtCalib = self.calibSvc.pmtCalibData(pmtId, svcmode)
        if pmtCalib == None:
            # NOTE(review): '%I' is not a valid printf-style conversion —
            # probably meant '%d'; the message will not interpolate localId
            self.error("No calib data for pmt local ID:%I" %localId)
            return FAILURE
        if pmtCalib.m_status != PmtCalibData.kGood:
            continue
        pmtPos = self.pmtSvc.get(pmtId.fullPackedData()).localPosition()
        pmtNorm = self.pmtSvc.get(pmtId.fullPackedData()).localDirection()
        distVec = ROOT.TVector3( recX - pmtPos.x(), recY - pmtPos.y(), recZ - pmtPos.z() )
        dist = distVec.Mag()
        # angle between the PMT normal and the vertex direction
        costheta =(distVec.x()*pmtNorm.x() + \
                   distVec.y()*pmtNorm.y() + \
                   distVec.z()*pmtNorm.z() )/dist
        dist = dist/1000.
        self.stats["file0/hists/%s"%prfqmuRName].Fill(dist, self.obsQ[localId]/self.expQ[localId])
        self.stats["file0/hists/%s"%prfqmuThetaName].Fill(costheta, self.obsQ[localId]/self.expQ[localId])
        self.stats["file0/hists/%s"%prfqmuQobsName].Fill(self.obsQ[localId], self.obsQ[localId]/self.expQ[localId])
        self.stats["file0/hists/%s"%prfqmuPMTName].Fill(localId, self.obsQ[localId]/self.expQ[localId])
        self.chi2 += math.pow(self.obsQ[localId]-self.expQ[localId], 2) \
                     / self.expQ[localId]
    self.stats["file0/hists/%s"%histChi2TestName].Fill(self.chi2)
    #
    self.firstEntry = False
    return SUCCESS
# ROMon bindings: expose the run-database related classes at module level.
import sys, PyCintex as Dict

gbl = Dict.makeNamespace('')
ROMon = Dict.makeNamespace('ROMon')

# convenience aliases for the ROMon classes
File = ROMon.File
Run = ROMon.Run
RunDB = ROMon.RunDB
createBuffer = RunDB.createBuffer
def setUp(self):
    """Prepare the fixture: force-load the dictionary, cache namespaces."""
    # This is needed to force loading the dictionary
    PyCintex.makeClass('A::B::C::MyClass')
    make_ns = PyCintex.makeNamespace
    self.A = make_ns('A')
    self.std = make_ns('std')
    self.gbl = make_ns('')
__version__ = "$Revision: 1.8 $"
__author__ = "Sebastien Binet <*****@*****.**>"

__all__ = [
    'CpuHdr', 'IoHdr', 'MemHdr', 'PersHdr',
    'CpuData', 'IoData', 'MemData',
    #'PersData',
    ]

import ROOT
import PyCintex

# enable Cintex, then pull in the PerfMon event dictionary
PyCintex.Cintex.Enable()
PyCintex.loadDictionary('libPerfMonEventDict')
PerfMon = PyCintex.makeNamespace('PerfMon')

# re-export every public PerfMon class listed in __all__
for _cls in __all__:
    globals()[_cls] = getattr(PerfMon, _cls)
del _cls
#PersData = PerfMon.PersData
""" python -c "import urllib;print urllib.urlopen('http://www.cern.ch/~frankm').read()" """ import sys, time, platform import PyCintex as Dictionary lib_prefix = '' if platform.system()=='Linux': lib_prefix = 'lib' gbl = Dictionary.makeNamespace('') std = gbl.std Dictionary.loadDict(lib_prefix+'LHCbStatusDict') LHCbStatus=gbl.LHCbStatus srv=LHCbStatus.Server() s = srv.info() s.hlt.subfarm(0)='OT' print s.hlt.subfarm(0)