def __init__(self, cooldbconn, statusdbconn="", readoracle=False, loglevel=1, detStatus="", detStatusTag=""):
    """Set up the luminosity calculator.

    cooldbconn   -- COOL database string (e.g. COOLONL_TRIGGER/COMP200)
    statusdbconn -- COOL detector-status connection string (if needed)
    readoracle   -- force Oracle rather than SQLite replicas
    loglevel     -- verbosity; >1 also enables debug output when opening DBs
    detStatus    -- detector status requirements, e.g. 'SCTB G EMEC Y'
    detStatusTag -- detector status tag (if any)
    """
    verbose = loglevel > 1
    # main read-only COOL connection; failure to open is fatal
    try:
        self.cooldb = indirectOpen(cooldbconn, True, readoracle, verbose)
    except Exception as exc:
        print(exc)
        sys.exit(-1)
    # store other parameters
    self.loglevel = loglevel
    # detector-status selection; an empty string means "no status cut"
    self.detstatus = detStatus
    self.detstatustag = detStatusTag
    if self.detstatus != "":
        # a status requirement was given, so also open the status DB
        try:
            self.detstatusdb = indirectOpen(statusdbconn, True, readoracle, verbose)
        except Exception as exc:
            print(exc)
            sys.exit(-1)
def initDB(self, file=''):
    """Open the COOL and CORAL connections for the SCT conditions data.

    With no argument the central Oracle server is used; when *file* names
    an SQLite file, that replica is opened instead.  Sets self.db,
    self.session and self.connected.
    """
    if file == '':
        # default: central Oracle server
        self.db = indirectOpen('COOLONL_SCT/COMP200', oracle=True)
        coralstring = 'oracle://ATLAS_COOLPROD/ATLAS_COOLONL_SCT'
    else:
        # explicit SQLite replica
        self.db = indirectOpen('sqlite://;schema=' + file + ';dbname=COMP200')
        coralstring = 'sqlite:///' + file
    service = coral.ConnectionService()
    self.session = service.connect(coralstring)
    self.connected = True
def __init__(self, cooltdaqdbconn, cooltrigdbconn, coolstatusdbconn, oracle=False, loglevel=1):
    """Open the TDAQ, optional CTP and optional detector-status COOL
    databases and initialise the selection and cache state.

    cooltdaqdbconn   -- RunControl connection string (mandatory)
    cooltrigdbconn   -- CTP connection string ('' disables trigger data)
    coolstatusdbconn -- detector-status connection string ('' disables)
    oracle           -- force Oracle rather than SQLite replicas
    loglevel         -- 0 silent, 1 progress messages, >1 debug
    """
    debug = loglevel > 1
    chatty = loglevel >= 1
    # mandatory RunControl connection; abort on failure
    try:
        self.cooldb = indirectOpen(cooltdaqdbconn, True, oracle, debug)
        if chatty:
            print("Connected to", cooltdaqdbconn, "for RunControl data")
    except Exception as exc:
        print(exc)
        sys.exit(-1)
    # optional CTP connection
    self.usetrig = len(cooltrigdbconn) > 0
    if self.usetrig:
        try:
            self.cooltrigdb = indirectOpen(cooltrigdbconn, True, oracle, debug)
            if chatty:
                print("Connected to", cooltrigdbconn, "for CTP data")
        except Exception as exc:
            print(exc)
            sys.exit(-1)
    # optional detector-status connection
    if len(coolstatusdbconn) > 0:
        try:
            self.coolstatusdb = indirectOpen(coolstatusdbconn, True, oracle, debug)
            if chatty:
                print("Connected to", coolstatusdbconn, "for detector status data")
        except Exception as exc:
            print(exc)
            sys.exit(-1)
    # store other parameters
    self.loglevel = loglevel
    self.coolpath = '/TDAQ/RunCtrl'
    self.cooltlbpath = '/TRIGGER/LUMI'
    self.nowtime = time.time() * 1000000000
    # no restriction on the initial selection
    self.onlyRec = False
    self.mask = 0
    self.runType = ""
    self.filenameTag = ""
    # empty caches, filled on demand
    self.lbrunmap = {}
    self.runmap = {}
    self.triglbmap = {}
def get(cls, dbname):
    """Return a cached read-only COOL connection to *dbname*.

    The connection is opened (via Oracle) and cached in cls.cooldbs on
    first use; subsequent calls return the cached object.
    """
    # PEP 8 idiom: 'x not in y' rather than 'not x in y'
    if dbname not in cls.cooldbs:
        cls.cooldbs[dbname] = indirectOpen(dbname, readOnly=True, oracle=True)
    return cls.cooldbs[dbname]
def get_instance(cls, db_string, read_only=True, create=False):
    """Open a COOL database connection.

    db_string -- connection alias/string, resolved via cls.resolve_db_string
    read_only -- request a read-only connection (resolve_db_string may
                 override this)
    create    -- if True, create the database when it does not exist yet
    """
    res_db_string, read_only = cls.resolve_db_string(db_string, read_only)
    try:
        # indirectOpen prints to stdout; silence it for the duration of the open
        prev_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            connection = indirectOpen(res_db_string, readOnly=read_only, oracle=True)
        finally:
            sys.stdout = prev_stdout
    except Exception as e:
        # a missing database is fatal unless we were asked to create it
        if "The database does not exist" in e.args[0] and not create:
            log.info("Failed trying to connect to '%s'", res_db_string)
            raise
        from PyCool import cool
        dbService = cool.DatabaseSvcFactory.databaseService()
        connection = dbService.createDatabase(res_db_string)
    except Exception:
        # NOTE(review): this clause is unreachable — the 'except Exception
        # as e' above already catches everything; kept as-is for safety.
        log.error(sys.exc_info()[0])
        raise
    return connection
def getFieldForRun(run, readOracle=True, quiet=False, lumiblock=None): "Get the magnetic field currents (MagFieldDCSInfo) for a given run" # access the TDAQ schema to translate run number into timestamp # and get the filename tag newdb = (run >= 236107) if not quiet: print "Reading magnetic field for run %i, forceOracle %s CONDBR2 %s" % ( run, readOracle, newdb) # setup appropriate connection and folder parameters if newdb: dbname = 'CONDBR2' sorfolder = '/TDAQ/RunCtrl/SOR' fntname = 'T0ProjectTag' else: dbname = 'COMP200' sorfolder = '/TDAQ/RunCtrl/SOR_Params' fntname = 'FilenameTag' tdaqDB = indirectOpen('COOLONL_TDAQ/%s' % dbname, oracle=readOracle) if (tdaqDB is None): print "MagFieldUtils.getFieldForRun ERROR: Cannot connect to COOLONL_TDAQ/%s" % dbname return None sortime = 0 try: tdaqfolder = tdaqDB.getFolder(sorfolder) runiov = run << 32 obj = tdaqfolder.findObject(runiov, 0) payload = obj.payload() sortime = payload['SORTime'] fnt = payload[fntname] except Exception, e: print "MagFieldUtils.getFieldForRun ERROR accessing folder %s" % sorfolder print e
def GetDBConn(self, schema, db):
    """Return a (cached) database connection for (schema, db).

    Example: schema='COOLONL_TRIGGER', db='CONDBR2'.  For the special
    schema 'DEFECTS', *db* is interpreted as the defects-DB tag instead
    of a COOL instance name.
    """
    # serve repeated requests from the connection cache
    if (schema,db) in self.openConn:
        return self.openConn[(schema,db)]
    try:
        if schema=="DEFECTS":
            #from AthenaCommon.Logging import logging
            # this is needed because of some problem in DQUtils logger
            # the logger there, if called first makes the athena logger crash
            defdb = DQDefects.DefectsDB("COOLOFL_GLOBAL/CONDBR2",tag=db)
            # neutralise closeDatabase so cached defect DBs are never closed
            defdb.closeDatabase = lambda: None
            self.openConn[(schema,db)] = defdb
        else:
            readoracle=False
            # Richard: Tools which expect to read from the real
            # data conditions database have to be
            # setup in such a way as to ignore SQLite
            # replicas have to use 'indirectOpen' with
            # the oracle=True argument
            logging=False  # debug flag handed to indirectOpen
            from CoolConvUtilities.AtlCoolLib import indirectOpen
            self.openConn[(schema,db)] = indirectOpen("%s/%s"%(schema,db),True,readoracle, logging)
    except Exception:
        # any failure to open a connection is fatal for the tool
        import traceback
        traceback.print_exc()
        sys.exit(-1)
    return self.openConn[(schema,db)]
def openDB(self, dbstring, oracle=False, debug=False, force=False): if self.verbose: print 'LumiDBHandler.openDB(', dbstring, ') called' # Check if already open if dbstring in self.__class__.dbDict: # No force, just return if not force: if self.verbose: print 'LumiDBHandler.openDB - Connection already exists' return True # Yes it is # Force specified, close so we can re-open if self.verbose: print 'LumiDBHandler.openDB - Connection already exists, closing first due to force=True' self.closeDB(dbstring) # Try to open DB connection if self.verbose: print 'LumiDBHandler.openDB - Connecting to', dbstring try: db = indirectOpen(dbstring, readOnly=True, oracle=oracle, debug=debug) except Exception, e: print e return False
def bunchSpacingOfRun(runnumber,LB,verbose=False): if (runnumber<236107): print "WARNING BunchSpacingUtils don't work for run-1 data" return None tdaqDBName="COOLONL_TDAQ/CONDBR2" folder="/TDAQ/OLC/LHC/FILLPARAMS" iovtime=getTimeForLB(runnumber,LB,readOracle=True) if iovtime==0: print "ERROR, can't get start time of run %i, LB %i" % (runnumber,LB) return None obj=None db = None try: db=indirectOpen(tdaqDBName,oracle=True) f=db.getFolder(folder) obj=f.findObject(cool.ValidityKey(iovtime),0) except Exception,e: print e.args[0] if len(e.args)>1 and e.args[0].find("Object not found - 0"): print "WARNING No data found in folder %s for run/LB %i/%i" % (folder,runnumber,LB) else: print "BunchSpacingUtils: ERROR accesssing folder",folder,"on db",tdaqDBName print e if db is not None: db.closeDatabase() return None
def getTimeForLB(run, LB, readOracle=False):
    """Return the start time of the given run/LB from /TRIGGER/LUMI/LBLB,
    or 0 for bad/no data.

    Results are cached in the module-level _timeForLB map keyed by the
    packed (run << 32) + LB IOV.
    """
    runiov = (run << 32) + LB
    # serve repeated queries from the cache ('in' replaces the removed
    # dict.has_key, keeping the code Python-2/3 compatible)
    if runiov in _timeForLB:
        print("getTimeForLB: Returning cached time for run %i, LumiBlock %i " % (run, LB))
        return _timeForLB[runiov]
    # run-2 data lives in CONDBR2, run-1 data in COMP200
    if run >= 236107:
        dbname = "CONDBR2"
    else:
        dbname = "COMP200"
    #print("Querying DB for time of run %i LB %i" % (run, LB))
    try:
        trigDB = indirectOpen('COOLONL_TRIGGER/%s' % dbname, oracle=readOracle)
        if trigDB is None:
            # fixed: the message used to name COOLONL_TDAQ although the
            # connection attempted above is COOLONL_TRIGGER
            print("MagFieldUtils.getTimeForLB ERROR: Cannot connect to COOLONL_TRIGGER/%s" % dbname)
            return 0
        lblbfolder = trigDB.getFolder('/TRIGGER/LUMI/LBLB')
        obj = lblbfolder.findObject(runiov, 0)
        payload = obj.payload()
        lbtime = payload['StartTime']
        # cache for subsequent lookups
        _timeForLB[runiov] = lbtime
        trigDB.closeDatabase()
        return lbtime
    except Exception as e:
        print("MagFieldUtils.getTimeForLB WARNING: accessing /TRIGGER/LUMI/LBLB for run %i, LB %i" % (run, LB))
        print(e)
        return 0
def __init__(self, cooldbconn, **kw):
    """Initialize the tool.

    cooldbconn = main database connection (to /TRIGGER/...) so should be COOLONL.
    kw = argument list. Clients should stay up-to-date with CoolLumiCalc
    when they supply optional arguments. For more information, try
    help(LumiBlockComps.LumiCalculator.coolLumiCalc)
    child arguments: coolfolderlumi, useprescale
    """
    # offline-luminosity settings, each overridable through kw
    self.lumidbname = kw.pop('lumidb', 'COOLOFL_TRIGGER/COMP200')
    self.lumifoldername = kw.pop('coolfolderlumi', '/TRIGGER/OFLLUMI/LBLESTOFL')
    self.lumitag = kw.pop('coolfoldertag', 'OflLumi-7TeV-002')
    self.lumimethod = kw.pop('lumimethod', 'ATLAS_PREFERRED')
    self.useprescale = kw.pop('useprescale', False)
    # merge parent defaults into kw without clobbering explicit choices:
    # keep a copy, write the defaults, then re-apply the user's values
    kwkeep = kw.copy()
    kw.update({
        'readoracle': False,
        'loglevel': 1,
        'detStatus': "",
        'detStatusTag': ""
    })  #supply defaults
    kw.update(kwkeep)  #restore user choices
    print("Trying to open", cooldbconn)
    coolLumiCalc.__init__(self, cooldbconn, **kw)
    #also need the online database
    try:
        print("Now trying to open", self.lumidbname)
        self.cooldblumi = indirectOpen(self.lumidbname, True,
                                       kw.get('readoracle', False),
                                       kw.get('loglevel', 1) > 1)
    except Exception as e:
        print(e)
        sys.exit(-1)
    pass
def openDB(self):
    """Open the trigger COOL database named by self.trigProdDbName.

    Returns True on success; on failure the exception is printed and
    False is returned.
    """
    opened = True
    try:
        self.trigProdDb = indirectOpen(self.trigProdDbName, True, False, False)
        if self.verbose:
            print('Connected to', self.trigProdDbName, 'for Trigger data')
    except Exception as exc:
        print(exc)
        opened = False
    return opened
def __getCountsForSMKandTrigNames(smkByRun, triggerNames, levels):
    """Read per-run L2 and/or EF trigger counts from COOL.

    smkByRun     -- {run number: super master key}
    triggerNames -- {smk: iterable of name objects with .level ('L2'/'EF')
                     and .counter attributes}
    levels       -- bitmask selecting the levels: 1 = L2, 2 = EF, 3 = both
    Returns the L2 map, the EF map, or the (L2, EF) pair of
    {run: TriggerRates}, depending on *levels*; None otherwise.
    """
    doL2 = (levels & 1) != 0
    doEF = (levels & 2) != 0
    if doL2:
        l2Names = {}
        l2counts = {}
    if doEF:
        efNames = {}
        efcounts = {}
    runlist = sorted(smkByRun.keys())
    smks = set(smkByRun.values())
    # split the trigger names of each SMK by level, once per SMK
    if doL2:
        for smk in smks:
            l2Names[smk] = [tn for tn in triggerNames[smk] if tn.level == 'L2']
    if doEF:
        for smk in smks:
            efNames[smk] = [tn for tn in triggerNames[smk] if tn.level == 'EF']
    from CoolConvUtilities.AtlCoolLib import indirectOpen
    conn = indirectOpen("COOLONL_TRIGGER/COMP200", True, True, False)
    if doL2:
        l2folder = conn.getFolder("/TRIGGER/LUMI/LVL2COUNTERS")
    if doEF:
        effolder = conn.getFolder("/TRIGGER/LUMI/EFCOUNTERS")
    lbfolder = conn.getFolder("/TRIGGER/LUMI/LBLB")
    lbinfo = _getLBInfo(runlist, lbfolder)
    for run in runlist:
        smk = smkByRun[run]
        # lb = -1 in _getCountsInFolder selects the whole run
        if doL2:
            l2names_thissmk = l2Names[smk]
            if l2names_thissmk:
                l2counts[run] = TriggerRates(
                    "L2", l2names_thissmk,
                    _getCountsInFolder(run, -1,
                                       [tn.counter for tn in l2names_thissmk],
                                       l2folder, lbinfo[run]))
        if doEF:
            efnames_thissmk = efNames[smk]
            if efnames_thissmk:
                efcounts[run] = TriggerRates(
                    "EF", efnames_thissmk,
                    _getCountsInFolder(run, -1,
                                       [tn.counter for tn in efnames_thissmk],
                                       effolder, lbinfo[run]))
    # return shape depends on which levels were requested
    if levels & 3 == 1:
        return l2counts
    elif levels & 3 == 2:
        return efcounts
    elif levels & 3 == 3:
        return l2counts, efcounts
    return None
def getSMK(runlist):
    """Return {run: super master key} read from /TRIGGER/HLT/HltConfigKeys."""
    from CoolConvUtilities.AtlCoolLib import indirectOpen
    db = indirectOpen("COOLONL_TRIGGER/COMP200", True, True, False)
    keyfolder = db.getFolder("/TRIGGER/HLT/HltConfigKeys")
    result = {}
    for run in runlist:
        # IOV key for the start of the run (LB 0)
        obj = keyfolder.findObject(run << 32, 0)
        result[run] = obj.payload()['MasterConfigurationKey']
    return result
def GetConnection(dbconn, verbosity=0):
    """Open a read-only COOL connection for the alias 'COMP' or 'OFLP'.

    Raises RuntimeError for an unknown alias; exits the process if the
    connection itself cannot be opened.
    """
    aliases = {
        "COMP": 'COOLONL_TRIGGER/COMP200',
        "OFLP": 'COOLONL_TRIGGER/OFLP200',
    }
    if dbconn not in aliases:
        raise RuntimeError("Can't connect to COOL db %s" % dbconn)
    connection = aliases[dbconn]
    try:
        openConn = indirectOpen(connection, readOnly=True, oracle=True, debug=(verbosity > 0))
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(-1)
    return openConn
def getL2Counts(run, lb, chainCounter):
    """
    returns a list of Run/LB indexed CountRec objects
    if lb<0 then the list contains the entire run, otherwise it has just one entry for the given run/lb
    """
    with timer("access COOL"):
        from CoolConvUtilities.AtlCoolLib import indirectOpen
        conn = indirectOpen("COOLONL_TRIGGER/COMP200", True, True, False)
        folder = conn.getFolder('/TRIGGER/LUMI/LVL2COUNTERS')
        if lb >= 0:
            # single lumiblock: one object at the packed run/LB IOV
            runlb = (run << 32) + lb
            objs = [folder.findObject(runlb, 0)]
        else:
            # whole run: browse all IOVs between run start and run end
            start = run << 32
            end = ((run + 1) << 32) - 1
            objs = folder.browseObjects(
                start, end,
                cool.ChannelSelection(
                    0, 0, cool.ChannelSelection.sinceBeforeChannel))
    res = []
    pos = None
    with timer("read the blob"):
        # pull (IOV, raw counter blob) pairs out of the COOL objects
        sl = [(o.since(), o.payload()['Data'].read()) for o in objs]
    with timer("unpack"):
        for (runlb, s) in sl:
            run = runlb >> 32
            lb = runlb & 0xFFFFFFFF
            if len(s) == 0:
                # empty blob: record zero counts for this LB
                res += [(run, lb, CountRec(chainCounter, 0, 0, 0, 0))]
            else:
                # the blob is a sequence of 20-byte records
                if len(s) % 20 != 0:
                    raise RuntimeError, "the length of the data vector %i is not a multiple of 20" % len(s)
                #__printCounts(s)
                # locate the record for this chain once; the offset is the
                # same in every blob of the run
                if pos == None:
                    pos = __findPos(chainCounter, s)
                if pos == None:
                    return None
                res += [(run, lb, __getCounts(pos, s))]
    return res
def getLArFormatForRun(run, readOracle=True, quiet=False, connstring=None):
    """Return a LArRunInfo built from /LAR/Configuration/RunLog for *run*,
    or None if the folder holds no information for that run.

    run        -- run number (the IOV queried is run << 32)
    readOracle -- force Oracle rather than an SQLite replica
    quiet      -- unused here; kept for interface compatibility
    connstring -- explicit COOL connection string; defaults to
                  COOLONL_LAR/<current conditions DB instance>
    """
    from AthenaCommon.Logging import logging
    mlog_LRF = logging.getLogger('getLArRunFormatForRun')
    # connstring = "sqlite://;schema=rundb.db;dbname=CONDBR2"
    if connstring is None:
        from IOVDbSvc.CondDB import conddb
        connstring = "COOLONL_LAR/" + conddb.dbdata
    mlog_LRF.info("Connecting to database " + connstring)
    print("run=", run)
    runDB = indirectOpen(connstring, oracle=readOracle)
    if runDB is None:
        mlog_LRF.error("Cannot connect to database %s", connstring)
        del mlog_LRF
        # fixed: the connstring was previously passed as a second argument
        # to RuntimeError, so the %s was never substituted
        raise RuntimeError(
            "getLArFormatForRun ERROR: Cannot connect to database %s" % connstring)
    try:
        folder = runDB.getFolder('/LAR/Configuration/RunLog')
        runiov = run << 32
        obj = folder.findObject(runiov, 0)
        payload = obj.payload()
        # 'runformat' avoids shadowing the builtin format()
        runformat = payload['format']
        nSamples = ord(payload['nbOfSamples'])
        gainType = payload['gainType']
        runType = payload['runType']
        latency = ord(payload['l1aLatency'])
        firstSample = ord(payload['firstSample'])
    except Exception:
        mlog_LRF.warning(
            "No information in /LAR/Configuration/RunLog for run %i" % run)
        #mlog_LRF.warning(e)
        del mlog_LRF
        # fixed: the connection used to be leaked on this path
        runDB.closeDatabase()
        return None
    runDB.closeDatabase()
    mlog_LRF.info("Found info for run %d" % run)
    del mlog_LRF
    return LArRunInfo(nSamples, gainType, latency, firstSample, runformat, runType)
def GetConnection(dbconn, verbosity=0):
    """Resolve *dbconn* to a COOL connection string and open it read-only.

    Accepts the aliases COMP/OFLP/CONDBR2 or a path to an SQLite file
    '<name>.db' (the upper-cased file stem becomes the dbname); raises
    RuntimeError for anything else and exits if the open fails.
    """
    aliases = {
        "COMP": 'COOLONL_TRIGGER/COMP200',
        "OFLP": 'COOLONL_TRIGGER/OFLP200',
        "CONDBR2": 'COOLONL_TRIGGER/CONDBR2',
    }
    m = match(r".*?([^/.]+)\.db", dbconn)
    if dbconn in aliases:
        connection = aliases[dbconn]
    elif m:
        dbname = m.group(1).upper()
        connection = "sqlite://;schema=%s;dbname=%s;" % (dbconn, dbname)
    else:
        raise RuntimeError("Can't connect to COOL db %s" % dbconn)
    try:
        openConn = indirectOpen(connection, readOnly=True, oracle=True, debug=(verbosity > 0))
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(-1)
    return openConn
def testRun(globaltag, run, tofs, nevt, deltat, configfile, debug=0): "Execute the test run with given parameters" print "Simulate tag %s run %i with %i events time increment %i" % ( globaltag, run, nevt, deltat) starttime = time.time() # find run start time from SORParams folder tdaqDB = indirectOpen('COOLONL_TDAQ/COMP200', oracle=True) if (tdaqDB is None): print "ERROR: Cannot connect to COOLONL_TDAQ/COMP200" return 1 sortime = 0 try: tdaqfolder = tdaqDB.getFolder('/TDAQ/RunCtrl/SOR_Params') runiov = run << 32 obj = tdaqfolder.findObject(runiov, 0) payload = obj.payload() sortime = payload['SORTime'] / 1.E9 + tofs except Exception, e: print "ERROR accessing /TDAQ/RunCtrl/SOR_Params" print e
def openDB(self, dbstring, oracle=False, debug=False, force=False):
    """Open (or re-use) a COOL connection to *dbstring*.

    Connections are cached in the class-level dbDict; with force=True an
    existing connection is closed and re-opened.  Returns True on success,
    False (after printing the exception) if the open fails.
    """
    chatty = self.verbose
    if chatty:
        print('LumiDBHandler.openDB(', dbstring, ') called')
    if dbstring in self.__class__.dbDict:
        if not force:
            # already open and no forced re-open requested
            if chatty:
                print('LumiDBHandler.openDB - Connection already exists')
            return True
        # forced re-open: drop the existing connection first
        if chatty:
            print(
                'LumiDBHandler.openDB - Connection already exists, closing first due to force=True'
            )
        self.closeDB(dbstring)
    if chatty:
        print(('LumiDBHandler.openDB - Connecting to', dbstring))
    try:
        db = indirectOpen(dbstring, readOnly=True, oracle=oracle, debug=debug)
    except Exception as exc:
        print(exc)
        return False
    # remember the open connection for later calls
    self.__class__.dbDict[dbstring] = db
    return True
if run_end_time < 0 or lbData.end_time > run_end_time: run_end_time = lbData.end_time del lb itr.close() except Exception,e: log.error('Reading data from '+lvl1lbdata_foldername+' failed: '+str(e)) ###---------------------------------------------------------------------------------- ### Read luminosity information ### try: #dbTdaq=dbSvc.openDatabase(dbTdaqString, False) dbTdaq = indirectOpen('COOLONL_TDAQ/COMP200', True, False, False) log.info("Opened database: "+dbTdaqString) except Exception,e: log.error('Error opening database:'+str(e)) sys.exit(-1) if not dbTdaq.existsFolder(lumi_foldername) : print "Folder",lumi_foldername,"not found" dbTaq.closeDatabase() sys.exit(-1) try: folder=dbTdaq.getFolder(lumi_foldername) itr=folder.browseObjects(run_beg_time, run_end_time, cool.ChannelSelection(0)) while itr.goToNext():
def bunchSpacingOfRun(runnumber, LB, verbose=False):
    """Return the minimum bunch spacing (in 25 ns buckets) of the fill at
    the given run/LB, derived from the BCID mask in
    /TDAQ/OLC/LHC/FILLPARAMS, or None if it cannot be determined.

    Run-1 data (runs < 236107) is not supported.
    """
    if runnumber < 236107:
        print("WARNING BunchSpacingUtils don't work for run-1 data")
        return None
    tdaqDBName = "COOLONL_TDAQ/CONDBR2"
    folder = "/TDAQ/OLC/LHC/FILLPARAMS"
    # the FILLPARAMS folder is time-indexed: translate run/LB to a timestamp
    iovtime = getTimeForLB(runnumber, LB, readOracle=True)
    if iovtime == 0:
        print("ERROR, can't get start time of run %i, LB %i" % (runnumber, LB))
        return None
    obj = None
    db = None
    try:
        db = indirectOpen(tdaqDBName, oracle=True)
        f = db.getFolder(folder)
        obj = f.findObject(cool.ValidityKey(iovtime), 0)
    except Exception as e:
        print(e.args[0])
        # fixed: str.find() returns -1 (truthy) when the text is absent,
        # so the original condition was effectively inverted; test
        # membership instead
        if len(e.args) > 1 and "Object not found - 0" in e.args[0]:
            print("WARNING No data found in folder %s for run/LB %i/%i" %
                  (folder, runnumber, LB))
        else:
            print("BunchSpacingUtils: ERROR accesssing folder", folder, "on db",
                  tdaqDBName)
            print(e)
        if db is not None:
            db.closeDatabase()
        return None
    pl = obj.payload()
    buf = pl["BCIDmasks"]
    # scan the BCID mask, collecting the distance between consecutive
    # filled buckets
    bucketDiff = 0
    firstFilled = -1
    lastFilled = -1
    bucketDiffs = []
    for iBucket, filled in enumerate(buf):
        if filled != 0:
            if (verbose):
                print("Bucket", iBucket, "filled")
            lastFilled = iBucket
            if firstFilled < 0:
                firstFilled = iBucket
                if (verbose):
                    print("First filled bucket=", iBucket)
            else:
                if (verbose):
                    print("Bucket #%i, bunch spacing=%i * 25ns" %
                          (iBucket, bucketDiff))
                bucketDiffs.append(bucketDiff)
            # restart the distance counter at every filled bucket
            bucketDiff = 1
        else:  # not filled
            if (verbose):
                print("Bucket", iBucket, "not filled")
            bucketDiff += 1
    # handle wrap-around: distance from the last filled bucket, around the
    # end of the orbit, back to the first filled one
    if (firstFilled >= 0 and lastFilled > 0):
        bucketDiffs.append(len(buf) - lastFilled + firstFilled)
        if (verbose):
            print("Bunchdiff at wrap-around:",
                  (len(buf) - lastFilled + firstFilled))
    if db is not None:
        db.closeDatabase()
    if len(bucketDiffs) == 0:
        return None
    else:
        return min(bucketDiffs)
if (lbtime > 0): # use this time instead of SORtime if not quiet: print "Lumiblock starts %i seconds from start of run" % int( (lbtime - sortime) / 1.E9) sortime = lbtime else: print "MagFieldUtils.getFieldForRun ERROR accessing /TRIGGER/LUMI/LBLB" print "Fall back on SOR time from %s" % sorfolder lbtime = sortime # if we do not have a valid time, exit if (sortime == 0): return None # now having got the start of run timestamp, lookup the field info in DCS dcsDB = indirectOpen('COOLOFL_DCS/%s' % dbname, oracle=readOracle) if (dcsDB is None): print "MagFieldUtils.getFieldForRun ERROR: Cannot connect to COOLOFL_DCS/%s" % dbname return None data = None try: # map of expected channel names to slots in data[] variable # follows original order from run1/COMP200 # has changed in CONDBR2, but use of named channels recovers this currentmap = { 'CentralSol_Current': 0, 'CentralSol_SCurrent': 1, 'Toroids_Current': 2, 'Toroids_SCurrent': 3 } dcsfolder = dcsDB.getFolder('/EXT/DCS/MAGNETS/SENSORDATA')
# write here the run number and lumiblock of a time after the HV has stabilized to the version that has to be corrected offline # and for which we don't do online re-calibration RunNumber = 207306 LumiBlock = 1 GlobalTag = 'COMCOND-ES1PA-006-01' Geometry = 'ATLAS-GEO-18-00-00' from RecExConfig.RecFlags import rec rec.RunNumber.set_Value_and_Lock(RunNumber) from PyCool import cool from CoolConvUtilities.AtlCoolLib import indirectOpen trigDB = indirectOpen('COOLONL_TRIGGER/COMP200', oracle=True) trigfolder = trigDB.getFolder('/TRIGGER/LUMI/LBLB') runiov = (RunNumber << 32) + LumiBlock print " runiov ", runiov obj = trigfolder.findObject(runiov, 0) payload = obj.payload() TimeStamp = payload['StartTime'] / 1000000000L trigDB.closeDatabase() # this setting is just to get directly pileup noise as b and write back the same in the database... from CaloTools.CaloNoiseFlags import jobproperties jobproperties.CaloNoiseFlags.FixedLuminosity.set_Value_and_Lock(1.) #TimeStamp = 1274368420 print " TimeStamp : ", TimeStamp
def getLBsToIgnore(runnum,burstsFromCosmic=True,bulkProcessing=False, dropNonReady=True): badLBs=set() # 1. Get LB range for this run and LBs without "ATLAS-READY" nReadyLBs=0 nNotReadyLBs=0 tdaqdb=indirectOpen('COOLONL_TDAQ/CONDBR2') if (tdaqdb is None): print ("ERROR: Can't access COOLONL_TDAQ/CONDBR2") sys.exit(-1) fmode=tdaqdb.getFolder("/TDAQ/RunCtrl/DataTakingMode") since=(runnum<<32)+1 until=((1+runnum)<<32)-1 maxLb=0 minLb=1 itr=fmode.browseObjects(since,until,cool.ChannelSelection.all()) while itr.goToNext(): obj=itr.currentRef() pl=obj.payload() isReady=pl["ReadyForPhysics"] lb1=max(since,obj.since()) & 0xFFFFFFFF ts2=obj.until() if ts2<until: #ignore the IOV beyond the end of the run lb2=ts2 & 0xFFFFFFFF if lb2>maxLb: maxLb=lb2 if not isReady: if dropNonReady: print ("Ignoring LumiBlocks %i - %i not ATLAS READY" % (lb1,lb2)) badLBs.update(range(lb1,lb2)) nNotReadyLBs+=(lb2-lb1) else: nReadyLBs+=(lb2-lb1) pass pass pass pass itr.close() tdaqdb.closeDatabase() print ("Run %i goes up to LB %i" % (runnum,maxLb)) #2. 
Get problematic LBs #2.1 Look for collisions in empty bunches - Fetch from DQ Web Server source = 'tier0' stream = 'physics_CosmicCalo' serverstring="https://%[email protected]" % password server = xmlrpclib.ServerProxy(serverstring) multicall = xmlrpclib.MultiCall(server) # Look for the highest(latest) processing version of CosmicCalo by retrieving amitag run_spec = {'source': source, 'high_run': runnum, 'low_run': runnum} multicall.get_procpass_amitag_mapping(run_spec) results = multicall() if len(results[0])==0: print ("Nothing found about run",runnum,"on DQM server") proc = 0 try: list = results[0][str(runnum)] for item in list: if ("f" in item[2] and bulkProcessing and "CosmicCalo" in item[1] and item[0]>proc): proc = 2 if ("x" in item[2] and (not bulkProcessing) and "CosmicCalo" in item[1] and item[0]>proc): print (item) proc = 1 pass pass except Exception as e: print ("ERROR: can't retrieve the AMI Tag") print (e) if (proc == 0): print ("I haven't found any processing version for CosmicCalo. 
Assume express processing") proc=1 try: multicall = xmlrpclib.MultiCall(server) run_spec = {'source': source, 'high_run': runnum, 'stream': stream, 'proc_ver': proc, 'low_run': runnum} multicall.get_timestamp(run_spec) results=multicall() timestamp=results[0][str(runnum)] from time import asctime,localtime print ("DQM server timestamp:", asctime(localtime(timestamp))) print ("Now: ",asctime()) except Exception as e: print ("ERROR: can't get timestamp from DQM server") print (e) multicall = xmlrpclib.MultiCall(server) run_spec = {'source': source, 'high_run': runnum, 'stream': stream, 'proc_ver': proc, 'low_run': runnum} multicall.get_dqmf_all_results(run_spec,'LAr/LAR_GLOBAL/Collisions-Bkg/LArCollTimeLumiBlockTimeCut') results = multicall() RE = re.compile(r'\((?P<lb>\S+)\.0*\)') try: list = results[0][str(runnum)] for item in list: if 'NBins' in item: continue m = RE.search(item).groupdict() lb=int(m['lb']) ncollisions=int(results[0][str(runnum)][item]) if ncollisions > 50: badLBs.add(lb) print ("LumiBlock %i ignored because it is empty bunches are polluted with collisions" % lb) pass pass except Exception as e: print ("ERROR: can't get LArCollTimeLumiBlockTimeCut from DQM server") print (e) if (burstsFromCosmic):# CosmicCalo stream : from the DQ web histoName = {'EMBC':'BarrelC','EMBA':'BarrelA','EMECC':'EMECC','EMECA':'EMECA'} for iPart in histoName.keys(): multicall = xmlrpclib.MultiCall(server) #multicall.get_dqmf_all_results(run_spec,'LAr/%s/Noise/Partition/NoisyEvent_TimeVeto_%s'%(iPart,histoName[iPart])) multicall.get_dqmf_all_results(run_spec,'/LAr/%s/Occupancy-Noise/Noise_Burst/NoisyEvent_TimeVeto_%s'%(iPart,iPart)) results = multicall() try: resultlist = results[0][str(runnum)] #print ("Got %i items for NoisyEvent_TimeVeto_%s" % (len(list),histoName[iPart])) for item in resultlist: if 'NBins' in item: continue m = RE.search(item).groupdict() lb=int(m['lb']) yieldbursts=float(results[0][str(runnum)][item]) if yieldbursts > 0: badLBs.add(lb) print 
("LumiBlock %i ignored because it contains bursts in CosmicCalo stream in %s" % (lb,iPart)) pass pass except Exception as e: print ("ERROR: can't get NoisyEvent from DQM server") print (e) del multicall del server #3.2 Get defects from Defects DB db = DefectsDB() lar_defects = [d for d in (db.defect_names | db.virtual_defect_names) if d.startswith("LAR")] defects = db.retrieve((runnum, minLb), (runnum, maxLb), lar_defects) for defect in defects: part=defect.channel.split("_")[1] #3.2.1 Check for HV trip if "HVTRIP" in defect.channel and defect.present: for lb in range(defect.since.lumi,defect.until.lumi): badLBs.add(lb) print ("LumiBlock %i ignored because of a HV trip in partition %s" % (lb,part)) pass pass #3.2.2 Check for Noise Bursts from the defects if (not burstsFromCosmic): if not bulkProcessing: if "NOISEBURST" in defect.channel and defect.present: for lb in range(defect.since.lumi,defect.until.lumi): badLBs.add(lb) print ("LumiBlock %i ignored because of a noise burst in partition %s" % (lb,part)) pass pass else: #not bulk processing if "SEVNOISEBURST" in defect.channel and defect.present: for lb in range(defect.since.lumi,defect.until.lumi): badLBs.add(lb) print ("LumiBlock %i ignored because of a severe noise burst in partition %s" % (lb,part)) pass pass del db #Close Defects DB nBadLBs=len(badLBs) if dropNonReady: nBadLBs=nBadLBs-nNotReadyLBs print ("Found %i not-ready LBs, %i atlas-ready LBs and %i bad LBs" % (nNotReadyLBs,nReadyLBs,nBadLBs)) return badLBs
doPedestalPlots=False, doEtCorrelationPlots=False, doCaloQualCut=False) ToolSvc += CfgMgr.LVL1__TrigT1CaloLWHistogramTool("TrigT1CaloLWHistogramTool", LVL1ConfigSvc="") ServiceMgr += CfgMgr.THistSvc() ServiceMgr.THistSvc.Output = ["AANT DATAFILE='output.root' OPT='RECREATE'"] ServiceMgr.THistSvc.Output += [ "RAMPDATA DATAFILE='graphs.root' OPT='RECREATE'" ] svcMgr.IOVDbSvc.Folders += [ "<dbConnection>sqlite://;schema=/afs/cern.ch/user/l/l1ccalib/w0/DaemonData/reference/calibReferences.sqlite;dbname=L1CALO</dbConnection>/TRIGGER/L1Calo/V1/References/FineTimeReferences" ] conddb.addFolderWithTag("TRIGGER", "/TRIGGER/L1Calo/V2/Calibration/Calib1/PprChanCalib", "HEAD") # override timestamp with SOR time of run # this overcomes strange timestamps stored in the EventInfo # code taken from CoolConvUtilities/MagFieldUtils.py from CoolConvUtilities.AtlCoolLib import indirectOpen foldername_ = '/TDAQ/RunCtrl/SOR' if svcMgr.IOVDbSvc.DBInstance == 'CONDBR2' else '/TDAQ/RunCtrl/SOR_Params' db_ = indirectOpen('COOLONL_TDAQ/%s' % svcMgr.IOVDbSvc.DBInstance, oracle=True) if not db_: raise RuntimeError("Couldn't open connection to TDAQ DB") folder_ = db_.getFolder(foldername_) obj_ = folder_.findObject(GetRunNumber() << 32, 0) payload_ = obj_.payload() svcMgr.IOVDbSvc.forceTimestamp = payload_['SORTime'] / 1000000000
def getLBsToIgnore(runnum, burstsFromCosmic=True, bulkProcessing=False, dropNonReady=True): badLBs = set() # 1. Get LB range for this run and LBs without "ATLAS-READY" nReadyLBs = 0 nNotReadyLBs = 0 tdaqdb = indirectOpen('COOLONL_TDAQ/CONDBR2') if (tdaqdb is None): print "ERROR: Can't access COOLONL_TDAQ/CONDBR2" sys.exit(-1) fmode = tdaqdb.getFolder("/TDAQ/RunCtrl/DataTakingMode") since = (runnum << 32) + 1 until = ((1 + runnum) << 32) - 1 maxLb = 0 minLb = 1 itr = fmode.browseObjects(since, until, cool.ChannelSelection.all()) while itr.goToNext(): obj = itr.currentRef() pl = obj.payload() isReady = pl["ReadyForPhysics"] lb1 = max(since, obj.since()) & 0xFFFFFFFF ts2 = obj.until() if ts2 < until: #ignore the IOV beyond the end of the run lb2 = ts2 & 0xFFFFFFFF if lb2 > maxLb: maxLb = lb2 if not isReady: if dropNonReady: print "Ignoring LumiBlocks %i - %i not ATLAS READY" % (lb1, lb2) badLBs.update(xrange(lb1, lb2)) nNotReadyLBs += (lb2 - lb1) else: nReadyLBs += (lb2 - lb1) pass pass pass pass itr.close() tdaqdb.closeDatabase() print "Run %i goes up to LB %i" % (runnum, maxLb) #2. 
Get problematic LBs #2.1 Look for collisions in empty bunches - Fetch from DQ Web Server source = 'tier0' stream = 'physics_CosmicCalo' serverstring = "https://%[email protected]" % password server = xmlrpclib.ServerProxy(serverstring) multicall = xmlrpclib.MultiCall(server) # Look for the highest(latest) processing version of CosmicCalo by retrieving amitag run_spec = {'source': source, 'high_run': runnum, 'low_run': runnum} multicall.get_procpass_amitag_mapping(run_spec) results = multicall() if len(results[0]) == 0: print "Nothing found about run", runnum, "on DQM server" proc = 0 try: list = results[0][str(runnum)] for item in list: if ("f" in item[2] and bulkProcessing and "CosmicCalo" in item[1] and item[0] > proc): proc = 2 if ("x" in item[2] and (not bulkProcessing) and "CosmicCalo" in item[1] and item[0] > proc): print item proc = 1 pass pass except Exception, e: print "ERROR: can't retrieve the AMI Tag" print e
def testStatusCutsToRange():
    """Exercise statusCutsToRange on /GLOBAL/DETSTATUS/LBSUMM over the full
    validity range with the status requirement 'TRTB 3'."""
    conn = indirectOpen('COOLOFL_GLOBAL/COMP200')
    statusCutsToRange(conn, '/GLOBAL/DETSTATUS/LBSUMM', 0,
                      cool.ValidityKeyMax, 'TRTB 3')
if run_end_time < 0 or lbData.end_time > run_end_time: run_end_time = lbData.end_time del lb itr.close() except Exception, e: log.error('Reading data from ' + lvl1lbdata_foldername + ' failed: ' + str(e)) ###---------------------------------------------------------------------------------- ### Read luminosity information ### try: #dbTdaq=dbSvc.openDatabase(dbTdaqString, False) dbTdaq = indirectOpen('COOLONL_TDAQ/COMP200', True, False, False) log.info("Opened database: " + dbTdaqString) except Exception, e: log.error('Error opening database:' + str(e)) sys.exit(-1) if not dbTdaq.existsFolder(lumi_foldername): print "Folder", lumi_foldername, "not found" dbTaq.closeDatabase() sys.exit(-1) try: folder = dbTdaq.getFolder(lumi_foldername) itr = folder.browseObjects(run_beg_time, run_end_time, cool.ChannelSelection(0))
'bs_metadata') or inputFileSummary.__contains__( 'run_number'): # get the run number if inputFileSummary.__contains__('bs_metadata'): run_number = inputFileSummary['bs_metadata'][ 'run_number'] else: run_number = int(inputFileSummary['run_number'][0]) pointintime = (int(run_number) << 32) # try to connect to the COOL database from PyCool import cool from CoolConvUtilities.AtlCoolLib import indirectOpen connstring = "COOLONL_TRIGGER/CONDBR2" # get the MCK from COOL coolDB = indirectOpen(connstring, oracle='True') if coolDB is None: log.error( "Unable to connect to %s to get MCK from COOL" % connstring) else: # try to get the MCK out of COOL foldername = 'MenuAwareMonConfigKey' MCKfolder = coolDB.getFolder('/TRIGGER/HLT/' + foldername) release_tag = foldername + '-' + mam.ms.current_athena_version # try retrieve and MCK for the correct release try: retrieved_obj = MCKfolder.findObject(
# fall back to default tags when the caller has not defined them
if 'GlobalTag' not in dir():
    GlobalTag = 'COMCOND-ES1PT-004-00'
if 'Geometry' not in dir():
    Geometry = 'ATLAS-GEO-20-00-00'
# RunNumber and LumiBlock are expected to be set earlier in the job
# options — TODO confirm with the including fragment
printfunc("RunNumber ", RunNumber)
printfunc("LumiBlock ", LumiBlock)
from RecExConfig.RecFlags import rec
rec.RunNumber.set_Value_and_Lock(RunNumber)
from CoolConvUtilities.AtlCoolLib import indirectOpen
# look up the wall-clock start time of the chosen run/LB in the
# /TRIGGER/LUMI/LBLB folder (IOV key is (run << 32) + LB)
trigDB = indirectOpen('COOLONL_TRIGGER/CONDBR2', oracle=True)
trigfolder = trigDB.getFolder('/TRIGGER/LUMI/LBLB')
runiov = (RunNumber << 32) + LumiBlock
printfunc(" runiov ", runiov)
obj = trigfolder.findObject(runiov, 0)
payload = obj.payload()
# StartTime is in nanoseconds; convert to seconds
TimeStamp = payload['StartTime'] / 1000000000
trigDB.closeDatabase()
# this setting is just to get directly pileup noise as b and write back the same in the database...
from CaloTools.CaloNoiseFlags import jobproperties
jobproperties.CaloNoiseFlags.FixedLuminosity.set_Value_and_Lock(1.)
#TimeStamp = 1274368420
printfunc(" TimeStamp : ", TimeStamp)
if len(runNumbers) > 0: #from RecExConfig.RecAlgsFlags import recAlgs #from RecExConfig.RecFlags import rec from TriggerJobOpts.TriggerFlags import TriggerFlags ### Loop over the input files and find if a problematic one is there. ### If so, turn off the trigger. from PyCool import cool from CoolConvUtilities.AtlCoolLib import indirectOpen import PyUtils.AthFile as AthFile # get connection to COOL and find the HLT ps key from IOVDbSvc.CondDB import conddb coolDbConn = indirectOpen("COOLONL_TRIGGER/%s" % conddb.dbdata, oracle=True) hltfolder = coolDbConn.getFolder('/TRIGGER/HLT/PrescaleKey') lvl1folder = coolDbConn.getFolder('/TRIGGER/LVL1/Lvl1ConfigKey') chansel = cool.ChannelSelection(0, 0, cool.ChannelSelection.sinceBeforeChannel) needToTurnOffHLT = False needToTurnOffLVL1 = False for RunNumber in runNumbers: # translate to begin and end IOV iovmin = (RunNumber << 32) + 0 iovmax = ((RunNumber + 1) << 32) - 1 # read info from COOL hltobjs = hltfolder.browseObjects(iovmin, iovmax, chansel) allHltpsks = [] while hltobjs.goToNext():
lumiBlockMax = 2, ppmADCMinValue = 80, ppmADCMaxValue = 963, doFineTimePlots = True, doPedestalPlots = False, doEtCorrelationPlots = False, doCaloQualCut = False ) ToolSvc += CfgMgr.LVL1__TrigT1CaloLWHistogramTool("TrigT1CaloLWHistogramTool", LVL1ConfigSvc = "") ServiceMgr += CfgMgr.THistSvc() ServiceMgr.THistSvc.Output = ["AANT DATAFILE='output.root' OPT='RECREATE'"] ServiceMgr.THistSvc.Output += ["RAMPDATA DATAFILE='graphs.root' OPT='RECREATE'"] svcMgr.IOVDbSvc.Folders += ["<dbConnection>sqlite://;schema=/afs/cern.ch/user/l/l1ccalib/w0/DaemonData/reference/calibReferences.sqlite;dbname=L1CALO</dbConnection>/TRIGGER/L1Calo/V1/References/FineTimeReferences"] conddb.addFolderWithTag("TRIGGER", "/TRIGGER/L1Calo/V2/Calibration/Calib1/PprChanCalib", "HEAD") # override timestamp with SOR time of run # this overcomes strange timestamps stored in the EventInfo # code taken from CoolConvUtilities/MagFieldUtils.py # as soon as the timestamps are fixed this should be skipped # if 220000 < GetRunNumber() < XXX: from CoolConvUtilities.AtlCoolLib import indirectOpen foldername_ = '/TDAQ/RunCtrl/SOR' if svcMgr.IOVDbSvc.DBInstance == 'CONDBR2' else '/TDAQ/RunCtrl/SOR_Params' db_ = indirectOpen('COOLONL_TDAQ/%s' % svcMgr.IOVDbSvc.DBInstance, oracle=True) if not db_: raise RuntimeError("Couldn't open connection to TDAQ DB") folder_ = db_.getFolder(foldername_) obj_ = folder_.findObject(GetRunNumber() << 32, 0) payload_ = obj_.payload() svcMgr.IOVDbSvc.forceTimestamp = payload_['SORTime'] / 1000000000L