def make_nextdb(self, dbname=None):
    """Create the next pvarch database, carrying PV definitions forward.

    Copies every row of the 'pv' table from the current archive database
    into a freshly created empty database, keeping only PVs that are both
    active and still present in the cache.  Marks the new database as the
    current run and returns its name.
    """
    source = SimpleDB(user=dbuser, passwd=dbpass, dbname=self.arch_db,
                      host=dbhost, debug=0)
    source.use(self.arch_db)
    pv_rows = source.tables['pv'].select()
    cached_names = self.get_pvnames()

    # choose the next database name and build an empty schema for it
    dbname = nextname(current=self.arch_db, dbname=dbname)
    self.create_emptydb(dbname)

    target = SimpleDB(user=dbuser, passwd=dbpass, dbname=dbname,
                      host=dbhost, debug=0)
    target_pvs = target.tables['pv']
    sys.stdout.write('adding %i pvs to DB %s\n' % (len(pv_rows), dbname))

    for row in pv_rows:
        # skip PVs that were deactivated or have dropped out of the cache
        if row['active'] == 'no' or row['name'] not in cached_names:
            continue
        target_pvs.insert(name=row['name'],
                          type=row['type'],
                          description=row['description'],
                          data_table=row['data_table'],
                          deadtime=row['deadtime'],
                          graph_type=row['graph_type'],
                          graph_lo=row['graph_lo'],
                          graph_hi=row['graph_hi'])

    # record the new database as the current run
    self.__setrun(dbname)
    source.close()
    target.close()
    return dbname
class Archiver:
    """Collects PV values from the cache database and archives them.

    Reads recent changes from the master 'cache' table, applies per-PV
    deadtime/deadband filtering, and inserts accepted values into the
    per-PV data tables (pvdat001..pvdat128) of the current archive
    database.  `mainloop()` runs the collection loop as a process whose
    pid/status is coordinated through the master 'info' table.
    """

    # timestamps below MIN_TIME are treated as invalid and replaced with now()
    MIN_TIME = 100

    # template for inserting one value: (data_table, pv_id, time, value)
    sql_insert = "insert into %s (pv_id,time,value) values (%i,%f,%s)"

    def __init__(self, dbconn=None, **args):
        """Connect to the master db, locate the current archive db, and
        preload per-PV settings (deadtime, deadband, table, ...) into
        the self.pvinfo dictionary."""
        self.master_db = config.master_db
        self.dbname = None
        self.db = SimpleDB(dbconn=dbconn)
        self.dbconn = self.db.conn
        # print 'Archiver db = ', self.db, self.db.conn
        # the master 'info' table names the archive database currently in use
        ret = self.read_master("select db from info where process='archive'")
        self.dbname = ret[0]['db']
        # print 'Archiver:: current dbname = ', self.dbname
        # self.use_currentDB()
        self.db.use(self.dbname)
        time.sleep(0.01)
        self.db.read_table_info()
        self.pv_table = self.db.tables['pv']
        self.debug = 0
        self.force_checktime = 0
        self.messenger = sys.stdout
        # PVs that changed within their deadtime wait here: name -> (ts, val)
        self.dtime_limbo = {}
        self.last_collect = 0
        # pvinfo: pvname -> dict of db settings plus runtime bookkeeping
        # (last_ts, last_value, force_time)
        self.pvinfo = {}
        self.pvs = {}
        for k, v in args.items():
            if (k == 'debug'):
                self.debug = v
            elif (k == 'messenger'):
                self.messenger = v
        for d in self.db.exec_fetch('select * from pv'):
            d.update({'last_ts': 0, 'last_value': None,
                      'force_time': get_force_update_time()})
            self.pvinfo[d['name']] = d

    def exec_fetch(self, sql):
        """Execute sql and fetch all rows; returns [{}] on fetch failure."""
        self.db.execute(sql)
        ret = [{}]
        try:
            ret = self.db.fetchall()
        except:
            # NOTE(review): bare except silently swallows fetch errors;
            # callers then see a single empty dict.
            pass
        return ret

    def read_master(self, query):
        """Run a query against the master db, then switch back to the
        archive db (if one is selected)."""
        self.db.use(self.master_db)
        ret = self.exec_fetch(query)
        if self.dbname is not None:
            self.db.use(self.dbname)
        return ret

    def use_currentDB(self, dbname=None):
        """Point self.db at the archive database named in the master
        'info' table; raises IOError if it cannot be determined."""
        ret = self.read_master("select db from info where process='archive'")
        try:
            self.dbname = ret[0]['db']
            self.db.use(self.dbname)
        except:
            raise IOError, 'cannot determine archive database name'
        return self.dbname

    def get_cache_changes(self, dt=30):
        """ get list of name,type,value,cvalue,ts from cache """
        return self.read_master("select * from cache where ts>%f" % (time.time() - dt))

    def get_cache_names(self):
        """Return (and store in self.cache_names) all pvnames in the cache."""
        ret = self.read_master("select pvname from cache")
        self.cache_names = [i['pvname'] for i in ret]
        return self.cache_names

    def get_cache_full(self, pv):
        " return full information for a cached pv"
        s = self.read_master("select * from cache where pvname='%s'" % pv)
        try:
            return s[0]
        except:
            # PV not found in cache: return a stub the callers can test against
            print 'Could not get cache for pvname "%s"' % pv
            return {'type': None, 'value': None}

    def refresh_pv_table(self):
        """refresh the pvinfo dictionary by re-reading the database
        settings for pvs in the pv table.

        This will cause changes in database settings (deadtime, etc)
        to be updated.
        """
        for pvdata in self.pv_table.select():
            name = pvdata['name']
            # already know about this pv: update pvinfo
            if name in self.pvinfo:
                self.pvinfo[name].update(pvdata)
            # look up any new pvs
            else:
                self.get_info(name)

    def check_for_new_pvs(self):
        " read pvs in cache, make sure we have them all in pvinfo dictionary"
        for p in self.get_cache_names():
            self.get_info(p)

    def get_info(self, pvname):
        """ return pvinfo dictionary for a pv, and also ensures that it
        is in the pvinfo dictionary"""
        if pvname not in self.pvinfo:
            try:
                r = self.pv_table.select_where(name=pvname)
                dat = r[0]
            except IndexError:
                # unknown PV: try adding it, then re-read its settings
                self.add_pv(pvname)
                time.sleep(0.01)
                try:
                    r = self.pv_table.select_where(name=pvname)
                    dat = r[0]
                except:
                    return None
            dat['force_time'] = get_force_update_time()
            dat['last_ts'] = 0
            dat['last_value'] = None
            self.pvinfo[pvname] = dat
        return self.pvinfo[pvname]

    def sync_with_cache(self, update_vals=False):
        """ initialize pv lists, insert times, etc with cache.

        use update_vals to force insert of cache values into the
        archive (as on startup)
        """
        newpvs = []
        cache_values = []
        self.get_cache_names()
        pvtable_data = self.pv_table.select()
        print ' This is sync with cache ', update_vals, len(self.cache_names), len(pvtable_data)
        self.db.use(self.master_db)
        now = time.time()
        print 'masterdb %s / data=%s' % (self.master_db, len(pvtable_data))
        if update_vals:
            # snapshot current cache values keyed by pvname
            x = self.db.exec_fetch("select pvname,value,ts from cache")
            current_cache = {}
            for i in x:
                current_cache[i['pvname']] = i
        for pvdata in pvtable_data:
            name = normalize_pvname(pvdata['name'])
            if name not in self.cache_names:
                # archived PV missing from cache: candidate to (re)add
                newpvs.append((name, epics.PV(name)))
            elif update_vals:
                r = current_cache.get(name, None)
                if r is not None:
                    ts = r['ts']
                    # cached timestamp older than a day is considered stale
                    if now - ts > SEC_DAY:
                        ts = now
                    cache_values.append((name, ts, r['value']))
        if len(newpvs) > 0:
            epics.poll()
            m = MasterDB()
            for pvname, pv in newpvs:
                if pv.connected:
                    m.add_pv(pvname)
            m.close()
        # now switch to archiving database, and (perhaps) insert values from cache
        if self.dbname is not None:
            self.db.use(self.dbname)
        for name, ts, value in cache_values:
            self.update_value(name, ts, value)
        print 'Sync with Cache Done'

    def get_pv(self, pvname):
        """Return a (memoized) epics.PV for pvname, or None on failure."""
        if pvname in self.pvs:
            return self.pvs[pvname]
        try:
            p = self.pvs[pvname] = epics.PV(pvname)
            return p
        except:
            return None

    def dbs_for_time(self, t0=SEC_DAY, t1=MAX_EPOCH):
        """ return list of databases with data in the given time range"""
        # pad the range by a day on each side to catch boundary runs
        timerange = (min(t0, t1) - SEC_DAY, max(t0, t1) + SEC_DAY)
        query = "select * from runs where stop_time>=%i and start_time<=%i order by start_time"
        r = []
        for i in self.read_master(query % timerange):
            if i['db'] not in r:
                r.append(i['db'])
        return r

    def get_value_at_time(self, pvname, t):
        "return archived value of a pv at one time"
        if pvname is None:
            return None
        pvname = normalize_pvname(pvname)
        info = self.get_info(pvname)
        if info is None:
            return None
        # NOTE(review): dbs_for_time(t,t)[0] raises IndexError if no run
        # covers time t -- TODO confirm callers tolerate that.
        db = self.dbs_for_time(t, t)[0]
        self.db.use(db)
        qpv = "select data_table,id from pv where name ='%s'" % pvname
        qdat = 'select time,value from %s where pv_id=%i and time<=%f order by time desc limit 1'
        i = self.db.exec_fetchone(qpv)
        r = self.db.exec_fetchone(qdat % (i['data_table'], i['id'], t))
        return r['time'], r['value']

    def get_data(self, pvname, t0, t1, with_current=None):
        """get data from database for a time range.

        Returns (data, stat): data is a sorted list of (time, value)
        pairs; stat is a trail of queries/notes for diagnostics.
        """
        if pvname is None:
            return []
        pvname = normalize_pvname(pvname)
        info = self.get_info(pvname)
        if info is None:
            # NOTE(review): the format string is never applied to pvname
            # here, and this 3-tuple differs in shape from the normal
            # (dat, stat) return -- looks like a latent bug; confirm callers.
            return ([], 'No PV named %s', pvname)
        #
        stat = [info]
        dat = []
        # f: last point at/before t0; g: first point at/after t1;
        # s: all points inside [t0, t1]
        pvquery = "select data_table,id from pv where name ='%s'" % pvname
        fquery = 'select time,value from %s where pv_id=%i and time<=%f order by time desc limit 1'
        gquery = 'select time,value from %s where pv_id=%i and time>=%f order by time limit 1'
        squery = 'select time,value from %s where pv_id=%i and time>=%f and time<=%f order by time'
        needs_firstpoint = True
        tnow = time.time()
        # make sure t0 and t1 are ordered
        if t0 > t1:
            t0, t1 = t1, t0
        # widen very short ranges to at least 1800 s (30 minutes;
        # the original comment said "an hour" -- the code says 30 min)
        if t1 - t0 < 1800.0:
            t0 = t1 - 1800.0
        # determine if we should append the current (cached) value
        if with_current is None:
            add_current = abs(t1 - tnow) < 1.5 * SEC_DAY
        else:
            add_current = with_current
        # print 'get data for ' ,pvname, t0,t1
        # print 'dbs: ', self.dbs_for_time(t0,t1)
        try:
            for db in self.dbs_for_time(t0, t1):
                self.db.use(db)
                stat.append(pvquery)
                r = self.db.exec_fetchone(pvquery)
                try:
                    table = r['data_table']
                    pvid = r['id']
                except KeyError:
                    # this db doesn't know about this PV -- maybe it's a recent addition?
                    continue
                stat.append((db, table, pvid))
                if needs_firstpoint:
                    # anchor the series with the last value before t0
                    q = fquery % (table, pvid, t0)
                    stat.append(q)
                    r = self.db.exec_fetchone(q)
                    try:
                        dat.append((r['time'], r['value']))
                        needs_firstpoint = False
                    except:
                        stat.append('no data before t0!')
                q = squery % (table, pvid, t0, t1)
                stat.append(q)
                for i in self.exec_fetch(q):
                    dat.append((i['time'], i['value']))
                # add value at time just after selected time range
                r = self.db.exec_fetchone(gquery % (table, pvid, t1))
                try:
                    dat.append((r['time'], r['value']))
                except KeyError:
                    pass
            # optionally, add current value
            if add_current:
                stat.append('adding cached value')
                r = self.get_cache_full(pvname)
                stat.append(r)
                if r['value'] is not None:
                    dat.append((time.time(), r['value']))
        except:
            stat.append('Exception!')
        dat.sort()
        self.use_currentDB()
        return dat, stat

    def write(self, s):
        """Write a message to the configured messenger (default stdout)."""
        self.messenger.write(s)

    def drop_pv(self, name):
        """Mark a PV inactive (values are kept; archiving stops)."""
        self.db.execute("update pv set active='no' where name=%s" % clean_string(name))

    def add_pv(self, name, description=None, graph={}, deadtime=None, deadband=None):
        """add PV to the database: expected to take a while.

        Connects to the PV to determine its type/precision, chooses a
        data table by hash, picks graph and deadtime/deadband defaults,
        inserts the row, and records an initial value.  Returns None.
        """
        pvname = normalize_pvname(name)
        t0_start = time.time()
        if not valid_pvname(pvname):
            sys.stdout.write("## Archiver add_pv invalid pvname = '%s'" % pvname)
            return None
        if pvname in self.pvinfo:
            if 'yes' == self.pvinfo[pvname]['active']:
                self.write("PV %s is already in database.\n" % pvname)
            else:
                self.write("PV %s is in database, reactivating!\n" % pvname)
                self.pvinfo[pvname]['active'] = 'yes'
            return None
        # create an Epics PV, check that it's valid
        try:
            pv = epics.PV(pvname)
            pv.connect()
            pv.get_ctrlvars()
            typ = pv.type
            count = pv.count
            prec = pv.precision
            connected = pv.connected
        except:
            typ = 'int'
            count = 1
            prec = None
            connected = False
        if not connected:
            self.write("cannot add PV '%s': not connected" % pvname)
            return None
        # determine type
        dtype = 'string'
        if (typ in ('int', 'long', 'short')):
            dtype = 'int'
        if (typ in ('enum',)):
            dtype = 'enum'
        if (typ in ('double', 'float')):
            dtype = 'double'
        # determine data table: PVs are spread over 128 tables by name hash
        table = "pvdat%3.3i" % ((hash(pvname) % 128) + 1)
        # determine descrption (don't try too hard!)
        if description is None:
            if pvname.endswith('.VAL'):
                descpv = "%s.DESC" % pvname[:-4]
            else:
                descpv = "%s.DESC" % pvname
                # motor fields share the record's description; skip lookup
                for f in motor_fields:
                    if pvname.endswith(f):
                        descpv = None
            if descpv is not None:
                try:
                    dp = epics.PV(descpv)
                    description = dp.get(as_string=True)
                    ## dp.disconnect()
                except:
                    pass
        if description is None:
            description = ''
        # set graph default settings
        gr = {'high': '', 'low': '', 'type': 'normal'}
        gr.update(graph)
        if dtype == 'enum':
            x = pv.get(as_string=True)
            gr['type'] = 'discrete'
            gr['low'] = 0
            gr['high'] = len(pv.enum_strs)
        elif dtype == 'double':
            gr['type'] = 'normal'
            # heuristics: vacuum-gauge-like PVs get log-scale graphs
            dx = description.lower()
            for i in ('cathode', 'pirani', 'pressure'):
                if dx.find(i) >= 0:
                    gr['type'] = 'log'
        if (deadtime == None):
            deadtime = config.pv_deadtime_dble
            if dtype in ('enum', 'string'):
                deadtime = config.pv_deadtime_enum
            if (gr['type'] == 'log'):
                deadtime = 5.0  # (pressures change very frequently)
        if (deadband == None):
            deadband = 1.e-5
            if (gr['type'] == 'log'):
                deadband = 1.e-4
            # derive deadband from display precision when available
            if prec is not None:
                deadband = 10**(-(prec+1))
            if dtype in ('enum', 'string'):
                deadband = 0.5
        self.write('Archiver adding PV: %s, table: %s' % (pvname, table))
        self.pv_table.insert(name=pvname,
                             type=dtype,
                             description=description,
                             data_table=table,
                             deadtime=deadtime,
                             deadband=deadband,
                             graph_lo=gr['low'],
                             graph_hi=gr['high'],
                             graph_type=gr['type'])
        dat = self.pv_table.select_where(name=pvname)[0]
        dat['force_time'] = get_force_update_time()
        dat['last_ts'] = 0
        dat['last_value'] = None
        self.pvinfo[pvname] = dat
        self.update_value(pvname, time.time(), pv.value)
        self.write(" time=%f\n" % (time.time() - t0_start))
        sys.stdout.flush()
        pv = None

    def update_value(self, name, ts, val):
        "insert value into appropriate table "
        if val is None:
            return
        # guard against missing/bogus timestamps
        if ts is None or ts < self.MIN_TIME:
            ts = time.time()
        self.pvinfo[name]['last_ts'] = ts
        self.pvinfo[name]['last_value'] = val
        info = self.pvinfo[name]
        try:
            self.db.execute(self.sql_insert % (info['data_table'], info['id'],
                                               ts, clean_string(val)))
        except TypeError:
            self.write("cannot update %s\n" % name)

    def collect(self):
        """ one pass of collecting new values, deciding what to archive.

        Returns (n_new, n_forced): counts of changed values accepted
        and of values force-recorded because they were stale.
        """
        newvals, forced = {}, {}
        tnow = time.time()
        # look back twice as far as the gap since the last pass
        dt = max(1.0, 2.*(tnow - self.last_collect))
        self.last_collect = tnow
        new_Changes = self.get_cache_changes(dt=dt)
        for dat in new_Changes:
            name = dat['pvname']
            val = dat['value']
            ts = dat['ts'] or time.time()
            if name not in self.pvinfo:
                # NOTE(review): if add_pv fails (e.g. PV not connected) it
                # does not populate self.pvinfo, so the next line can raise
                # KeyError -- TODO confirm intended behavior.
                self.add_pv(name)
            info = self.pvinfo[name]
            if info['active'] == 'no' or name in newvals:
                continue
            last_ts = info['last_ts']
            last_val = info['last_value']
            if last_ts is None:
                last_ts = 0
            # deadtime filter: enough time elapsed since the last insert?
            do_save = ((ts - last_ts) > info['deadtime'])
            if do_save and dat['type'] in ('double', 'float'):
                # deadband filter: relative change must exceed deadband
                try:
                    v, o = float(val), float(last_val)
                    do_save = abs((v-o)/max(abs(v), abs(o), 1.e-12)) > abs(info['deadband'])
                except:
                    pass
            if do_save:
                # self.update_value(name,ts,val)
                newvals[name] = (ts, val)
                if name in self.dtime_limbo:
                    self.dtime_limbo.pop(name)
            elif (ts - last_ts) > 1.e-3:
                # pv changed, but inside 'deadtime': put it in limbo!
                self.dtime_limbo[name] = (ts, val)
        # now look through the "limbo list" and insert the most recent change
        # iff the last insert was longer ago than the deadtime:
        tnow = time.time()
        # print '====== Collect: ', len(new_Changes) , len(newvals), len(self.dtime_limbo), time.ctime()
        for name in self.dtime_limbo.keys():
            info = self.pvinfo[name]
            if info['active'] == 'no':
                continue
            last_ts = info['last_ts']
            last_val = info['last_value']
            if (tnow - last_ts) > info['deadtime']:
                newvals[name] = self.dtime_limbo.pop(name)
        n_new = len(newvals)
        n_forced = 0
        # check for stale values and re-read db settings every 5 minutes or so
        if (tnow - self.force_checktime) >= 300.0:
            # sys.stdout.write('looking for stale values, checking for new settings...%s\n' %time.ctime() )
            self.force_checktime = tnow
            self.check_for_new_pvs()
            self.refresh_pv_table()
            for name, info in self.pvinfo.items():
                last_ts = info['last_ts']
                last_val = info['last_value']
                if info['active'] == 'no':
                    continue
                ftime = info['force_time']
                try:
                    force = tnow - last_ts > ftime
                except:
                    print 'Cannot Figure out whether to force recording??'
                    print tnow, last_ts, ftime
                    print 'They should all be floats???'
                    force = False
                if force:
                    r = self.get_cache_full(name)
                    if r['type'] is None and r['value'] is None:
                        # an empty / non-cached PV?
                        try:
                            test_pv = epics.PV(name)
                            test_pv.connect()
                            # if PV is still not connected, set time
                            # to wait 2 hours before checking again.
                            if (test_pv is None or not test_pv.connected):
                                self.pvinfo[name]['last_ts'] = 7200 + tnow - ftime
                                self.write(" PV not connected: %s\n" % name)
                            else:
                                r['value'] = test_pv.value
                            test_pv.disconnect()
                        except:
                            pass
                    else:
                        if name not in newvals:
                            # force-record the cached value with current time
                            newvals[name] = (tnow, str(r['value']))
                            n_forced = n_forced + 1
        for name, data in newvals.items():
            self.update_value(name, data[0], data[1])
        # self.db.commit_transaction()
        return n_new, n_forced

    def set_pidstatus(self, pid=None, status='unknown'):
        """Record this archiver's pid and/or status in the master info table."""
        self.db.use(self.master_db)
        if status in ('running', 'offline', 'stopping', 'unknown'):
            self.db.execute("update info set status='%s' where process='archive'" % status)
        if pid is not None:
            self.db.execute("update info set pid=%i where process='archive'" % int(pid))
        self.db.use(self.dbname)

    def set_infotime(self, ts):
        """Record the archiver's heartbeat time in the master info table."""
        self.db.use(self.master_db)
        self.db.execute("update info set ts=%f,datetime='%s' where process='archive'" % (ts, time.ctime(ts)))
        self.db.use(self.dbname)

    def get_pidstatus(self):
        """Return (pid, status) for the archive process from the master db."""
        self.db.use(self.master_db)
        ret = self.db.exec_fetchone("select * from info where process='archive'")
        self.db.use(self.dbname)
        return ret['pid'], ret['status']

    def get_nchanged(self, minutes=10, limit=None):
        """return the number of values archived in the past minutes.

        if limit is set, return as soon as this limit is seen to be
        exceeded.  this is useful when checking if any values have
        been cached."""
        n = 0
        dt = (time.time() - minutes*60.0)
        q = "select count(time) from pvdat%3.3i where time > %f "
        # values are spread over 128 data tables; count each in turn
        for i in range(1, 129):
            r = self.exec_fetch(q % (i, dt))
            try:
                n += r[0]['count(time)']
            except:
                pass
            if limit is not None and n > limit:
                break
        return n

    def mainloop(self, verbose=False):
        """Run the collection loop until interrupted or told to stop.

        Stops when the master info table's status becomes 'stopping' /
        'offline' or its recorded pid no longer matches this process.
        """
        t0 = time.time()
        self.db.get_cursor()
        self.use_currentDB()
        self.last_collect = t0
        self.write('connecting to database %s ... \n' % self.dbname)
        self.sync_with_cache(update_vals=True)
        self.write("done. DB connection took %6.3f sec\n" % (time.time()-t0))
        self.write("connecting to %i Epics PVs ... \n" % (len(self.pvinfo)))
        self.write('====== Start monitoring / saving to DB=%s\n' % self.dbname)
        sys.stdout.flush()
        mypid = os.getpid()
        self.set_pidstatus(pid=mypid, status='running')
        is_collecting = True
        n_loop = 0
        n_changed = 0
        n_forced = 0
        t_lastlog = 0
        mlast = -1
        msg = "%s: %i new, %i forced entries. (%i)\n"
        while is_collecting:
            try:
                n_loop = n_loop + 1
                n1, n2 = self.collect()
                n_changed = n_changed + n1
                n_forced = n_forced + n2
                epics.poll()
                tnow = time.time()
                tmin, tsec = time.localtime()[4:6]
                # log a summary at the top of every 5th minute
                if tsec < 2 and tmin != mlast and tmin % 5 == 0:
                    self.write(msg % (time.ctime(), n_changed, n_forced, n_loop))
                    sys.stdout.flush()
                    n_changed = 0
                    n_forced = 0
                    n_loop = 0
                    t_lastlog = tnow
                    mlast = tmin
                self.set_infotime(tnow)
            except KeyboardInterrupt:
                sys.stderr.write('Interrupted by user.\n')
                return None
            # obey external stop requests recorded in the master db
            masterpid, status = self.get_pidstatus()
            if (status in ('stopping', 'offline')) or (masterpid != mypid):
                self.set_pidstatus(status='offline')
                is_collecting = False
        return None
class MasterDB:
    """ general interface to Master Database of Epics Archiver.

    This is used by both by itself, and is sublcassed by
    Cache, ArchiveMaster, Instruments, and Alerts.

    Because of the multiple uses and especially because it is used as
    the sole interface by some Web processes, the API here may seem a
    bit disjointed, with a partial interface for Pairs and Alerts, but
    this allows many processes to have exactly one database connection.
    """
    # header/separator lines for runs_report() output
    runs_title = '| database | date range | duration (days)|'
    runs_line = '|-------------------|---------------------------|----------------|'

    # default alert mail body; %XXX% tokens are substituted in sendmail()
    def_alert_msg = """Hello,
An alarm labeled %LABEL% was detected for PV = '%PV%'
The current value = %VALUE%. This is %COMP% the trip point value of %TRIP%
"""

    # comparison tokens stored in the alerts table, their human-readable
    # forms, and the dunder method each maps to for evaluation
    optokens = ('ne', 'eq', 'le', 'lt', 'ge', 'gt')
    opstrings = ('not equal to', 'equal to',
                 'less than or equal to', 'less than',
                 'greater than or equal to', 'greater than')
    ops = {'eq': '__eq__', 'ne': '__ne__',
           'le': '__le__', 'lt': '__lt__',
           'ge': '__ge__', 'gt': '__gt__'}

    def __init__(self, dbconn=None, **kw):
        """Connect to the master database and bind its tables as attributes."""
        self.db = SimpleDB(dbconn=dbconn)
        self.dbconn = self.db.conn
        self.db.use(master_db)
        self.db.get_cursor()
        self.db.read_table_info()
        self.info = self.db.tables['info']
        self.cache = self.db.tables['cache']
        self.runs = self.db.tables['runs']
        self.pairs = self.db.tables['pairs']
        self.stations = self.db.tables['stations']
        self.instruments = self.db.tables['instruments']
        self.inst_pvs = self.db.tables['instrument_pvs']
        self.alerts = self.db.tables['alerts']
        # name of the current archive database, from the info table
        self.arch_db = self._get_info('db', process='archive')
        # lazily-filled list of cached pvnames (see get_pvnames)
        self.pvnames = []

    def use_master(self):
        "point db cursor to use master database"
        self.db.use(master_db)

    def use_current_archive(self):
        "point db cursor to use current archive database"
        self.arch_db = self._get_info('db', process='archive')
        self.db.use(self.arch_db)

    def save_db(self, dbname=None):
        """Dump a database (default: current archive) to a compressed file."""
        if dbname is None:
            dbname = self.arch_db
        sys.stdout.write('saving %s\n' % dbname)
        self.db.use(dbname)
        self.db.safe_dump(compress=True)
        self.db.use(master_db)

    def get_pvnames(self):
        """ generate self.pvnames: a list of pvnames in the cache"""
        self.pvnames = [i['pvname'] for i in self.cache.select()]
        #
        # for i in self.cache.select():
        #     if i['pvname'] not in self.pvnames:
        #         self.pvnames.append(i['pvname'])
        return self.pvnames

    def request_pv_cache(self, pvname):
        """request a PV to be included in caching.

        will take effect once a 'process_requests' is executed."""
        self.db.use(master_db)
        npv = normalize_pvname(pvname)
        if len(self.pvnames) == 0:
            self.get_pvnames()
        # already cached: nothing to do
        if npv in self.pvnames:
            return
        cmd = "insert into requests (pvname,action,ts) values ('%s','add',%f)" % (npv, time.time())
        self.db.execute(cmd)
        print 'REQUEST_PV_CACHE: add ', pvname

    def add_pv(self, pvname, set_motor_pairs=True):
        """adds a PV to the cache: actually requests the addition,
        which will be handled by the next process_requests in mainloop().

        Here, we check for 'Motor' PV typs and make sure all motor
        fields are requested together, and that the motor fields are
        'related' by assigning a pair_score = 10.
        """
        pvname = normalize_pvname(pvname.strip())
        fields = (pvname,)
        if not valid_pvname(pvname):
            sys.stdout.write("## MasterDB add_pv invalid pvname = '%s'" % pvname)
            return fields
        prefix = pvname
        isMotor = False
        if pvname.endswith('.VAL'):
            prefix = pvname[:-4]
        p = epics.PV(pvname)
        p.wait_for_connection(timeout=0.1)
        print 'Master Add PVname = ', pvname, p.connected
        if p.connected:
            self.request_pv_cache(pvname)
            # a field-less double PV may be a motor record: check .RTYP
            if ('.' not in prefix and p.type == 'double'):
                rtype = epics.PV(prefix + '.RTYP')
                rtype.wait_for_connection(0.1)
                if rtype is not None:
                    isMotor = 'motor' == rtype.get()
        if isMotor:
            # request all standard motor fields together
            fields = tuple(["%s%s" % (prefix, i) for i in motor_fields])
            pvs = []
            for pvname in fields:
                pvs.append(epics.PV(pvname))
            epics.poll()
            for p in pvs:
                p.wait_for_connection(timeout=0.5)
                if p.connected:
                    self.request_pv_cache(p.pvname)
        if isMotor and set_motor_pairs:
            time.sleep(0.25)
            self.set_allpairs(fields)
        return fields

    def drop_pv(self, pvname):
        """drop a PV from the caching process -- really this 'suspends
        updates'.  will take effect once a 'process_requests' is executed."""
        npv = normalize_pvname(pvname)
        if len(self.pvnames) == 0:
            self.get_pvnames()
        if not npv in self.pvnames:
            return
        cmd = "insert into requests (pvname,action) values ('%s','suspend')" % npv
        self.db.execute(cmd)

    def get_recent(self, dt=60):
        """get recent additions to the cache, those inserted in the
        last dt seconds."""
        where = "ts>%f order by ts" % (time.time() - dt)
        return self.cache.select(where=where)

    def dbs_for_time(self, t0=SEC_DAY, t1=MAX_EPOCH):
        """ return list of databases with data in the given time range"""
        # pad by a day on each side to include boundary runs
        timerange = (min(t0, t1) - SEC_DAY, max(t0, t1) + SEC_DAY)
        where = "stop_time>=%i and start_time<=%i order by start_time"
        r = []
        for i in self.runs.select(where=where % timerange):
            if i['db'] not in r:
                r.append(i['db'])
        return r

    def close(self):
        "close db connection"
        self.db.close()

    def _get_info(self, name='db', process='archive'):
        " get value from info table"
        try:
            return self.info.select_one(where="process='%s'" % process)[name]
        except:
            return None

    def _set_info(self, process='archive', **kw):
        " set value(s) in the info table"
        # print 'SET INFO ', process, kw
        self.info.update("process='%s'" % process, **kw)

    def get_cache_status(self):
        " get status of caching process"
        return self._get_info('status', process='cache')

    def get_arch_status(self):
        " get status of archiving process"
        return self._get_info('status', process='archive')

    def set_cache_status(self, status):
        " set status of caching process"
        return self._set_info(status=status, process='cache')

    def set_arch_status(self, status):
        " set status of archiving process"
        return self._set_info(status=status, process='archive')

    def get_cache_pid(self):
        " get pid of caching process"
        return self._get_info('pid', process='cache')

    def get_arch_pid(self):
        " get pid of archiving process"
        return self._get_info('pid', process='archive')

    def set_cache_pid(self, pid):
        " set pid of caching process"
        return self._set_info(pid=pid, process='cache')

    def set_arch_pid(self, pid):
        " set pid of archiving process"
        return self._set_info(pid=pid, process='archive')

    ##
    ## Status/Activity Reports
    def get_npvs(self):
        """return the number of pvs in archive"""
        self.db.use(self.arch_db)
        r = self.db.exec_fetch('select id from pv')
        self.db.use(master_db)
        return len(r)

    def arch_nchanged(self, minutes=10, max=None):
        """return the number of values archived in the past minutes. """
        # NOTE(review): parameter 'max' shadows the builtin.
        self.db.use(self.arch_db)
        n = 0
        t0 = time.time()
        dt = (time.time() - minutes*60.)
        q = "select pv_id from pvdat%3.3i where time > %f "
        self.db.get_cursor()
        # values are spread over 128 data tables; count rows in each
        for i in range(1, 129):
            n = n + self.db.cursor.execute(q % (i, dt))
            if max is not None and n > max:
                break
        self.db.use(master_db)
        return n

    def arch_report(self, minutes=10):
        """return a report (list of text lines) for archiving process """
        npvs = self.get_npvs()
        o = ["Current Database=%s, status=%s, PID=%i, %i PVs in archive"
             % (self.arch_db, self.get_arch_status(),
                self.get_arch_pid(), npvs)]
        return o

    def arch_full_report(self, minutes=10):
        """return a report (list of text lines) for archiving process,
        giving the number of values archived in the past minutes.
        """
        nchanged = self.arch_nchanged(minutes=minutes)
        npvs = self.get_npvs()
        o = ["Current Database=%s, status=%s, PID=%i, %i PVs in archive"
             % (self.arch_db, self.get_arch_status(),
                self.get_arch_pid(), npvs),
             "%i values archived in past %i minutes" % (nchanged, minutes)]
        return o

    def cache_report(self, brief=False, dt=60):
        """return a report (list of text lines) for caching process,
        giving number of values cached in the past dt seconds.

        Use 'brief=False' to show which PVs have been cached.
        """
        out = []
        pid = self.get_cache_pid()
        ret = self.cache.select(where="ts> %i order by ts" % (time.time() - dt))
        fmt = " %s %.25s = %s"
        if not brief:
            for r in ret:
                out.append(fmt % (tformat(t=r['ts'], format="%H:%M:%S"),
                                  r['pvname'] + ' '*20,
                                  r['value']))
        fmt = '%i PVs had values updated in the past %i seconds. pid=%i'
        out.append(fmt % (len(ret), dt, pid))
        return out

    def runs_report(self, n=10):
        """return a report (list of text lines) for the archiving runs
        showing the time ranges for the (at most) 10 most recent runs.
        """
        r = []
        for i in self.runs.select(where='1=1 order by start_time desc limit %i' % n):
            timefmt = "%6.2f "
            # the current run is still open: use 'now' as its stop time
            # and mark its duration with a '*'
            if i['db'] == self.arch_db:
                timefmt = "%6.1f*"
                i['stop_time'] = time.time()
            days = timefmt % ((i['stop_time'] - i['start_time'])/(24*3600.0))
            drange = "%s to %s" % (tformat(i['start_time'], format="%Y-%m-%d"),
                                   tformat(i['stop_time'], format="%Y-%m-%d"))
            r.append("| %16s | %24s | %10s |" % (i['db'], drange, days))
        r.reverse()
        out = [self.runs_line, self.runs_title, self.runs_line]
        for i in r:
            out.append(i)
        out.append(self.runs_line)
        return out

    ##
    ## Pairs
    def get_related_pvs(self, pv, minscore=1):
        """return a list of related pvs to the provided pv
        with a minumum pair score"""
        out = []
        tmp = []
        npv = normalize_pvname(pv)
        if len(self.pvnames) == 0:
            self.get_pvnames()
        if npv not in self.pvnames:
            return out
        # the pv may appear in either column of the pairs table
        for i in ('pv1', 'pv2'):
            where = "%s='%s' and score>=%i order by score"
            for j in self.pairs.select(where=where % (i, npv, minscore)):
                tmp.append((j['score'], j['pv1'], j['pv2']))
        tmp.sort()
        for r in tmp:
            if r[1] == npv:
                out.append(r[2])
            elif r[2] == npv:
                out.append(r[1])
        # highest-scoring partners first
        out.reverse()
        return out

    def __get_pvpairs(self, pv1, pv2):
        "fix and sort 2 pvs for use in the pairs tables"
        p = [normalize_pvname(pv1), normalize_pvname(pv2)]
        p.sort()
        return tuple(p)

    def get_pair_score(self, pv1, pv2):
        "set pair score for 2 pvs"
        p = self.__get_pvpairs(pv1, pv2)
        score = -1
        if len(self.pvnames) == 0:
            self.get_pvnames()
        if (p[0] in self.pvnames) and (p[1] in self.pvnames):
            o = self.pairs.select_one(where="pv1='%s' and pv2='%s'" % p)
            score = int(o.get('score', -1))
        return score

    def set_pair_score(self, pv1, pv2, score=None):
        "set pair score for 2 pvs"
        p = self.__get_pvpairs(pv1, pv2)
        current_score = self.get_pair_score(p[0], p[1])
        if current_score is None:
            # the pvs may be newly added and not yet visible: retry briefly
            wait_count = 0
            while current_score is None and wait_count < 10:
                time.sleep(0.1)
                self.get_pvnames()
                current_score = self.get_pair_score(p[0], p[1])
                wait_count = wait_count + 1
        # a current_score=None means the pv pairs may not be known yet.
        if current_score is None:
            current_score = -1
            score = 1
        if score is None:
            score = 1 + current_score
        if current_score <= 0:
            q = "insert into pairs set score=%i, pv1='%s', pv2='%s'"
        else:
            q = "update pairs set score=%i where pv1='%s' and pv2='%s'"
        self.db.exec_fetch(q % (score, p[0], p[1]))

    def increment_pair_score(self, pv1, pv2):
        """increase by 1 the pair score for two pvs """
        self.set_pair_score(pv1, pv2, score=None)

    def set_allpairs(self, pvlist, score=10):
        """for a list/tuple of pvs, set all pair scores
        to be at least the provided score"""
        if not isinstance(pvlist, (tuple, list)):
            return
        _tmp = list(pvlist[:])
        # these may be newly added names, so may not yet be
        # in pvnames. If not, let's give them a chance!
        # NOTE(review): newnames is initialized False, so this wait loop
        # can never run -- was probably meant to start as True; confirm.
        newnames = False
        wait_count = 0
        if len(self.pvnames) == 0:
            self.get_pvnames()
        while newnames and wait_count < 10:
            newnames = False
            for i in _tmp:
                newnames = newnames or (i not in self.pvnames)
            time.sleep(0.1)
            self.get_pvnames()
            wait_count = wait_count + 1
        # score every distinct pair from the list
        while _tmp:
            a = _tmp.pop()
            for b in _tmp:
                if self.get_pair_score(a, b) < score:
                    self.set_pair_score(a, b, score=score)
        self.pairs.select()

    ##
    ## Instruments
    def get_instruments_with_pv(self, pv):
        """ return a list of (instrument ids, inst name, station name)
        for instruments which contain a named pv"""
        inames = []
        pvn = normalize_pvname(pv)
        for r in self.inst_pvs.select(where="pvname='%s'" % pvn):
            inst = self.instruments.select_one(where="id=%i" % r['inst'])
            sta = self.stations.select_one(where="id=%i" % inst['station'])
            inames.append((r['inst'], inst['name'], sta['name']))
        return inames

    ##
    ## Alerts
    def get_alerts(self, pvname=None, name=None):
        """ return a list of alerts for a pvname"""
        where = '1=1'
        if pvname is not None:
            pvname = normalize_pvname(pvname)
            where = "pvname='%s'" % pvname
        if name is not None:
            where = "%s and name='%s'" % (where, clean_input(name))
        return self.alerts.select(where=where)

    def get_alert_with_id(self, id):
        """ return a list of alerts for a pvname"""
        return self.alerts.select_one(where="id=%i" % id)

    def remove_alert(self, id=None):
        """remove an alert"""
        if id is None:
            return
        q = "delete from alerts where id=%i" % int(id)
        self.db.execute(q)

    def add_alert(self, pvname=None, name=None,
                  mailto=None, mailmsg=None, timeout=30,
                  compare='ne', trippoint=None, **kw):
        """add a new alert"""
        if pvname is None:
            return
        pvname = normalize_pvname(pvname)
        if name is None:
            name = pvname
        if len(self.pvnames) == 0:
            self.get_pvnames()
        if pvname not in self.pvnames:
            self.add_pv(pvname)
        # an alert missing any required field is created inactive
        active = 'yes'
        if mailto is None:
            active, mailto = ('no', '')
        if mailmsg is None:
            active, mailmsg = ('no', '')
        if trippoint is None:
            active, trippoint = ('no', 0.)
        if compare not in self.optokens:
            compare = 'ne'
        # NOTE(review): the literal timeout=30 below ignores the 'timeout'
        # argument -- almost certainly should be timeout=timeout; confirm.
        self.alerts.insert(name=name, pvname=pvname, active=active,
                           mailto=mailto, mailmsg=mailmsg, timeout=30,
                           compare=compare, trippoint=trippoint)
        # evaluate the new alert against the PV's current value
        for a in self.get_alerts(pvname=pvname, name=name):
            val = epics.caget(pvname)
            self.check_alert(a['id'], val)

    def update_alert(self, id=None, **kw):
        """modify an existing alert, index with id, and passing in
        keyword args for ('pvname','name','mailto','mailmsg',
        'trippoint','compare','status','active')
        """
        if id is None:
            return
        where = "id=%i" % id
        mykw = {}
        for k, v in kw.items():
            if k in ('pvname', 'name', 'mailto', 'mailmsg', 'timeout',
                     'trippoint', 'compare', 'status', 'active'):
                maxlen = 1024
                if k == 'mailmsg':
                    maxlen = 32768
                v = clean_input(v, maxlen=maxlen)
                # coerce enum-like fields to their valid values
                if 'compare' == k:
                    if not v in self.optokens:
                        v = 'ne'
                elif 'status' == k:
                    if v != 'ok':
                        v = 'alarm'
                elif 'active' == k:
                    if v != 'no':
                        v = 'yes'
                mykw[k] = v
        self.alerts.update(where=where, **mykw)
        # re-evaluate the alert against the PV's current value
        a = self.get_alert_with_id(id)
        val = epics.caget(a['pvname'])
        self.check_alert(id, val)

    def check_alert(self, id, value, sendmail=False):
        """ check alert status, sending mail if necessary.

        returns two boolean values: (Value_OK, Mail_Was_Sent)
        """
        # print '## check_alert with sendmail=True', id, value
        where = "id=%i" % id
        alarm = self.alerts.select_one(where=where)
        # if alarm is not active, return now
        if 'no' == alarm['active']:
            return True, False
        # coerce values to strings or floats for comparisons
        convert = str
        if isinstance(value, (int, long, float, complex)):
            convert = float
        value = convert(value)
        trippoint = convert(alarm['trippoint'])
        cmp = self.ops[alarm['compare']]
        # compute new alarm status: note form 'value.__ne__(trippoint)'
        value_ok = not getattr(value, cmp)(trippoint)
        old_value_ok = (alarm['status'] == 'ok')
        # only notify on an ok -> alarm transition (and when asked to)
        notify = sendmail and old_value_ok and (not value_ok)
        if old_value_ok != value_ok:
            # update the status filed in the alerts table
            status = 'alarm'
            if value_ok:
                status = 'ok'
            self.alerts.update(status=status, where=where)
            # send mail if value is now not ok!
            if notify:
                self.sendmail(alarm, value)
        # if sendmail: print '##check_alert: ' , value_ok, notify
        return value_ok, notify

    def sendmail(self, alarm, value):
        """ send an alert email from an alarm dict holding
        the appropriate row of the alert table.
        """
        mailto = alarm['mailto']
        pvname = alarm['pvname']
        label = alarm['name']
        compare = alarm['compare']
        msg = alarm['mailmsg']
        if mailto in ('', None) or pvname in ('', None):
            return
        mailto = mailto.replace('\r', '').replace('\n', '')
        trippoint = str(alarm['trippoint'])
        mailto = tuple(mailto.split(','))
        subject = "[Epics Alert] %s" % (label)
        if msg in ('', None):
            msg = self.def_alert_msg
        msg = clean_mail_message(msg)
        opstr = 'not equal to'
        for tok, desc in zip(self.optokens, self.opstrings):
            if tok == compare:
                opstr = desc
        # fill in 'template' values in mail message
        for k, v in {'PV': pvname, 'LABEL': label, 'COMP': opstr,
                     'VALUE': str(value), 'TRIP': str(trippoint)}.items():
            msg = msg.replace("%%%s%%" % k, v)
        # do %PV(XX)% replacements
        mlines = msg.split('\n')
        for i, line in enumerate(mlines):
            nmatch = 0
            match = re_showpv(line)
            # replace each %PV(name)% with the cached value; cap at 25
            # substitutions per line to avoid runaway loops
            while match is not None and nmatch < 25:
                try:
                    pvn = match.groups()[0]
                    rep = "%(cvalue)s" % self.cache.select_one(where="pvname='%s'" % pvn)
                    line = line.replace('%%PV(%s)%%' % pvn, rep)
                except:
                    line = line.replace('%%PV(%s)%%' % pvn, 'Unknown_PV(%s)' % pvn)
                match = re_showpv(line)
                nmatch = nmatch + 1
            mlines[i] = line
        msg = "From: %s\r\nSubject: %s\r\n%s\nSee %s/plot/%s\n" % \
              (mailfrom, subject, '\n'.join(mlines), cgi_url, pvname)
        try:
            s = smtplib.SMTP(mailserver)
            s.sendmail(mailfrom, mailto, msg)
            s.quit()
        except:
            sys.stdout.write("Could not send Alert mail: mail not configured??")