def stop_servers(startswith_list):
    astor = fn.Astor()
    db = PyTango.Database()
    datum = db.get_server_list()
    for server_name in datum.value_string:
        # Operate on servers starting with the specified strings
        items = [item for item in startswith_list
                 if server_name.startswith(item)]
        if items:
            try:
                # TBD - enable the actual stop once verified:
                # astor.stop_server(server_name)
                # success_output.append(server_name)
                failed_output.append(server_name + " (NO-OP IN STOPPING)")
            except Exception as exc:
                logging.error("EXCEPTION in stopping {} {}".format(
                    server_name, exc))
                failed_output.append(
                    server_name + "(EXC IN STOPPING:{})".format(exc))
        else:
            skipped_output.append(server_name + " (SKIPPED)")
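# Usage sketch for stop_servers() above. Assumptions (not in the original):
# the module-level result lists and imports shown here, and the server-name
# prefixes, which are illustrative only.
import logging

import fandango as fn
import PyTango

success_output, failed_output, skipped_output = [], [], []
stop_servers(['PyAlarm/', 'PySignalSimulator/'])
print('failed: {}'.format(failed_output))
print('skipped: {}'.format(skipped_output))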
def cleanup(delete=True):
    global tangodb, alarms
    fn.log.info('MAIL_TEST: CLEANUP ' + '>' * 40)
    try:
        servers = fn.Astor('PyAlarm/test_mail')
        print('Server States:' + str(servers.states()))
        print('Stopping ...')
        servers.stop_servers()
    except:
        traceback.print_exc()
    if delete:
        print('deleting alarms ...')
        for a in ('TEST_MAIL', 'TEST_SMTP'):
            try:
                alarms.remove(a)
            except:
                pass
        print('deleting devices ...')
        for d in ('test/pyalarm/sendmail', 'test/pyalarm/smtpmail',
                  'test/pyalarm/smtp'):
            try:
                tangodb.delete_device(d)
            except:
                traceback.print_exc()
def start_devices(self, regexp='*', force=False,
                  do_init=False, do_restart=False):
    #devs = fn.tango.get_class_devices('HdbEventSubscriber')
    devs = self.get_archivers()
    if regexp:
        devs = fn.filtersmart(devs, regexp)
    off = sorted(set(d for d in devs if not fn.check_device(d)))

    if off and do_restart:
        print('Restarting %s Archiving Servers ...' % self.db_name)
        astor = fn.Astor()
        astor.load_from_devs_list(list(off))
        astor.stop_servers()
        fn.wait(3.)
        astor.start_servers()
        fn.wait(3.)

    for d in devs:
        try:
            dp = fn.get_device(d)
            if do_init:
                dp.init()
            if force or dp.attributenumber != dp.attributestartednumber:
                off.append(d)
                print('%s.Start()' % d)
                dp.start()
        except Exception as e:
            self.warning('start_archivers(%s) failed: %s' % (d, e))
def cleanup(self, servers=None, delete=True):
    servers = servers or self.servers
    self.log('CLEANUP ' + '>' * 40)
    try:
        astor = fn.Astor()
        [astor.load_by_name('PyAlarm/' + s) for s in servers]
        print('Server States:' + str(astor.states()))
        print('Stopping ...')
        astor.stop_servers()
    except:
        traceback.print_exc()
    if delete:
        print('deleting alarms ...')
        for a in self.tags:
            try:
                self.alarms.remove(a)
            except:
                pass
        print('deleting devices ...')
        for s, devs in self.servers.items():
            for d in devs:
                try:
                    self.tangodb.delete_device(d)
                except:
                    traceback.print_exc()
def delete_simulators(filein):
    #NOTE: THIS METHOD SHOULD DELETE ONLY PYSIGNALSIMULATOR INSTANCES,
    #NOT ANYTHING ELSE!
    raise NotImplementedError('NotImplementedYet!')
    all_sims = fd.Astor('*Simulator*/*').get_all_devices()
    devs = [d for d in pickle.load(open(filein)) if d in all_sims]
    db = PyTango.Database()
    for d in devs:
        props = get_all_properties(d)
        [db.delete_property(d, p) for p in props]
        db.delete_device(d)
def restart_servers(servers=[], host=''):
    if not servers:
        servers = get_servers_status()['restart']
    astor = fandango.Astor()
    astor.load_from_servers_list(servers)
    astor.stop_servers()
    print('waiting ...')
    fandango.wait(10.)
    for s in astor:
        host = host or astor[s].host
        print('Starting %s at %s' % (s, host))
        astor.start_servers(s, host=host)
    return
def restart_attributes_archivers(schema, attributes, action=False):
    import PyTangoArchiving
    api = PyTangoArchiving.api(schema)
    devs = fn.defaultdict(list)
    [devs[api[a].archiver].append(a) for a in attributes]
    if not action:
        print('%d archivers to restart, call with action=True to execute it'
              % len(devs))
    else:
        print('Restarting %d archivers' % len(devs))
        astor = fn.Astor()
        astor.load_from_devs_list(list(devs.keys()))
        astor.stop_servers()
        time.sleep(10.)
        astor.start_servers()
    return dict((k, len(v)) for k, v in devs.items())
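# restart_attributes_archivers() follows a dry-run convention: without
# action=True it only reports how many archivers would be restarted.
# The attribute names below are illustrative:
restart_attributes_archivers('hdb', ['sr/di/dcct/current'])
restart_attributes_archivers('hdb', ['sr/di/dcct/current'], action=True)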
def get_servers_status(regexp='*', exclude=['bpms', 'test', 'sr_vc_']):
    servers = fandango.Astor()
    servers.load_by_name('PyAlarm/*%s*' % regexp)
    servers.load_by_name('Panic*/*%s*' % regexp)
    print('%d servers loaded' % len(servers))
    states = servers.states()
    [states.pop(k) for k in list(states.keys())
     if any(e in k for e in exclude)]

    exported = fandango.get_all_devices(exported=True)
    exported = [s for s in states if 'dserver/' + s in exported]
    zombies = sorted(d for d, s in states.items()
                     if d in exported and s is None)
    off = sorted(d for d, s in states.items()
                 if d not in zombies and s is None)
    on = sorted(s for s in states if states[s] is not None)

    print('\n')
    for s in off:
        print('%s : %s : OFF' % (servers[s].host, s))
    for s in zombies:
        print('%s : %s : ZOMBIE!' % (servers[s].host, s))
    print('\n')

    failed = []
    for s in on:
        for d in sorted(servers[s].get_device_list()):
            if not fandango.matchCl('(sys|dserver)/*', d):
                ss = fandango.check_device(d)
                p = fandango.tango.get_device_property(d, 'pollingperiod')
                if not p:
                    print('%s has no polling defined' % d)
                elif float(p) > 1000:
                    print('%s has a wrong polling! %s' % (d, p))
                if str(ss) not in ('ALARM', 'ON'):
                    failed.append(s)
                    print('%s : %s : %s : %s'
                          % (servers[s].host, s, d, str(ss)))

    print('\n%d servers have failed devices' % len(failed))
    restart = sorted(set(d for l in (off, zombies, failed) for d in l))
    print('%d servers should be restarted' % len(restart))
    print('')
    return {
        'off': off,
        'on': on,
        'zombies': zombies,
        'failed': failed,
        'restart': restart,
    }
def force_stop_attributes(schema, attr_list):
    """
    This method will stop archivers, modify tables, and restart archivers
    to ensure that archiving is stopped.
    """
    import fandango
    api = PyTangoArchiving.ArchivingAPI(schema)
    attr_list = [a for a in attr_list if a in api and api[a].archiver]
    arch = list(set(api[a].archiver for a in attr_list))
    astor = fn.Astor()
    astor.load_from_devs_list(arch)
    astor.stop_servers()
    for s in attr_list:
        query = ("update amt set stop_date=now() "
                 "where ID = %s and stop_date is NULL" % api[s].ID)
        print(query)
        api.db.Query(query)
    astor.start_servers()
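# Minimal sketch of force_stop_attributes(); the attribute names are
# illustrative. Archivers serving the attributes are stopped, their amt
# rows are closed with stop_date=now(), and the servers restarted:
force_stop_attributes('hdb', ['sr/di/dcct/current', 'bo/di/bpm-01/xpos'])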
def repair_dedicated_attributes(api, attrs=None, load=True, restart=False):
    api.load_attribute_modes()
    tdedi = api.load_dedicated_archivers()
    tdediattrs = dict((a, d) for d, v in tdedi.items() for a in v)
    newconfig = dict((a, tdediattrs[a]) for a in (attrs or tdediattrs)
                     if a in tdediattrs and a in api and api[a].archiver
                     and tdediattrs[a] != api[a].archiver)
    #rows = dict((a,tdb.db.Query('select ID,archiver,start_date from amt where STOP_DATE is NULL and ID=%d'%api[a].ID)) for a in newconfig.keys() if a in api)
    if restart:
        astor = fn.Astor('ArchivingManager/1')
        astor.load_from_devs_list(list(set(
            [api[a].archiver for a in newconfig] + list(newconfig.values()))))
        astor.stop_servers()
    if load:
        print('Updating %d dedicated attributes in amt.' % len(newconfig))
        for a, d in newconfig.items():
            api.db.Query("update amt set archiver='%s' where ID=%d "
                         "and STOP_DATE is NULL" % (d, api[a].ID))
    if restart:
        astor.start_servers()
    return newconfig
def check_archiving_schema(
        schema='hdb',
        attributes=[], values={},
        ti=None,
        period=7200,
        old_period=24*3600*90,
        exclude=['*/waveid', '*/wavename', '*/elotech-*'],
        use_index=True,
        loads=True,
        action=False,
        trace=True,
        export=None):

    ti = fn.now() if ti is None else str2time(ti) if isString(ti) else ti

    api = pta.api(schema)
    is_hpp = isinstance(api, pta.HDBpp)
    check = dict()
    old_period = 24*3600*old_period if old_period < 1000 \
        else (24*old_period if old_period < 3600 else old_period)

    allattrs = api.get_attributes() if hasattr(
        api, 'get_attributes') else api.keys()
    print('%s contains %d attributes' % (schema, len(allattrs)))

    if attributes:
        if fn.isString(attributes) and fn.isRegexp(attributes):
            tattrs = [a for a in allattrs if clsearch(attributes, a)]
        else:
            attributes = list(map(fn.tango.get_normal_name,
                                  fn.toList(attributes)))
            tattrs = [a for a in allattrs
                      if fn.tango.get_normal_name(a) in attributes]
    else:
        tattrs = allattrs

    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    print('%d attributes to check' % len(tattrs))
    if not len(tattrs):
        return

    if excluded:
        print('\t%d attributes excluded' % len(excluded))

    archived = {}
    for a in tattrs:
        if hasattr(api, 'get_attribute_archiver'):
            arch = api.get_attribute_archiver(a)
        else:
            arch = api[a].archiver
        if arch:
            archived[a] = arch

    print('\t%d attributes are archived' % len(archived))

    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs)
    #tdevs = filter(fn.check_device,alldevs)
    #nodevs = [fn.tango.get_normal_name(d) for d in alldevs if d not in tdevs]
    #if nodevs:
        #print('\t%d devices are not running' % len(nodevs))

    archs = sorted(set(archived.values()))
    if loads:
        astor = fn.Astor()
        astor.load_from_devs_list(archs)
        loads = fn.defaultdict(list)
        for k, s in astor.items():
            for d in s.get_device_list():
                d = fn.tango.get_normal_name(d)
                for a in archived:
                    if fn.tango.get_normal_name(archived[a]) == d:
                        loads[k].append(a)
        for k, s in sorted(loads.items()):
            print('\t%s archives %d attributes' % (k, len(s)))

    noarchs = [fn.tango.get_normal_name(d) for d in archs
               if not fn.check_device(d)]
    if noarchs:
        print('\t%d archivers are not running: %s' % (len(noarchs), noarchs))

    ###########################################################################

    if isString(values) and values.endswith('.pck'):
        print('\nLoading last values from %s file\n' % values)
        import pickle
        values = pickle.load(open(values))

    elif isString(values) and values.endswith('.json'):
        print('\nLoading last values from %s file\n' % values)
        values = fn.json2dict(values)

    elif not use_index or is_hpp:
        print('\nGetting last values ...\n')
        for a in tattrs:
            values[a] = api.load_last_values(a)

    else:
        print('\nGetting updated tables from database ...\n')
        tups = pta.utils.get_table_updates(schema)
        # Some tables do not update MySQL index tables
        # assumption: 'tarch' was undefined in the original; taken here as
        # the list of archived attributes
        tarch = sorted(archived)
        t0 = [a for a in tarch if a in tattrs and not tups[api[a].table]]
        check.update((t, check_attribute(t, readable=True))
                     for t in t0 if not check.get(t))
        t0 = [t for t in t0 if check[t]]
        print('%d/%d archived attributes have indexes not updated ...'
              % (len(t0), len(tarch)))
        if t0 and len(t0) < 100:
            vs = api.load_last_values(t0)
            tups.update((api[t].table, api[t].last_date) for t in t0)
        for a in tattrs:
            if a in tups:
                values[a] = [tups[api[a].table], 0]

    for k, v in values.items():
        if (len(v) if isSequence(v) else v):
            if isinstance(v, dict):
                v = list(v.values())[0]
            if isSequence(v) and len(v) == 1:
                v = v[0]
            if v and not isNumber(v[0]):
                v = [date2time(v[0]), v[1]]
            values[k] = v
        else:
            values[k] = [] if isSequence(v) else None

    print('%d values obtained' % len(values))

    ###########################################################################

    now = fn.now()
    result = fn.Struct()
    times = [t[0] for t in values.values() if t]
    futures = [t for t in times if t > now]
    times = [t for t in times if t < now]
    tmiss = []
    tfutures = [k for k, v in values.items() if v and v[0] in futures]
    tmin, tmax = min(times), max(times)
    print('\toldest update was %s' % time2str(tmin))
    print('\tnewest update was %s' % time2str(tmax))
    if futures:
        print('\t%d attributes have values in the future!' % len(futures))

    tnovals = [a for a in archived if not values.get(a, None)]
    if tnovals:
        print('\t%d archived attributes have no values' % len(tnovals))
    try:
        tmiss = [a for a, v in values.items()
                 if v and old_period < v[0] < ti - period
                 and a not in archived]
    except:
        print(list(values.items())[0])
    if tmiss:
        print('\t%d/%d attrs with values are not archived anymore'
              % (len(tmiss), len(tattrs)))

    result.Excluded = excluded
    result.Schema = schema
    result.All = tattrs
    result.Archived = values
    result.NoValues = tnovals
    result.MissingOrRemoved = tmiss
    result.TMin = tmin
    result.TMax = tmax
    result.Futures = tfutures

    tup = sorted(a for a in values if values[a] and values[a][0] > ti - period)
    tok = [a for a in tup if values[a][1] not in (None, [])]
    print('\n%d/%d archived attributes are updated since %s - %s'
          % (len(tup), len(archived), ti, period))
    print('%d archived attributes are fully ok\n' % (len(tok)))

    tnotup = sorted(a for a in values
                    if values[a] and values[a][0] < ti - period)
    print('\t%d archived attrs are not updated' % len(tnotup))

    tupnoread = [a for a in tup
                 if not values[a][1] and fn.read_attribute(a) is None]

    reads = dict((a, fn.read_attribute(a)) for a in tnotup)
    tnotupread = [a for a in tnotup if reads[a] is not None]
    print('\t%d not updated attrs are readable (Lost)' % len(tnotupread))
    print('\t%d of them are not floats'
          % len([t for t in tnotupread if not isinstance(reads[t], float)]))
    print('\t%d of them are states'
          % len([t for t in tnotupread if t.lower().endswith('/state')]))
    print('\t%d of them seem motors'
          % len([t for t in tnotupread if t.lower().endswith('/position')]))

    tnotupevs = [a for a in tnotupread if fn.tango.check_attribute_events(a)]
    print('\t%d not updated attrs are readable and have events (LostEvents)'
          % len(tnotupevs))

    tnotupnotread = [a for a in tnotup if a not in tnotupread]
    print('\t%d not updated attrs are not readable' % len(tnotupnotread))

    result.Lost = tnotupread
    result.LostEvents = tnotupevs

    losts = (tnotupevs if is_hpp else tnotupread)

    diffs = dict()
    for a in losts:
        try:
            v, vv = values.get(a, (None,))[1], reads[a]
            if fn.isSequence(v):
                v = fn.toList(v)
            if fn.isSequence(vv):
                vv = fn.toList(vv)
            diffs[a] = v != vv
            if fn.isSequence(diffs[a]):
                diffs[a] = any(diffs[a])
            else:
                diffs[a] = bool(diffs[a])
        except:
            diffs[a] = None

    fams = fn.defaultdict(list)
    for a in tnotupread:
        fams['/'.join(a.split('/')[-4:-2])].append(a)
    for f in sorted(fams):
        print('\t%s: %d attrs not updated' % (f, len(fams[f])))
    print()

    differ = [a for a in losts if diffs[a]]  #is True]
    print('\t%d/%d not updated attrs have also wrong values!!!'
          % (len(differ), len(losts)))

    rd = pta.Reader()
    only = [a for a in tnotupread if len(rd.is_attribute_archived(a)) == 1]
    print('\t%d/%d not updated attrs are archived only in %s'
          % (len(only), len(losts), schema))
    result.LostDiff = differ
    print()

    archs = sorted(set(archived.values()))
    astor = fn.Astor()
    astor.load_from_devs_list(archs)
    badloads = fn.defaultdict(list)
    for k, s in astor.items():
        for d in s.get_device_list():
            d = fn.tango.get_normal_name(d)
            for a in losts:
                if fn.tango.get_normal_name(archived[a]) == d:
                    badloads[k].append(a)
    for k, s in badloads.items():
        if len(s):
            print('\t%s archives %d lost attributes' % (k, len(s)))

    print('\t%d updated attrs are not readable' % len(tupnoread))

    result.ArchivedAndReadable = tok
    result.Updated = tup
    result.NotUpdated = tnotup
    result.Unreadable = tnotupnotread
    #result.DeviceNotRunning = nodevs
    result.ArchiverNotRunning = noarchs
    result.LostFamilies = fams

    # Tnones is for readable attributes not being archived
    tnones = [a for a in archived
              if (a not in values or values[a] and values[a][1] in (None, []))
              and a not in tupnoread and a not in tnotupread]
    tupnones = [a for a in tnones if a in tup]

    if tupnones:
        print('\t%d archived readable attrs record empty values'
              % len(tupnones))
    result.Nones = tnones

    if 0:
        get_ratio = lambda a, b: float(len(a))/float(len(b))
        #result.ArchRatio = get_ratio([t for t in readarch if t not in tnotup],readarch)
        #result.ReadRatio = get_ratio(result.Readable,tattrs)
        #result.LostRatio = get_ratio([a for a in tread if a in tnotup],tread)
        #result.MissRatio = get_ratio([a for a in tread if a not in tarch],tread)
        #result.OkRatio = 1.0-result.LostRatio-result.MissRatio
        #result.Summary = '\n'.join((
            #('Checking archiving of %s attributes'%(len(attributes) if attributes else schema))
            #,('%d attributes in %s, %d are currently active'%(len(api),schema,len(tarch)))
            #,('%d devices with %d archived attributes are not running'%(len(nodevs),len([a for a in api if a.rsplit('/',1) in nodevs])))
            #,('%d archived attributes (%2.1f %%) are unreadable! (check and remove)'%(len(tnoread),1e2*get_ratio(tnoread,tarch)))
            #,('%d readable attributes are not archived'%(len(tmiss)))
            #,('%d attributes (readable or not) are updated (%2.1f %% of all readables)'%(len(tok),1e2*result.OkRatio))
            #,('-'*80)
            #,('%d archived attributes (readable or not) are not updated!'%len(tnotup))
            #,('%d archived and readable attributes are not updated! (check and restart?)'%len(treadnotup))
            #,('-'*80)
            #,('%d readable attributes have been removed in the last %d days!'%(len(removed),old_period/(24*3600)))
            #,('%d readable scalar attributes are not being archived (not needed anymore?)'%len(tmscalar))
            #,('%d readable array attributes are not being archived (Ok)'%len(tmarray))
            #,('%d readable array attributes are archived (Expensive)'%len(tarray))
            #,('')))
        #if trace: print(result.Summary)
        #print('%d readable lost,Ok = %2.1f%%, %2.1f %% over all Readables (%2.1f %% of total)'%\
            #(len(treadnotup),1e2*result.ArchRatio,1e2*result.OkRatio,1e2*result.ReadRatio))

    if action:
        if action == 'start_devices':
            print('Executing action %s' % action)
            api.start_devices()

        if action == 'restart_all':
            print('Executing action %s' % action)
            devs = api.get_archivers()
            astor = fn.Astor()
            print('Restarting %d devs: %s' % (len(devs), devs))
            astor.load_from_devs_list(devs)
            astor.stop_servers()
            fn.wait(10.)
            astor.start_servers()

        #print('NO ACTIONS ARE GONNA BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
        #print("""
        #api = PyTangoArchiving.HDBpp(schema)
        #api.start_devices()
        #
        #or
        #
        #api = PyTangoArchiving.ArchivingAPI('%s')
        #lostdevs = sorted(set(api[a].archiver for a in result.NotUpdated))
        #print(lostdevs)
        #if lostdevs < a_reasonable_number:
        #  astor = fn.Astor()
        #  astor.load_from_devs_list(lostdevs)
        #  astor.stop_servers()
        #  fn.time.sleep(10.)
        #  astor.start_servers()
        #"""%schema)

    print('\nfinished in %d seconds\n\n' % (fn.now() - ti))

    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, result.keys()))
            if 'json' in x:
                fn.dict2json(result.dict(), x)
            else:
                f = open(x, 'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(result.dict(), f)
                else:
                    f.write(fn.dict2str(result.dict()))
                f.close()

    return result
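# Typical invocations of check_archiving_schema() (a sketch; the schema name
# and file path are illustrative). A previous export can be reused as the
# 'values' argument to avoid re-reading last values from the database:
result = check_archiving_schema('hdb', period=7200, export='json,pck')
result = check_archiving_schema('hdb', values='/tmp/hdb.pck',
                                action='restart_all')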
#!/usr/bin/env python

import fandango as fn

devs = fn.tango.get_class_devices('HdbEventSubscriber')

for d in devs:
    try:
        if not fn.check_device(d):
            fn.Astor(d).stop_servers()
            fn.Astor(d).start_servers()
        else:
            # Wait to next iteration before setting polling
            dp = fn.get_device(d)
            dp.poll_command('start', 1200000)
            print(d, 'done')
    except:
        print(fn.getLastException())
def rename_archived_attributes(attribs, load=False, restart=False,
                               modes={'MODE_P': [10000]},
                               schemas=('hdb', 'tdb')):
    """
    Renaming attributes in archiving:
        PyTangoArchiving.utils.rename_archived_attributes({oldname:newname})
    The following actions must be automated for both HDB and TDB
    """
    import archiving
    attribs = dict((k.lower(), v.lower()) for k, v in attribs.items())
    for schema in schemas:
        api = archiving.ArchivingAPI(schema)
        api.load_dedicated_archivers()
        #Get the list of old names
        targets = dict((a, api[a].ID) for a in api if a in attribs)
        #Search for archivers
        archivers = fandango.dicts.defaultdict(set)
        servers = fandango.dicts.defaultdict(set)
        for a in targets:
            arch = api[a].archiver
            if arch:
                servers[fandango.tango.get_device_info(arch).server].add(arch)
                archivers[arch].add(a)
        astor = fandango.Astor()
        if load:
            astor.load_from_devs_list(list(archivers.keys()))

        #Check if they are dedicated
        dedicated = dict((a, api[a].dedicated.lower()) for a in targets
                         if api[a].dedicated)
        print('>> update dedicated')
        properties = []
        for arch in set(dedicated.values()):
            prop = map(str.lower, api.tango.get_device_property(
                arch, ['reservedAttributes'])['reservedAttributes'])
            nprop = [attribs.get(p, p) for p in prop]
            properties.append((arch, nprop))
        print(properties)
        if load:
            [api.tango.put_device_property(arch, {'reservedAttributes': nprop})
             for arch, nprop in properties]

        #Store the list of modes,
        #NOP!, instead we will try to use the new modes provided as argument.
        #modes = dict.fromkeys(modes_to_string(api[a].modes) for a in targets)
        #[modes.__setitem__(k,[attribs[a] for a in targets if modes_to_string(api[a].modes)==k]) for k in modes.keys()]

        for server, archs in servers.items():
            if restart or modes is not None:
                for arch in archs:
                    atts = archivers[arch]
                    print('>> stopping archiving: %s' % atts)
                    if load:
                        api.stop_archiving(atts)
                print('>> stopping archiver %s: %s' % (server, archs))
                if load:
                    astor.stop_servers(server)
            for arch in archs:
                atts = archivers[arch]
                print('>> modifying adt table for %s attributes (%d)'
                      % (arch, len(atts)))
                queries = []
                for name in atts:
                    ID = targets[name]
                    name = attribs[name]
                    device, att_name = name.rsplit('/', 1)
                    domain, family, member = device.split('/')
                    queries.append(
                        "update adt set full_name='%s',device='%s',"
                        "domain='%s',family='%s',member='%s',att_name='%s' "
                        "where ID=%d"
                        % (name, device, domain, family, member,
                           att_name, ID))
                print('\n'.join(queries[:10] + ['...']))
                if load:
                    [api.db.Query(query) for query in queries]
            print('>> start %s archivers ' % server)
            if load:
                time.sleep(10)
                astor.start_servers(server)

        if load:
            fandango.Astor("ArchivingManager/*").stop_servers()
            time.sleep(15)
            fandango.Astor("ArchivingManager/*").start_servers()
            time.sleep(20)
        if restart or modes:
            print('>> start archiving: %s' % modes)
            if load:
                api.start_archiving(attribs.values(), modes)
                #for m,atts in modes.items():
                    #m = modes_to_dict(m)
                    #api.start_archiving(atts,m)
    return archivers
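# As the docstring indicates, rename_archived_attributes() is a dry-run by
# default: load=True applies the property and adt changes, restart=True
# cycles the archivers. The attribute names below are illustrative:
rename_archived_attributes(
    {'sr/di/dcct/old_current': 'sr/di/dcct/current'},
    load=True, restart=True)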
def showNodeContextMenu(self, node, event):
    """
    A pop up menu will be shown with the available options.
    Menus are managed using two tuple lists for each node:
    node.ContextMenu and node.ExpertMenu
    """
    obj = self.getNodeDraggable(node)
    position = event.globalPos()
    self.debug('showNodeContextMenu(%s)' % obj)
    if self.itemAt(position) is self.headerItem():
        node = self.headerItem()
        #node.ContextMenu = ['Search ...']
    if node is None:
        node = self
    else:
        if not hasattr(node, 'ContextMenu'):
            node.ContextMenu = []
        if not 'Search ...' in [k for k, a in node.ContextMenu]:
            ##Creating default menu
            if not hasattr(node, 'ExpertMenu'):
                setattr(node, 'ExpertMenu', self.ExpertMenu)

            def addOption(menu, name, action):
                if name not in [t[0] for t in menu]:
                    menu.append((name, action))

            # DEVICE NODE CONTEXT MENU
            if obj.count('/') == 2:
                addOption(node.ContextMenu, "Open Panel", self.showPanel)
                addOption(node.ContextMenu, "Show Attributes",
                          self.addAttrToNode)
                if self.getNodeAdmin(node):
                    addOption(node.ContextMenu,
                              "Go to %s" % self.getNodeAdmin(node),
                              (lambda p=self.getNodeAdmin(node):
                                   p and self.findInTree(p)))
                addOption(node.ContextMenu, '', None)
                addOption(node.ContextMenu, "Show Properties",
                          self.showProperties)
                addOption(node.ContextMenu, "Test Device", self.test_device)
                try:
                    self.astor = fandango.Astor()
                    addOption(node.ContextMenu, 'Start Server',
                              self.start_server)
                    addOption(node.ContextMenu, 'Stop Server',
                              self.stop_server)
                    addOption(node.ContextMenu, 'Device Info',
                              self.device_info)
                except:
                    self.warning(
                        'fandango.Astor() not available to start/stop devices')
                node.ContextMenu.append(('', None))
                addOption(node.ExpertMenu, "Show ALL Attributes",
                          lambda s=self: s.addAttrToNode(full=True))

            # ATTRIBUTE NODE CONTEXT MENU
            elif obj.count('/') == 3:
                for k, v in self.AttributeMenu:
                    self.debug('Adding action %s' % k)
                    if type(v) is str and hasattr(self, v):
                        node.ContextMenu.append((k, getattr(self, v)))
                    else:
                        node.ContextMenu.append(
                            (k, lambda s=self.getNodeAlias(node): v(s)))
                #node.ContextMenu.append(("add to Trends", self.addToPlot))
                #node.ContextMenu.append(("remove from Trends", self.removeFromPlot))
                node.ContextMenu.append(('', None))

            #node.ContextMenu.append(("Expand Node", self.expandNode))
            #node.ContextMenu.append(("Collapse Node", self.collapseNode))
            if node.isExpanded() and node.childCount() < 10 and all(
                    self.getNodeText(node.child(j)).count('/') == 2
                    for j in range(node.childCount())):
                node.ContextMenu.append(
                    ("Show Attributes",
                     lambda n=node, s=self: [
                         s.addAttrToNode(n.child(j))
                         for j in range(n.childCount())]))
            node.ContextMenu.append(
                ("Search ...",
                 lambda: self.findInTree(str(Qt.QInputDialog.getText(
                     self, 'Search ...', 'Write a part of the name',
                     Qt.QLineEdit.Normal)[0]))))

    #configDialogAction = menu.addAction("Refresh Tree")
    #self.connect(configDialogAction, Qt.SIGNAL("triggered()"), self.refreshTree)
    menu = Qt.QMenu(self)

    if hasattr(node, 'ContextMenu'):
        last_was_separator = True
        for t in (type(node.ContextMenu) is dict
                  and node.ContextMenu.items() or node.ContextMenu):
            try:
                k, action = t
                if k:
                    configDialogAction = menu.addAction(k)
                    if action:
                        self.connect(configDialogAction,
                                     Qt.SIGNAL("triggered()"), action)
                    else:
                        configDialogAction.setEnabled(False)
                    last_was_separator = False
                elif not last_was_separator:
                    menu.addSeparator()
                    last_was_separator = True
            except Exception as e:
                self.warning('Unable to add Menu Action: %s:%s' % (t, e))
def archiving_check(schema, csvpath=''):
    api = PyTangoArchiving.ArchivingAPI(schema)
    states = api.servers.states()
    values = api.load_last_values()  #time consuming on HDB

    shouldbe = sorted(a for a in api if values[a] and
        fandango.date2time(values[a][0][0]) > time.time() - 2*30*3600*24)
    active = api.get_archived_attributes()
    updated = sorted(a for a in active if values[a] and
        fandango.date2time(values[a][0][0]) > time.time() - 3*3600)
    missing = sorted(a for a in shouldbe if a not in active)
    lost = sorted(a for a in active if a not in updated)

    loadarchivers = defaultdict(list)
    loadservers = defaultdict(list)
    lostarchivers = defaultdict(list)
    lostservers = defaultdict(list)
    for a in active:
        arch = api[a].archiver.lower()
        server = api.servers.get_device_server(arch).lower()
        loadarchivers[arch].append(a)
        loadservers[server].append(a)
        if a in lost:
            lostarchivers[arch].append(a)
            lostservers[server].append(a)

    emptyarchivers = [a for a, v in loadarchivers.items() if not len(v)]
    lostrate = dict(
        (a, len(v) and len([a for a in v if a in lost])/float(len(v)))
        for a, v in loadarchivers.items())
    lostserversrate = dict(
        (a, len(v) and len([a for a in v if a in lost])/float(len(v)))
        for a, v in loadservers.items())

    dedi = api.load_dedicated_archivers()
    dediattrs = defaultdict(list)
    [dediattrs[a.lower()].append(d) for d, v in dedi.items() for a in v]
    dmult = [a for a, v in dediattrs.items() if len(v) > 1]
    wrongnames = [a for a in dediattrs if not attribute_name_check(a)]
    wrongarchivers = set(k.lower() for k, v in dedi.items() if any(
        a.lower() in map(str.lower, v) for a in wrongnames))
    wrongattrs = [a for a, v in dediattrs.items() if a in api and
                  api[a].archiver.lower() != v[0].lower()]
    deleteattrs = [a for a in dediattrs if a not in shouldbe]

    fnames = GetConfigFiles(csvpath) if csvpath else GetConfigFiles()
    csvs = dict((f, pta.ParseCSV(f, schema)) for f in fnames)
    csvattrs = defaultdict(list)
    [csvattrs[a.lower().strip()].append(f)
     for f, v in csvs.items() for a in v]

    stats = sorted([
        (len(v), len(v) and len([a for a in v if a in lost])/float(len(v)))
        for v in loadservers.values()])
    stats = [(x, fandango.avg(t[1] for t in stats if t[0] == x))
             for x in sorted(set(v[0] for v in stats))]
    # pylab.plot([t[0] for t in stats], [t[1] for t in stats]); pylab.show()

    exported = dict((d, fandango.str2time(
        fandango.get_device_info(d).started, '%dst %B %Y at %H:%M:%S'))
        for d in api.get_archivers())
    first = min(exported.values())

    #SLOWER SPEEDS ALWAYS HAVE MORE LOST ATTRIBUTES
    #Let's try a different approach to restart, much less aggressive
    #than fandango.start_servers()!
    #It seems that there's a lock when so many devices are restarted at once!
    torestart = [k for n, k in reversed(sorted(
        (len(v), k) for k, v in lostservers.items()))]
    for k in torestart:
        print('Restarting %s' % k)
        fandango.Astor(k).stop_servers()
        time.sleep(20.)
        fandango.Astor(k).start_servers(wait=240.)

    allattrs = sorted(set(
        [a for a in csvattrs if a in api] + shouldbe + active))
def main_test():
    print(msg)
    try:
        msg = """
#Create the test device
#Launch it; take snapshot of memory usage

NOTE: This testing is not capable of testing email/SMS sending.
This will have to be human-tested defining a test receiver:

Basic steps:

* Create a simulator with attributes suitable for testing.
* Start the simulators
* Ensure that all alarms conditions are not enabled reseting attribute values.

<pre><code class="python">"""
        if check_step(0):
            tango.add_new_device('PySignalSimulator/test-alarms',
                                 'PySignalSimulator', 'test/test/alarms-test')
            tango.put_device_property('test/test/alarms-test', {
                'DynamicAttributes': list(map(str.strip,
                    """#ALARM TESTING
                    A=READ and VAR('VALUE1') or WRITE and VAR('VALUE1',VALUE)
                    B=DevDouble(READ and VAR('B') or WRITE and VAR('B',VALUE))
                    S=DevDouble(READ and VAR('B')*sin(t%3.14) or WRITE and VAR('B',VALUE))
                    D=DevLong(READ and PROPERTY('DATA',True) or WRITE and WPROPERTY('DATA',VALUE))
                    C = DevLong(READ and VAR('C') or WRITE and VAR('C',VALUE))
                    T=t""".split('\n'))),
                'DynamicStates': 'STATE=C #INT to STATE conversion'
                })
            fandango.Astor().start_servers('PySignalSimulator/test-alarms')
            time.sleep(10.)
            simulator = fandango.get_device('test/test/alarms-test')
            [simulator.write_attribute(a, 0) for a in 'ABSDC']

        msg = """</pre>

* Create 2 PyAlarm instances, to check attribute-based and alarm/group
  based alarms
* Setup the time variables that will manage the alarm cycle

<pre><code class="python">"""
        alarms = panic.api()
        if check_step(1):
            tango.add_new_device('PyAlarm/test-alarms', 'PyAlarm',
                                 'test/alarms/alarms-test')
            tango.add_new_device('PyAlarm/test-group', 'PyAlarm',
                                 'test/alarms/alarms-group')
            threshold = 3
            polling = 5
            autoreset = 60
            alarmdevs = ['test/alarms/alarms-test',
                         'test/alarms/alarms-group']
            props = {
                'Enabled': '15',
                'AlarmThreshold': threshold,
                'AlertOnRecovery': 'email',
                'PollingPeriod': polling,
                'Reminder': 0,
                'AutoReset': autoreset,
                'RethrowState': True,
                'RethrowAttribute': False,
                'IgnoreExceptions': True,
                'UseSnap': True,
                'CreateNewContexts': True,
                'MaxMessagesPerAlarm': 20,
                'FromAddress': '*****@*****.**',
                'LogLevel': 'DEBUG',
                'SMSConfig': ':',
                'StartupDelay': 0,
                'EvalTimeout': 500,
                'UseProcess': False,
                'UseTaurus': False,
                }
            [tango.put_device_property(d, props) for d in alarmdevs]

        N, msg = 2, gb + "* Start the PyAlarm devices" + ge
        if check_step(N):
            receiver = "[email protected],SMS:+3400000000"
            fandango.Astor().start_servers('PyAlarm/test-alarms')
            fandango.Astor().start_servers('PyAlarm/test-group')
            time.sleep(15.)

        N, msg = 3, gb + "* create simple and group Alarms to inspect." + ge
        if check_step(N):
            alarms.add(tag='TEST_A', formula='test/test/alarms-test/A',
                       device='test/alarms/alarms-test',
                       receivers=receiver, overwrite=True)
            alarms.add(tag='TEST_DELTA', device='test/alarms/alarms-test',
                       receivers=receiver, overwrite=True,
                       formula='not -5<test/sim/test-00/S.delta<5 and '
                               '( test/sim/test-00/S, test/sim/test-00/S.delta )')
            alarms.add(tag='TEST_STATE',
                       formula='test/test/alarms-test/State not in '
                               '(OFF,UNKNOWN,FAULT,ALARM)',
                       device='test/alarms/alarms-test',
                       receivers=receiver, overwrite=True)
            alarms.add(tag='TEST_GROUP1',
                       formula='any([d>0 for d in FIND(test/alarms/*/TEST_[ABC].delta)]) '
                               'and FIND(test/alarms/*/TEST_[ABC])',
                       device='test/alarms/alarms-group',
                       receivers=receiver, overwrite=True)
            alarms.add(tag='TEST_GROUP2', formula='GROUP(TEST_[ABC])',
                       device='test/alarms/alarms-group',
                       receivers=receiver, overwrite=True)

        N, msg = 4, gb + """
Test steps:

* Enable an alarm condition in the simulated A attribute.
* Alarm should be enabled after 1+AlarmThreshold*PollingPeriod time (and not before)
* Group alarm should be enabled after 1+AlarmThreshold*PollingPeriod
""" + ge
        if check_step(N):
            pass

        N, msg = 5, gb + """
* Disable alarm condition
* Alarm should enter in recovery state after AlarmThreshold*PollingPeriod.
* Alarm should AutoReset after AutoReset period.
* Group should reset after AutoReset period.
###############################################################################""" + ge
        if check_step(N):
            pass

        N, msg = 6, gb + """
* Testing properties
** RethrowAttribute ... None/NaN
** RethrowState ... True/False
** Enabled ... time or formula or both
###############################################################################""" + ge
        if check_step(N):
            pass

        msg = ge
    except:
        print(traceback.format_exc())

    N, msg = -1, """Stopping all servers ..."""
    check_step(N)
    fandango.Astor().stop_servers('PySignalSimulator/test-alarms')
    fandango.Astor().stop_servers('PyAlarm/test-alarms')
    fandango.Astor().stop_servers('PyAlarm/test-group')
def check_db_schema(schema, attributes=None, values=None,
                    tref=-12*3600, n=1, filters='*', export='json',
                    restart=False, subscribe=False):
    """
    tref is the time that is considered updated (e.g. now()-86400)
    n is used to consider multiple values

    attrs: all attributes in db
    on: archived
    off: in db but not currently archived
    ok: updated

    known error causes (attrs not lost but not updated):

    nok: attributes are not currently readable
    noevs: attributes not sending events
    novals: attributes never recorded a value
    stall: not updated, but current value matches archiving
    lost: not updated, and values doesn't match with current
    """
    t0 = fn.now()
    if hasattr(schema, 'schema'):
        api, schema = schema, schema.schema
    else:
        api = pta.api(schema)

    r = fn.Struct(api=api, schema=schema)
    if isString(tref):
        tref = fn.str2time(tref)
    r.tref = fn.now() + tref if tref < 0 else tref
    r.attrs = [a for a in (attributes or api.get_attributes())
               if fn.clmatch(filters, a)]
    print('check_db_schema(%s,attrs[%s],tref="%s",export as %s)'
          % (schema, len(r.attrs), fn.time2str(r.tref), export))

    if restart and schema != 'hdbpc':
        archs = [a for a in api.get_archivers() if not fn.check_device(a)]
        if archs:
            try:
                print('Restarting archivers: %s' % str(archs))
                astor = fn.Astor(archs)
                astor.stop_servers()
                astor.start_servers()
            except:
                traceback.print_exc()

        stopped = api.get_stopped_attributes()
        print('Restarting %d stopped attributes' % len(stopped))
        api.restart_attributes(stopped)

    r.on = [a for a in api.get_archived_attributes() if a in r.attrs]
    r.off = [a for a in r.attrs if a not in r.on]

    r.archs = fn.defaultdict(list)
    r.pers = fn.defaultdict(list)
    r.values = load_schema_values(api, r.on, values, n, tref=tref)

    if schema in ('tdb', 'hdb'):
        [r.archs[api[k].archiver].append(k) for k in r.on]
    else:
        r.rvals = r.values
        r.freq, r.values = {}, {}
        for k, v in r.rvals.items():
            try:
                if n > 1:
                    v = v[0] if isSequence(v) and len(v) else v
                    r.values[k] = v[0] if isSequence(v) and len(v) else v
                    r.freq[k] = v and float(len(v))/abs(v[0][0]-v[-1][0])
                else:
                    r.values[k] = v
            except Exception as e:
                print(k, v)
                print(fn.except2str())

        for k in api.get_archivers():
            r.archs[k] = api.get_archiver_attributes(k)
        for k in api.get_periodic_archivers():
            r.pers[k] = api.get_periodic_archivers_attributes(k)

    # Get all updated attributes
    r.ok = [a for a, v in r.values.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a, fn.check_attribute(a))
                   for a in r.on if a not in r.ok)
    #r.novals = [a for a,v in r.values.items() if not v]
    r.nok, r.stall, r.noevs, r.lost, r.novals, r.evs, r.rem = \
        [], [], [], [], [], {}, []
    # Method to compare numpy values
    for a, v in r.check.items():
        state = check_archived_attribute(
            a, v, default=CheckState.LOST, cache=r, tref=r.tref,
            check_events=subscribe and not api.is_periodic_archived(a))
        {
            #CheckState.ON : r.on,
            #CheckState.OFF : r.off,
            CheckState.OK: r.ok,  #Shouldn't be any ok in check list
            CheckState.NO_READ: r.nok,
            CheckState.STALL: r.stall,
            CheckState.NO_EVENTS: r.noevs,
            CheckState.LOST: r.lost,
            CheckState.UNK: r.novals,
        }[state].append(a)

    # SUMMARY
    r.summary = schema + '\n'
    r.summary += ','.join("""on: archived
off: not archived
ok: updated
nok: not readable
noevs: no events
novals: no values
stall: not changing
lost: not updated
""".split('\n')) + '\n'

    getline = lambda k, v, l: '\t%s:\t:%d\t(%s)' % (k, len(v), l)

    r.summary += '\n\t%s:\t:%d\tok+stall: %2.1f %%' % (
        'attrs', len(r.attrs),
        (100. * (len(r.ok) + len(r.stall)) / (len(r.on) or 1e12)))
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'on', 'off', len(r.on), len(r.off))
    #if r.off > 20: r.summary += ' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'ok', 'nok', len(r.ok), len(r.nok))
    if len(r.nok) > 10:
        r.summary += ' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'noevs', 'novals', len(r.noevs), len(r.novals))
    if len(r.novals) > 1:
        r.summary += ' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'lost', 'stall', len(r.lost), len(r.stall))
    if len(r.lost) > 1:
        r.summary += ' !!!'
    r.summary += '\n'

    r.archivers = dict.fromkeys(api.get_archivers())
    for d in sorted(r.archivers):
        r.archivers[d] = api.get_archiver_attributes(d)
        novals = [a for a in r.archivers[d] if a in r.novals]
        lost = [a for a in r.archivers[d] if a in r.lost]
        if (len(novals) + len(lost)) > 2:
            r.summary += ('\n%s (all/novals/lost): %s/%s/%s' % (
                d, len(r.archivers[d]), len(novals), len(lost)))

    if hasattr(api, 'get_periodic_archivers'):
        r.periodics = dict.fromkeys(api.get_periodic_archivers())
        for d in sorted(r.periodics):
            r.periodics[d] = api.get_periodic_archiver_attributes(d)
            novals = [a for a in r.periodics[d] if a in r.novals]
            lost = [a for a in r.periodics[d] if a in r.lost]
            if len(novals) + len(lost) > 2:
                r.summary += ('\n%s (all/novals/lost): %s/%s/%s' % (
                    d, len(r.periodics[d]), len(novals), len(lost)))
        r.perattrs = [a for a in r.on
                      if a in api.get_periodic_attributes()]
        r.notper = [a for a in r.on if a not in r.perattrs]

    r.summary += '\nfinished in %d seconds\n\n' % (fn.now() - t0)
    print(r.summary)

    if restart:
        try:
            retries = r.lost + r.novals + r.nok
            print('restarting %d attributes' % len(retries))
            api.restart_attributes(retries)
        except:
            traceback.print_exc()

    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, r.keys()))
            if 'json' in x:
                fn.dict2json(r.dict(), x)
            else:
                f = open(x, 'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(r.dict(), f)
                else:
                    f.write(fn.dict2str(r.dict()))
                f.close()

    for k, v in r.items():
        if fn.isSequence(v):
            r[k] = sorted(v)

    return r
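# Example run of check_db_schema() (a sketch; the schema name is
# illustrative). The returned Struct keys follow the legend in the
# docstring (on/off/ok/nok/noevs/novals/stall/lost):
r = check_db_schema('hdbpp', tref=-12*3600, restart=True, export='json')
print(len(r.ok), len(r.lost))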
def load_attributes(self, servfilter, devfilter, attrfilter, warn=True,
                    exclude=('dserver', 'tango*admin', 'sys*database',
                             'tmp', 'archiving')):
    tracer('In load_attributes(%s,%s,%s)'
           % (servfilter, devfilter, attrfilter))
    servfilter, devfilter, attrfilter = \
        servfilter.replace(' ', '*').strip(), \
        devfilter.replace(' ', '*'), \
        attrfilter.replace(' ', '*')
    attrfilter = attrfilter or 'state'
    devfilter = devfilter or attrfilter

    archive = self.archivecheck.isChecked()
    all_devs = self.all_devices if not archive else self.archdevs
    all_devs = [d for d in all_devs
                if not any(d.startswith(e) for e in exclude)
                or any(d.startswith(e) and fun.matchCl(e, devfilter)
                       for e in exclude)]
    if servfilter.strip('.*'):
        sdevs = list(map(str.lower,
                         fandango.Astor(servfilter).get_all_devices()))
        all_devs = [d for d in all_devs if d in sdevs]
    #print('In load_attributes(%s,%s,%s): Searching through %d %s names'
        #%(servfilter,devfilter,attrfilter,len(all_devs),
        #'server' if servfilter else 'device'))

    if devfilter.strip().strip('.*'):
        devs = [d for d in all_devs
                if (fandango.searchCl(devfilter, d, extend=True))]
        print('\tFound %d devs, Checking alias ...' % (len(devs)))
        alias, alias_devs = [], []
        if '&' in devfilter:
            alias = self.all_alias
        else:
            for df in devfilter.split('|'):
                alias.extend(
                    self.tango.get_device_alias_list('*%s*' % df.strip()))
        if alias:
            print('\t%d alias found' % len(alias))
            alias_devs.extend(self.alias_devs[a] for a in alias
                              if fun.searchCl(devfilter, a, extend=True))
            print('\t%d alias_devs found' % len(alias_devs))
            #if not self.alias_devs:
                #self.alias_devs = dict((str(self.tango.get_device_alias(a)).lower(),a) for a in self.all_alias)
            #devs.extend(d for d,a in self.alias_devs.items() if fandango.searchCl(devfilter,a) and (not servfilter or d in all_devs))
            devs.extend(d for d in alias_devs
                        if not servfilter.strip('.*') or d in all_devs)
    else:
        devs = all_devs

    devs = sorted(set(devs))
    self.matching_devs = devs
    print('In load_attributes(%s,%s,%s): %d devices found'
          % (servfilter, devfilter, attrfilter, len(devs)))

    if False and not len(devs) and not archive:
        #Devices do not actually exist, but may exist in archiving ...
        #Option disabled, was mostly useless
        self.archivecheck.setChecked(True)
        return self.load_attributes(servfilter, devfilter, attrfilter,
                                    warn=False)

    if len(devs) > self.MAX_DEVICES and warn:
        Qt.QMessageBox.warning(
            self, "Warning",
            "Your search (%s,%s) matches too many devices!!! (%d); "
            "please refine your search\n\n%s\n..."
            % (devfilter, attrfilter, len(devs), '\n'.join(devs[:30])))
        return {}
    elif warn and len(devs) > 15:
        r = Qt.QMessageBox.warning(
            self, "Message",
            "Your search (%s,%s) matches %d devices."
            % (devfilter, attrfilter, len(devs)),
            Qt.QMessageBox.Ok | Qt.QMessageBox.Cancel)
        if r == Qt.QMessageBox.Cancel:
            return {}

    self.matching_attributes = {}  #{attribute: (device,alias,attribute,label)}
    failed_devs = []
    for d in sorted(devs):
        try:
            dp = taurus.Device(d)
            if not archive:
                dp.ping()
                tcs = [t for t in dp.get_attribute_list()]
            else:
                tcs = [a.split('/')[-1] for a in self.archattrs
                       if a.startswith(d + '/')]
            matches = [t for t in tcs
                       if fandango.searchCl(attrfilter, t, extend=True)]

            for t in sorted(tcs):
                if not self.archivecheck.isChecked() or not matches:
                    label = dp.get_attribute_config(t).label
                else:
                    label = t
                if t in matches or fandango.searchCl(attrfilter, label,
                                                     extend=True):
                    if d in self.alias_devs:
                        alias = self.alias_devs[d]
                    else:
                        try:
                            alias = str(self.tango.get_alias(d))
                        except:
                            alias = ''
                    self.matching_attributes['%s/%s' % (d, t)] = \
                        (d, alias, t, label)
                    if warn and len(self.matching_attributes) \
                            > self.MAX_ATTRIBUTES:
                        Qt.QMessageBox.warning(
                            self, "Warning",
                            "Your search (%s,%s) matches too many attributes!!! "
                            "(%d); please refine your search\n\n%s\n..."
                            % (devfilter, attrfilter,
                               len(self.matching_attributes),
                               '\n'.join(sorted(
                                   self.matching_attributes.keys())[:30])))
                        return {}
        except:
            print('load_attributes(%s,%s,%s => %s) failed!'
                  % (servfilter, devfilter, attrfilter, d))
            failed_devs.append(d)
            if attrfilter in ('state', '', '*', '**'):
                self.matching_attributes[d + '/state'] = \
                    (d, d, 'state', None)  #A None label means device-not-readable

    if warn and len(self.matching_attributes) > 30:
        r = Qt.QMessageBox.warning(
            self, "Message",
            "(%s) matches %d attributes."
            % (attrfilter, len(self.matching_attributes)),
            Qt.QMessageBox.Ok | Qt.QMessageBox.Cancel)
        if r == Qt.QMessageBox.Cancel:
            return {}

    if not len(self.matching_attributes):
        Qt.QMessageBox.warning(
            self, "Warning",
            "No matching attribute has been found in %s."
            % ('Archiving DB' if archive
               else 'Tango DB (try Archiving option)'))

    if failed_devs:
        print('\t%d failed devs!!!: %s' % (len(failed_devs), failed_devs))
        if warn:
            Qt.QMessageBox.warning(
                self, "Warning",
                "%d devices were not running:\n" % len(failed_devs)
                + '\n'.join(failed_devs[:10]
                            + (['...'] if len(failed_devs) > 10 else [])))

    tracer('\t%d attributes found' % len(self.matching_attributes))
    return self.matching_attributes
def start_element(config_data):
    astor = fn.Astor()
    hosts_data = config_data["tango_hosts"]
    for host_name, host_data in hosts_data.items():
        # if inside a Docker container, then Starter must just use the container
        # (in future could use Ansible templating to modify config file instead)
        if os.path.exists('/.dockerenv'):
            host_name = platform.node()
        for data in host_data:
            srv_instance_startup_level = data["startup_level"]
            server_instances = data["server_instances"]
            for server_instance in server_instances:
                try:
                    astor.set_server_level(server_instance, host_name,
                                           srv_instance_startup_level)
                except Exception as exc:
                    logging.error("EXCEPTION in set level {} {}".format(
                        server_instance, exc))
                    print("EXCEPTION IN SET LEVEL in ASTOR")
                    print("host={!r} level={!r} server_instance={!r}.".format(
                        host_name, srv_instance_startup_level,
                        server_instance))
                    servers_not_running.append(
                        "{}(EXC IN SET LEVEL:{})".format(server_instance, exc))
                    continue

                # For now - start each server - else they do not show up in
                # the Astor GUI. Start them independently since they do not
                # all exist in StartDsPath yet
                try:
                    # astor.restart_servers does not return a value on whether
                    # it was successful or not: even if the server was not
                    # registered on the DsPath it returns None, and when it
                    # successfully restarted it also returns None.
                    # Thus we use stop and start (ignoring errors on stop).
                    try:
                        astor.stop_servers([server_instance])
                    except Exception as exc:
                        # Ignore errors on stop
                        pass
                    result = astor.start_servers([server_instance])
                    try:
                        if result:
                            running_servers.append(server_instance)
                        else:
                            logging.error("ERROR in start {} {}".format(
                                server_instance, result))
                            print("ERROR IN START in ASTOR")
                            print("host={!r} level={!r} server_instance={!r}."
                                  .format(host_name,
                                          srv_instance_startup_level,
                                          server_instance))
                            servers_not_running.append(
                                "{}(ERROR IN START:{})".format(
                                    server_instance, result))
                    except Exception as exc:
                        logging.error("EXC in start {} {}".format(
                            server_instance, exc))
                        print("EXC IN START in ASTOR")
                        print("host={!r} level={!r} server_instance={!r}."
                              .format(host_name, srv_instance_startup_level,
                                      server_instance))
                        servers_not_running.append(
                            "{}(EXC IN START:{})".format(
                                server_instance, exc))
                except Exception as exc:
                    logging.error("EXCEPTION in restart {} {}".format(
                        server_instance, exc))
                    print("EXCEPTION IN RESTART in ASTOR")
                    print("host={!r} level={!r} server_instance={!r}.".format(
                        host_name, srv_instance_startup_level,
                        server_instance))
                    servers_not_running.append(
                        "{}(EXC IN RESTART:{})".format(server_instance, exc))
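# start_element() expects a config_data dict shaped as below (a sketch;
# host, level and server names are illustrative, and the result lists are
# module-level as used above):
running_servers, servers_not_running = [], []
config_data = {
    "tango_hosts": {
        "controls01": [
            {"startup_level": 3,
             "server_instances": ["PyAlarm/test-alarms", "TangoTest/test"]},
        ],
    },
}
start_element(config_data)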