def find(self, device, mask='', N=0, strict=False):
    """
    Find devices or files in the folder database.

    find(device): returns devices or folders matching the given string
    find(device, mask): returns files matching `mask` for the matching devices

    :param device: device name or pattern to match
    :param mask: file pattern; if empty only device names are returned
    :param N: if > 0, return at most N results
    :param strict: if False, patterns are wrapped in '*' wildcards
    """
    if not strict and '*' not in device:
        device = '*' + device + '*'
    matches = [d for d in self.get_all_devices() if fn.clmatch(device, d)]
    if not matches:
        # Fall back to matching each device's SaveFolder property
        for d in self.values():
            try:
                if fn.clmatch(device, d.SaveFolder):
                    matches.append(d.name())
                    # Bug fix: was `N >= len(m)`, which returned after the
                    # FIRST match for any N >= 1; stop once N are collected
                    if N and len(matches) >= N:
                        return matches
            except Exception:
                pass  # best-effort: devices without SaveFolder are skipped
    if matches and not mask:
        return matches
    if not strict and '*' not in mask:
        mask = '*' + mask + '*'
    result = []
    for d in matches:
        for f in fn.get_device(d).ListFiles(mask):
            result.append(d + ':' + f)
            # Same fix as above: truncate at N results, not at the first one
            if N and len(result) >= N:
                return result
    return result
def get_next_archiver(self, errors=False, use_freq=False, attrexp=''):
    """
    Return the name of the least-loaded event archiver.

    :param errors: if True, archiver errors add a load penalty
    :param use_freq: passed through to get_archiver_load()
    :param attrexp: if set, prefer archivers already archiving
        attributes that match this expression
    """
    props = dict((a, fn.tango.get_device_property(a, 'AttributeFilters'))
                 for a in self.get_archivers())
    if any(props.values()):
        # Some archivers are dedicated (have AttributeFilters);
        # only consider the non-dedicated ones as candidates
        archs = [a for a, v in props.items() if not v]
    else:
        # No dedicated archivers: pick the numbered instances
        archs = [a for a in props if fn.clmatch('*[0-9]$', a)]
    loads = dict(
        (a, self.get_archiver_load(a, use_freq=use_freq)) for a in archs)
    if errors:
        # Errors count twice as load
        for a, v in loads.items():
            errs = self.get_archiver_errors(a)
            loads[a] += 10 * len(errs)
    if not len(loads):
        # NOTE(review): only warns; loads[0][-1] below will then raise
        # IndexError — confirm whether raising here was intended
        self.warning('No free archivers found!')
    elif attrexp:
        attrs = [
            a for a in self.get_attributes(True) if fn.clmatch(attrexp, a)
        ]
        archs = [self.get_attribute_archiver(a) for a in attrs]
        if any(a in loads for a in archs):
            # Restrict candidates to archivers already holding matching attrs
            loads = dict((k, v) for k, v in loads.items() if k in archs)
    # Sort by (load, name) and return the name of the lightest archiver
    loads = sorted((v, k) for k, v in loads.items())
    return loads[0][-1]
def main(*args, **opts):
    """Report how many att_scalar/att_array tables of a schema are partitioned."""
    schema = args[0]
    api = pta.api(schema)
    tables = []
    for name in api.getTables():
        if fn.clmatch('att_(scalar|array)_', name):
            tables.append(name)
    descriptions = {}
    for table in tables:
        descriptions[table] = get_table_description(api, table)
    partitioned = [table for table, desc in descriptions.items()
                   if 'partition' in str(desc).lower()]
    print('%s: partitioned tables: %d/%d' % (schema, len(partitioned), len(tables)))
def get_attributes_row_counts(db, attrs='*', start=-86400, stop=-1, limit=0):
    """
    DUPLICATED BY HDBPP.get_attribute_rows !!!
    It will return matching $attrs that recorded more than $limit values in the $start-$stop period::
      countsrf = get_attributes_row_counts('hdbrf',start=-3*86400,limit=20000)
    """
    api = db if not fn.isString(db) else pta.api(db)
    # Epoch/relative times are converted to date strings for SQL
    if not fn.isString(start):
        start = fn.time2str(start)
    if not fn.isString(stop):
        stop = fn.time2str(stop)
    if fn.isString(attrs):
        attrs = [a for a in api.get_attributes() if fn.clmatch(attrs, a)]
    counts = {}
    for attr in attrs:
        attr_id, _attr_type, table = api.get_attr_id_type_table(attr)
        rows = api.Query("select count(*) from %s where att_conf_id = %d"
                         " and data_time between '%s' and '%s'"
                         % (table, attr_id, start, stop))
        count = rows[0][0] if len(rows) else 0
        if count >= limit:
            counts[attr] = count
    return counts
def get_attributes_row_counts(db, attrs='*', start=-86400, stop=-1, limit=0):
    """
    DUPLICATED BY HDBPP.get_attribute_rows !!!
    It will return matching $attrs that recorded more than $limit values in the $start-$stop period::
      countsrf = get_attributes_row_counts('hdbrf',start=-3*86400,limit=20000)
    """
    # db may be a schema name or an already-built api object
    db = pta.api(db) if fn.isString(db) else db
    # Numeric (epoch / relative) times become date strings for the SQL query
    start = start if fn.isString(start) else fn.time2str(start)
    stop = stop if fn.isString(stop) else fn.time2str(stop)
    if fn.isString(attrs):
        # attrs given as a pattern: expand it against the db attribute list
        attrs = [a for a in db.get_attributes() if fn.clmatch(attrs, a)]
    r = {}
    for a in attrs:
        # id, data type and storage table of the attribute
        i, t, b = db.get_attr_id_type_table(a)
        l = db.Query("select count(*) from %s where att_conf_id = %d"
                     " and data_time between '%s' and '%s'"
                     % (b, i, start, stop))
        c = l[0][0] if len(l) else 0
        if c >= limit:
            r[a] = c
    return r
def forceReadings(self, filters='', emit=True):
    """Force an immediate reading of trend sets matching `filters`,
    then optionally refresh the plot."""
    for _name, trend_set in self.trend.trendSets.iteritems():
        model = trend_set.getModel()
        if filters and not fn.clmatch(filters, model):
            continue
        self.warning('forceReadings(%s,%s)' % (model, emit))
        trend_set.forceReading()
    if emit:
        self.trend.emit(Qt.SIGNAL('refreshData'))
def get_hdbpp_for_attributes(attrlist):
    """Group attributes by the hdbpp database whose filter matches them.

    Returns {db_name: [matching attributes]}; an attribute may appear
    under several databases if several filters match it.
    """
    db_filters = get_hdbpp_filters()
    grouped = fn.defaultdict(list)
    for attr in attrlist:
        for db_name, db_filter in db_filters.items():
            if fn.clmatch(db_filter, attr, extend=True):
                grouped[db_name].append(attr)
    return grouped
def get_periodic_archivers_attributes(self, regexp='*'):
    """
    Return {archiver_name.lower(): attributes} for every periodic
    archiver matching `regexp`.
    """
    #archs = fn.tango.get_class_devices('PyHdbppPeriodicArchiver')
    # Bug fix: the original iterated the dict while inserting `a.lower()`
    # keys into it (RuntimeError on py3 when names differ in case) and
    # left the stale `a: None` entries created by dict.fromkeys
    archivers = {}
    for a in self.get_periodic_archivers():
        if fn.clmatch(regexp, a):
            archivers[a.lower()] = self.get_periodic_archiver_attributes(a)
    return archivers
def forceReadings(self, filters='', emit=True):
    """Force an immediate reading of matching trend sets.

    :param filters: pattern selecting trend models (all if empty)
    :param emit: if True, emit the Qt 'refreshData' signal afterwards
    """
    # NOTE: iteritems() is Python-2 only
    for n, ts in self.trend.trendSets.iteritems():
        model = ts.getModel()
        if not filters or fn.clmatch(filters, model):
            self.warning('forceReadings(%s)' % model)
            ts.forceReading()
    if emit:
        self.trend.emit(Qt.SIGNAL('refreshData'))
def get_device_attribute_list(self, dev_name, wildcard='*'):
    """Return attribute names of `dev_name` (active in archiving)
    that match `wildcard`."""
    #return self.command_inout("DbGetDeviceAttributeList", (dev_name, wildcard))
    dev_name = fn.tango.get_normal_name(dev_name)
    prefix = (dev_name + '/').lower()
    short_names = []
    for attr in self._reader.get_attributes(active=True):
        if prefix in attr:
            short_names.append(attr.rsplit('/')[-1])
    return [name for name in short_names if fn.clmatch(wildcard, name)]
def main(*args, **opts):
    """Print the ratio of partitioned att_scalar/att_array tables.

    args[0] is the schema name (e.g. 'hdb'); opts are ignored.
    """
    schema = args[0]
    api = pta.api(schema)
    tables = [
        a for a in api.getTables() if fn.clmatch('att_(scalar|array)_', a)
    ]
    descriptions = dict((t, get_table_description(api, t)) for t in tables)
    # A table counts as partitioned if its DDL mentions 'partition'
    partitioned = [
        t for t, v in descriptions.items() if 'partition' in str(v).lower()
    ]
    print('%s: partitioned tables: %d/%d' % (schema, len(partitioned),
                                             len(tables)))
def list_files(self, mask, files=None):
    """List files matching `mask`; if a cached `files` list is given,
    filter that instead of touching the filesystem."""
    if files:
        #using cache
        return [name for name in files if fn.clmatch(mask, name)]
    if '/' in mask:
        folder, mask = mask.rsplit('/', 1)
    else:
        folder = ''
    #if self.SaveFolder and not folder.startswith('/'):
    folder = self.SaveFolder + '/' + folder  #SAFER TO FORCE ALWAYS PATH
    return fn.listdir(folder, mask)
def clear_caches(self, regexp='get*'):
    """Clear the `.cache` of every cached method whose name matches
    `regexp` (e.g. get_att_conf_table, get_attribute_archiver, ...),
    then reset the dedicated/attributes dicts."""
    self.info('Clear attribute lists caches ...')
    for m in dir(self):
        if not fn.clmatch(regexp, m):
            continue
        member = getattr(self, m)
        # Bug fix: the original called hasattr(m, 'cache') on the NAME
        # string, which is always False, so no cache was ever cleared
        if hasattr(member, 'cache'):
            member.cache.clear()
    self.dedicated = {}
    self.attributes = {}
def get_device(self, dev, exported=True):
    """Return the proxy for `dev`; a wildcard name picks a random
    matching device, a 'folderds:' prefix is stripped first."""
    name = dev or '*'
    if name.startswith('folderds:'):
        name = name.split(':', 1)[-1]
    name = name.strip('/')
    if '*' in name:
        candidates = [d for d in self.get_all_devices(exported)
                      if fn.clmatch(name, d)]
        name = random.choice(candidates)
    else:
        name = fn.clsub('^tango:/{0,2}', '', name)
        # Keep only domain/family/member (plus host:port parts if present)
        name = '/'.join(name.split('/')[0:3 + name.count(':')])
    return self[name]
def get_next_periodic_archiver(self, attrexp=''):
    """Return the least-loaded periodic archiver.

    attrexp can be used to restrict the choice to archivers already
    archiving matching attributes.
    """
    loads = self.get_periodic_archivers_attributes()
    if attrexp:
        matching = (a for a in self.get_periodic_attributes()
                    if fn.clmatch(attrexp, a))
        archs = [self.get_periodic_attribute_archiver(a) for a in matching]
        if archs:
            loads = dict((arch, attrs) for arch, attrs in loads.items()
                         if arch in archs)
    # Rank archivers by number of assigned attributes (ties by name)
    ranking = sorted((len(attrs), arch) for arch, attrs in loads.items())
    return ranking[0][-1]
def get_host_last_partitions(host, user, passwd, exclude_db='information_schema|tdb*'):
    """Query information_schema on `host` and return, per database,
    the newest partition bigger than MIN_FILE_SIZE."""
    import fandango.db as fdb
    db = fdb.FriendlyDB(host=host, db_name='information_schema',
                        user=user, passwd=passwd)
    result = {}
    for row in db.Query('show databases'):
        dbname = row[0]
        if fn.clmatch(exclude_db, dbname):
            continue
        query = ("select partition_name from partitions where "
                 "table_schema = '%s' and partition_name is not NULL "
                 "and data_length > %d order by partition_name DESC limit 1;"
                 % (dbname, MIN_FILE_SIZE))
        result[dbname] = db.Query(query)
    return result
def check_db_schema(schema, attributes = None, values = None, tref = -12*3600,
        n = 1, filters = '*', export = 'json', restart = False,
        subscribe = False):
    """
    Check the health of an archiving schema and classify its attributes.

    tref is the time that is considered updated (e.g. now()-86400)
    n is used to consider multiple values

    attrs: all attributes in db
    on: archived
    off: in db but not currently archived
    ok: updated

    known error causes (attrs not lost but not updated):
    nok: attributes are not currently readable
    noevs: attributes not sending events
    novals: attributes never recorded a value
    stall: not updated, but current value matches archiving
    lost: not updated, and values doesn't match with current
    """
    t0 = fn.now()
    if hasattr(schema,'schema'):
        # NOTE(review): `api` is read here before any assignment; this
        # branch raises NameError as written — likely meant schema.schema
        api,schema = schema,api.schema
    else:
        api = pta.api(schema)
    r = fn.Struct(api=api,schema=schema)
    if isString(tref):
        tref = fn.str2time(tref)
    # Negative tref is relative to now
    r.tref = fn.now()+tref if tref < 0 else tref
    r.attrs = [a for a in (attributes or api.get_attributes())
               if fn.clmatch(filters,a)]
    print('check_db_schema(%s,attrs[%s],tref="%s",export as %s)'
          % (schema,len(r.attrs),fn.time2str(r.tref),export))
    if restart and schema!='hdbpc':
        # Restart dead archiver devices and stopped attributes first
        archs = [a for a in api.get_archivers() if not fn.check_device(a)]
        if archs:
            try:
                print('Restarting archivers: %s' % str(archs))
                astor = fn.Astor(archs)
                astor.stop_servers()
                astor.start_servers()
            except:
                traceback.print_exc()
        stopped = api.get_stopped_attributes()
        print('Restarting %d stopped attributes' % len(stopped))
        api.restart_attributes(stopped)
    r.on = [a for a in api.get_archived_attributes() if a in r.attrs]
    r.off = [a for a in r.attrs if a not in r.on]
    r.archs = fn.defaultdict(list)
    r.pers = fn.defaultdict(list)
    r.values = load_schema_values(api,r.on,values,n,tref=tref)
    if schema in ('tdb','hdb'):
        [r.archs[api[k].archiver].append(k) for k in r.on]
    else:
        # hdb++: keep raw values aside, extract last value and, when
        # several values were loaded (n > 1), an update frequency
        r.rvals = r.values
        r.freq, r.values = {}, {}
        for k,v in r.rvals.items():
            try:
                if n > 1:
                    v = v[0] if isSequence(v) and len(v) else v
                    r.values[k] = v[0] if isSequence(v) and len(v) else v
                    r.freq[k] = v and float(len(v))/abs(v[0][0]-v[-1][0])
                else:
                    r.values[k] = v
            except Exception as e:
                print(k,v)
                print(fn.except2str())
        for k in api.get_archivers():
            r.archs[k] = api.get_archiver_attributes(k)
        for k in api.get_periodic_archivers():
            r.pers[k] = api.get_periodic_archivers_attributes(k)
    # Get all updated attributes
    r.ok = [a for a,v in r.values.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a,fn.check_attribute(a)
        ) for a in r.on if a not in r.ok)
    #r.novals = [a for a,v in r.values.items() if not v]
    r.nok, r.stall, r.noevs, r.lost, r.novals, r.evs, r.rem = \
        [],[],[],[],[],{},[]
    # Classify each non-updated attribute and append it to the
    # corresponding result list via this state dispatch table
    for a,v in r.check.items():
        state = check_archived_attribute(a, v, default=CheckState.LOST,
            cache=r, tref=r.tref,
            check_events = subscribe and not api.is_periodic_archived(a))
        {
            #CheckState.ON : r.on,
            #CheckState.OFF : r.off,
            CheckState.OK : r.ok, #Shouldn't be any ok in check list
            CheckState.NO_READ : r.nok,
            CheckState.STALL : r.stall,
            CheckState.NO_EVENTS : r.noevs,
            CheckState.LOST : r.lost,
            CheckState.UNK : r.novals,
        }[state].append(a)
    # SUMMARY
    r.summary = schema +'\n'
    r.summary += ','.join(
        """on: archived
off: not archived
ok: updated
nok: not readable
noevs: no events
novals: no values
stall: not changing
lost: not updated
""".split('\n'))+'\n'
    getline = lambda k,v,l: '\t%s:\t:%d\t(%s)' % (k,len(v),l)
    r.summary += '\n\t%s:\t:%d\tok+stall: %2.1f %%' % (
        'attrs',len(r.attrs),
        (100.*(len(r.ok)+len(r.stall))/(len(r.on) or 1e12)))
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'on','off',len(r.on),len(r.off))
    #if r.off > 20: r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'ok','nok',len(r.ok),len(r.nok))
    if len(r.nok) > 10:
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'noevs','novals',len(r.noevs),len(r.novals))
    if len(r.novals) > 1:
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'lost','stall',len(r.lost),len(r.stall))
    if len(r.lost) > 1:
        r.summary+=' !!!'
    r.summary += '\n'
    # Report per-archiver counts when an archiver holds several bad attrs
    r.archivers = dict.fromkeys(api.get_archivers())
    for d in sorted(r.archivers):
        r.archivers[d] = api.get_archiver_attributes(d)
        novals = [a for a in r.archivers[d] if a in r.novals]
        lost = [a for a in r.archivers[d] if a in r.lost]
        if (len(novals)+len(lost)) > 2:
            r.summary += ('\n%s (all/novals/lost): %s/%s/%s'
                % (d,len(r.archivers[d]),len(novals),len(lost)))
    if hasattr(api,'get_periodic_archivers'):
        r.periodics = dict.fromkeys(api.get_periodic_archivers())
        for d in sorted(r.periodics):
            r.periodics[d] = api.get_periodic_archiver_attributes(d)
            novals = [a for a in r.periodics[d] if a in r.novals]
            lost = [a for a in r.periodics[d] if a in r.lost]
            if len(novals)+len(lost) > 2:
                r.summary += ('\n%s (all/novals/lost): %s/%s/%s'
                    % (d,len(r.periodics[d]),len(novals),len(lost)))
        r.perattrs = [a for a in r.on if a in api.get_periodic_attributes()]
        r.notper = [a for a in r.on if a not in r.perattrs]
    r.summary += '\nfinished in %d seconds\n\n'%(fn.now()-t0)
    print(r.summary)
    if restart:
        try:
            retries = r.lost+r.novals+r.nok
            print('restarting %d attributes' % len(retries))
            api.restart_attributes(retries)
        except:
            traceback.print_exc()
    if export is not None:
        # Export the whole result struct as json/pickle/txt under /tmp
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json','pck','pickle','txt'):
                x = '/tmp/%s.%s' % (schema,x)
            print('Saving %s file with keys:\n%s' % (x,r.keys()))
            if 'json' in x:
                fn.dict2json(r.dict(),x)
            else:
                f = open(x,'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(r.dict(),f)
                else:
                    f.write(fn.dict2str(r.dict()))
                f.close()
    # Sort all list results for stable/readable output
    for k,v in r.items():
        if fn.isSequence(v):
            r[k] = sorted(v)
    return r
def start_attributes_for_archivers(target, attr_regexp='', event_conf={},
        load=False, by_class=False, min_polling = 100, overwrite = False,
        check = True):
    """
    Start archiving for a set of attributes, assigning them to archivers.

    Target may be an attribute list or a device regular expression.
    if by_class = True, config will be loaded from Tango class properties

    NOTE(review): event_conf={} is a mutable default argument; it is not
    mutated here but should still be replaced by None — confirm.
    """
    import PyTangoArchiving.hdbpp as ptah
    # Build the list of target attributes
    if fn.isSequence(target):
        if attr_regexp:
            attrs = [a for a in target
                     if fn.clmatch(attr_regexp,a.rsplit('/')[-1])]
        else:
            attrs = target
    else:
        dev_regexp = target
        attrs = fn.find_attributes(dev_regexp+'/'+(attr_regexp or '*'))
    # Build {attribute: archiving config} from class properties,
    # from the given event_conf, or from the current configuration
    if by_class:
        classes = fn.defaultdict(dict)
        devs = fn.defaultdict(list)
        [devs[a.rsplit('/',1)[0]].append(a) for a in attrs]
        for d,v in devs.items():
            classes[fn.tango.get_device_class(d)][d] = v
        attrs = {}
        for c,devs in classes.items():
            # NOTE(review): devs.keys()[0] is Python-2 only (py3 views
            # are not indexable) — confirm target interpreter
            cfg = get_class_archiving(devs.keys()[0])
            for d in devs:
                raw = devs[d]
                for a,v in cfg.items():
                    for aa in raw:
                        if fn.clmatch(a,aa.split('/')[-1],terminate=True):
                            if not attr_regexp or fn.clmatch(attr_regexp,aa):
                                attrs[aa] = v
    elif event_conf:
        attrs = dict((a,event_conf) for a in attrs)
    else:
        attrs = dict((a,get_current_conf(a)) for a in attrs)
    print('Starting %d attributes' % (len(attrs)))
    archs = ptah.multi.match_attributes_and_archivers(attrs.keys())
    rd = PyTangoArchiving.Reader()
    #print(archs)
    alldbs = ptah.multi.get_hdbpp_databases()
    dbs = ptah.multi.get_hdbpp_databases(archs,alldbs)
    #return dbs,archs,attrs
    for db,rcs in dbs.items():
        api = PyTangoArchiving.Schemas.getApi(db)
        dbs[db] = dict.fromkeys(rcs)
        for d in rcs:
            dbs[db][d] = ts = dict.fromkeys(archs[d])
            #return ts
            for a in ts:
                try:
                    m = fn.parse_tango_model(a,fqdn=True)
                    dbs[db][d][a] = mode = attrs[a]
                    if not overwrite and db in rd.is_attribute_archived(a):
                        print('%s already archived in %s' % (a,db))
                        continue
                    events = ft.check_attribute_events(
                        a,ft.EventType.ARCHIVE_EVENT)
                    ep = events.get(ft.EventType.ARCHIVE_EVENT,False)
                    if ep is True:
                        # Device pushes archive events itself: no polling
                        if 'polling' in mode:
                            mode.pop('polling')
                    # NOTE(review): isinstance() below is missing its
                    # classinfo argument — as written it raises TypeError;
                    # likely meant isinstance(events.get(ep), (int, float))
                    elif isinstance(events.get(ep,(int,float))):
                        mode['polling'] = min((ep,mode.get('polling',10000)))
                        mode['polling'] = max((mode['polling'],min_polling))
                    if not events.get(ft.EventType.CHANGE_EVENT,False):
                        # Map archiving criteria to event configuration
                        if mode.get('archive_abs_change',0):
                            mode['abs_event'] = mode['archive_abs_change']
                        if mode.get('archive_rel_change',0):
                            mode['rel_event'] = mode['archive_rel_change']
                        # NOTE(review): tests 'arch_per_event' but copies
                        # 'archive_per_event' — verify the key names
                        if mode.get('arch_per_event',0):
                            mode['per_event'] = mode['archive_per_event']
                    print('%s.start_archiving(%s,%s,%s): %s'
                          % (db,d,m.fullname,mode,load))
                    if load:
                        fn.tango.set_attribute_events(a,**mode)
                        r = api.start_archiving(m.fullname,d,code_event=True)
                        assert not check or r
                except:
                    print('%s failed!'%a)
                    traceback.print_exc()
    return dbs
def check_archiving_schema(
        schema='hdb', attributes=[], values={},
        ti = None, period = 7200, old_period=24*3600*90,\
        exclude=['*/waveid','*/wavename','*/elotech-*'],
        use_index = True, loads = True,
        action=False, trace=True, export=None):
    """Audit an archiving schema: which attributes are archived, updated,
    readable, lost, etc. Returns a fn.Struct of result lists.

    NOTE(review): attributes=[] and values={} are mutable defaults and
    `values` IS mutated below (values[a] = ...), so results leak across
    calls when the defaults are used — should be None-sentinels.
    """
    ti = fn.now() if ti is None else str2time(ti) if isString(ti) else ti
    api = pta.api(schema)
    is_hpp = isinstance(api, pta.HDBpp)
    check = dict()
    # Normalize old_period (given in days, hours or seconds) to seconds
    old_period = 24*3600*old_period if old_period < 1000 \
        else (24*old_period if old_period<3600 else old_period)
    allattrs = api.get_attributes() if hasattr(
        api, 'get_attributes') else api.keys()
    print('%s contains %d attributes' % (schema, len(allattrs)))
    if attributes:
        if fn.isString(attributes) and fn.isRegexp(attributes):
            tattrs = [a for a in allattrs if clsearch(attributes, a)]
        else:
            attributes = map(fn.tango.get_normal_name, fn.toList(attributes))
            # NOTE(review): tests membership against `allattrs` instead of
            # `attributes` — this keeps every attribute; verify intent
            tattrs = [
                a for a in allattrs
                if fn.tango.get_normal_name(a) in allattrs
            ]
    else:
        tattrs = allattrs
    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]
    print('%d attributes to check' % len(tattrs))
    if not len(tattrs):
        return
    if excluded:
        print('\t%d attributes excluded' % len(excluded))
    # Map each attribute to its archiver device (api-flavor dependent)
    archived = {}
    for a in tattrs:
        if hasattr(api, 'get_attribute_archiver'):
            arch = api.get_attribute_archiver(a)
        else:
            arch = api[a].archiver
        if arch:
            archived[a] = arch
    print('\t%d attributes are archived' % len(archived))
    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs)
    #tdevs = filter(fn.check_device,alldevs)
    #nodevs = [fn.tango.get_normal_name(d) for d in alldevs if d not in tdevs]
    #if nodevs:
    #    print('\t%d devices are not running' % len(nodevs))
    archs = sorted(set(archived.values()))
    if loads:
        # Compute per-archiver-server load (attributes per server)
        astor = fn.Astor()
        astor.load_from_devs_list(archs)
        loads = fn.defaultdict(list)
        for k, s in astor.items():
            for d in s.get_device_list():
                d = fn.tango.get_normal_name(d)
                for a in archived:
                    if fn.tango.get_normal_name(archived[a]) == d:
                        loads[k].append(a)
        for k, s in sorted(loads.items()):
            print('\t%s archives %d attributes' % (k, len(s)))
    noarchs = [
        fn.tango.get_normal_name(d) for d in archs if not fn.check_device(d)
    ]
    if noarchs:
        print('\t%d archivers are not running: %s' % (len(noarchs), noarchs))
    ###########################################################################
    # Obtain last values: from a file, from the db, or from index tables
    if isString(values) and values.endswith('.pck'):
        print('\nLoading last values from %s file\n' % values)
        import pickle
        values = pickle.load(open(values))
    elif isString(values) and values.endswith('.json'):
        print('\nLoading last values from %s file\n' % values)
        values = fn.json2dict(values)
    elif not use_index or is_hpp:
        print('\nGetting last values ...\n')
        for a in tattrs:
            values[a] = api.load_last_values(a)
    else:
        print('\nGetting updated tables from database ...\n')
        tups = pta.utils.get_table_updates(schema)
        # Some tables do not update MySQL index tables
        # NOTE(review): `tarch` is never defined in this function (copied
        # from check_archiving_performance?) — this branch raises NameError;
        # probably should iterate `archived`
        t0 = [a for a in tarch if a in tattrs and not tups[api[a].table]]
        check.update((t, check_attribute(a, readable=True))
                     for t in t0 if not check.get(t))
        t0 = [t for t in t0 if check[t]]
        print('%d/%d archived attributes have indexes not updated ...'
              % (len(t0), len(tarch)))
        if t0 and len(t0) < 100:
            vs = api.load_last_values(t0)
            tups.update((api[t].table, api[t].last_date) for t in t0)
        for a in tattrs:
            if a in tups:
                values[a] = [tups[api[a].table], 0]
    # Normalize every value to a [time, value] pair (or []/None)
    for k, v in values.items():
        if (len(v) if isSequence(v) else v):
            if isinstance(v, dict):
                v = v.values()[0]
            if isSequence(v) and len(v) == 1:
                v = v[0]
            if v and not isNumber(v[0]):
                v = [date2time(v[0]), v[1]]
            values[k] = v
        else:
            values[k] = [] if isSequence(v) else None
    print('%d values obtained' % len(values))
    ###########################################################################
    now = fn.now()
    result = fn.Struct()
    times = [t[0] for t in values.values() if t]
    futures = [t for t in times if t > now]
    times = [t for t in times if t < now]
    tmiss = []
    tfutures = [k for k, v in values.items() if v and v[0] in futures]
    tmin, tmax = min(times), max(times)
    print('\toldest update was %s' % time2str(tmin))
    print('\tnewest update was %s' % time2str(tmax))
    if futures:
        print('\t%d attributes have values in the future!' % len(futures))
    tnovals = [a for a in archived if not values.get(a, None)]
    if tnovals:
        print('\t%d archived attributes have no values' % len(tnovals))
    try:
        tmiss = [
            a for a, v in values.items()
            if v and old_period < v[0] < ti - period and a not in archived
        ]
    except:
        print(values.items()[0])
    if tmiss:
        print('\t%d/%d attrs with values are not archived anymore'
              % (len(tmiss), len(tattrs)))
    result.Excluded = excluded
    result.Schema = schema
    result.All = tattrs
    result.Archived = values
    result.NoValues = tnovals
    result.MissingOrRemoved = tmiss
    result.TMin = tmin
    result.TMax = tmax
    result.Futures = tfutures
    # Attributes updated within `period`; `tok` also recorded a real value
    tup = sorted(a for a in values if values[a] and values[a][0] > ti - period)
    tok = [a for a in tup if values[a][1] not in (None, [])]
    print('\n%d/%d archived attributes are updated since %s - %s'
          % (len(tup), len(archived), ti, period))
    print('%d archived attributes are fully ok\n' % (len(tok)))
    tnotup = sorted(
        a for a in values if values[a] and values[a][0] < ti - period)
    print('\t%d archived attrs are not updated' % len(tnotup))
    tupnoread = [
        a for a in tup if not values[a][1] and fn.read_attribute(a) is None
    ]
    reads = dict((a, fn.read_attribute(a)) for a in tnotup)
    tnotupread = [a for a in tnotup if reads[a] is not None]
    print('\t%d not updated attrs are readable (Lost)' % len(tnotupread))
    print('\t%d of them are not floats'
          % len([t for t in tnotupread if not isinstance(reads[t], float)]))
    print('\t%d of them are states'
          % len([t for t in tnotupread if t.lower().endswith('/state')]))
    print('\t%d of them seem motors'
          % len([t for t in tnotupread if t.lower().endswith('/position')]))
    tnotupevs = [a for a in tnotupread if fn.tango.check_attribute_events(a)]
    print('\t%d not updated attrs are readable and have events (LostEvents)'
          % len(tnotupevs))
    tnotupnotread = [a for a in tnotup if a not in tnotupread]
    print('\t%d not updated attrs are not readable' % len(tnotupnotread))
    result.Lost = tnotupread
    result.LostEvents = tnotupevs
    losts = (tnotupevs if is_hpp else tnotupread)
    # Compare last archived value against the current reading
    diffs = dict()
    for a in losts:
        try:
            v, vv = values.get(a, (None, ))[1], reads[a]
            if fn.isSequence(v):
                v = fn.toList(v)
            if fn.isSequence(vv):
                vv = fn.toList(vv)
            diffs[a] = v != vv
            if fn.isSequence(diffs[a]):
                diffs[a] = any(diffs[a])
            else:
                diffs[a] = bool(diffs[a])
        except:
            diffs[a] = None
    # Group not-updated attributes by device "family" for reporting
    fams = fn.defaultdict(list)
    for a in tnotupread:
        fams['/'.join(a.split('/')[-4:-2])].append(a)
    for f in sorted(fams):
        print('\t%s: %d attrs not updated' % (f, len(fams[f])))
    print()
    differ = [a for a in losts if diffs[a]]  #is True]
    print('\t%d/%d not updated attrs have also wrong values!!!'
          % (len(differ), len(losts)))
    rd = pta.Reader()
    only = [a for a in tnotupread if len(rd.is_attribute_archived(a)) == 1]
    print('\t%d/%d not updated attrs are archived only in %s'
          % (len(only), len(losts), schema))
    result.LostDiff = differ
    print()
    # Which archiver servers hold the lost attributes
    archs = sorted(set(archived.values()))
    astor = fn.Astor()
    astor.load_from_devs_list(archs)
    badloads = fn.defaultdict(list)
    for k, s in astor.items():
        for d in s.get_device_list():
            d = fn.tango.get_normal_name(d)
            for a in losts:
                if fn.tango.get_normal_name(archived[a]) == d:
                    badloads[k].append(a)
    for k, s in badloads.items():
        if len(s):
            print('\t%s archives %d lost attributes' % (k, len(s)))
    print('\t%d updated attrs are not readable' % len(tupnoread))
    result.ArchivedAndReadable = tok
    result.Updated = tup
    result.NotUpdated = tnotup
    result.Unreadable = tnotupnotread
    #result.DeviceNotRunning = nodevs
    result.ArchiverNotRunning = noarchs
    result.LostFamilies = fams
    # Tnones is for readable attributes not being archived
    tnones = [
        a for a in archived
        if (a not in values or values[a] and values[a][1] in (None, []))
        and a not in tupnoread and a not in tnotupread
    ]
    tupnones = [a for a in tnones if a in tup]
    if tupnones:
        print('\t%d archived readable attrs record empty values'
              % len(tupnones))
    result.Nones = tnones
    if 0:
        # Disabled ratio/summary report kept for reference
        get_ratio = lambda a, b: float(len(a)) / float(len(b))
        #result.ArchRatio/ReadRatio/LostRatio/MissRatio/OkRatio and the
        #multi-line result.Summary built from them were printed here
    if action:
        if action == 'start_devices':
            print('Executing action %s' % action)
            api.start_devices()
        if action == 'restart_all':
            print('Executing action %s' % action)
            devs = api.get_archivers()
            astor = fn.Astor()
            # NOTE(review): two format args for a single '%d' placeholder —
            # this raises TypeError when executed
            print('Restarting %d devs:' % (len(devs), devs))
            astor.load_from_devs_list(devs)
            astor.stop_servers()
            fn.wait(10.)
            astor.start_servers()
    #print('NO ACTIONS ARE GONNA BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
    print('\nfinished in %d seconds\n\n' % (fn.now() - ti))
    if export is not None:
        # Export the result struct as json/pickle/txt under /tmp
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, result.keys()))
            if 'json' in x:
                fn.dict2json(result.dict(), x)
            else:
                f = open(x, 'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(result.dict(), f)
                else:
                    f.write(fn.dict2str(result.dict()))
                f.close()
    return result
def get_device_attribute_list(self, dev_name, wildcard='*'):
    """Return attribute names of `dev_name` present in the archiving reader.

    Emulates DbGetDeviceAttributeList using the reader's active attributes.
    """
    #return self.command_inout("DbGetDeviceAttributeList", (dev_name, wildcard))
    dev_name = fn.tango.get_normal_name(dev_name)
    attrs = self._reader.get_attributes(active=True)
    # Substring (not prefix) match, so fully-qualified tango URIs also match
    dattrs = [a.rsplit('/')[-1] for a in attrs if (dev_name+'/').lower() in a]
    return [a for a in dattrs if fn.clmatch(wildcard,a)]
tests = getattr(mod, '__test__', {}) except: pass f = open(fname) lines = f.readlines() f.close() if sys.argv[3:]: fname2 = sys.argv[3] else: fname2 = 'test_' + fname.split('/')[-1] decs = [] for l in lines: if fandango.clmatch('^(def|class)[\ ].*', l): decs.append(l.split()[1].split('(')[0].strip(':').strip()) if action == 'generate': if os.path.exists(fname2) and not fname2.endswith('.tmp'): fname2 += '.tmp' outs.append('#!/usr/bin/env python\n' '# -*- coding: utf-8 -*-' '#\n' '# %s test template generated\n' '# from %s\n' '# \n' % (fname2, fname)) outs.append('\n') if mod: outs.append('import %s\n' % mod.__name__)
def checkSchema(k, schema, attribute='', start=None, stop=None):
    """Evaluate a schema's configured 'check' expression for the given
    attribute and time range; returns True when the schema applies.

    :param k: the Schemas manager (provides LOCALS/MODULES/getSchema...)
    :param schema: schema name or SchemaDict
    :param attribute: attribute name injected into the expression scope
    :param start/stop: time bounds (string dates accepted)
    """
    if not isinstance(schema, SchemaDict):
        schema = k.getSchema(schema)
    if not schema:
        return False
    f = schema.get('check')
    if not f:
        # No check expression means the schema always applies
        print('%s has no check function' % str(schema))
        return True
    try:
        now = time.time()
        # String dates are parsed; missing bounds default to [now-1, now]
        start = (str2time(start) if fn.isString(start) else fn.notNone(
            start, now - 1))
        stop = (str2time(stop) if fn.isString(stop) else fn.notNone(
            stop, now))
        xmatch = lambda e, a: clmatch(e, a, extend=True)
        # Names made available to the evaluated expression
        k.LOCALS.update({
            'attr': attribute.lower(),
            'attribute': attribute.lower(),
            'device': attribute.lower().rsplit('/', 1)[0],
            'match': lambda r: xmatch(r, attribute),
            'clmatch': xmatch,
            'overlap': overlap,
            'time2str': time2str,
            'str2time': str2time,
            't2s': time2str,
            's2t': str2time,
            'start': start,
            'stop': stop,
            'now': now,
            'begin': start,
            'end': stop,
            'NOW': now,
            'reader': schema.get('reader', schema.get('api')),
            'schema': schema.get('schema'),
            'dbname': schema.get('dbname',
                schema.get('db_name', schema.get('schema', ''))),
        })
        # Instantiate reader/api lazily, only if the expression uses them
        if 'reader' in f:
            k.getReader(schema.get('schema'))
        if 'api' in f:
            k.getApi(schema.get('schema'))
        #print('In reader.Schemas.checkSchema(%s,%s,%s,%s): %s'
        #      % (schema,attribute,start,stop,f))
        # NOTE(review): evalX executes the configured expression; schema
        # definitions must be trusted input
        v = fn.evalX(f, k.LOCALS, k.MODULES)
    except:
        print('checkSchema(%s,%s) failed!' % (schema, attribute))
        traceback.print_exc()
        v = False
    #print('checkSchema(%s): %s'%(schema,v))
    return v
def get_attributes_errors(self, regexp='*', timeout=3*3600,
                          from_db=False, extend = False):
    """
    Returns a dictionary {attribute, error/last value}

    If from_db=True and extend=True, it performs a full attribute check
    (readability, events, archiver errors); otherwise only the archivers'
    error lists are inspected.
    """
    if regexp == '*':
        # Full scan: reset the per-category status report
        self.status = fn.defaultdict(list)
    if from_db or extend:
        timeout = fn.now()-timeout
        attrs = self.get_attributes(True)
        attrs = fn.filtersmart(attrs,regexp)
        print('get_attributes_errors([%d/%d])'
              % (len(attrs),len(self.attributes)))
        vals = self.load_last_values(attrs)
        # Remove up-to-date attributes from vals; only stale ones remain
        # NOTE(review): vals.pop(a) while iterating vals.items() raises
        # RuntimeError on Python 3 — iterate a copy instead
        for a,v in vals.items():
            if v and v[0] > timeout:
                self.status['Updated'].append(a)
                if v[1] is not None:
                    self.status['Readable'].append(a)
                else:
                    rv = fn.read_attribute(a)
                    if rv is not None:
                        self.status['WrongNone'].append(a)
                    else:
                        self.status['None'].append(a)
                vals.pop(a)
        if not extend:
            self.status['NotUpdated'] = vals.keys()
        else:
            # Diagnose each stale attribute in depth
            for a,v in vals.items():
                c = fn.check_attribute(a)
                if c is None:
                    vals[a] = 'Unreadable'
                    self.status['Unreadable'].append(a)
                elif isinstance(c,Exception):
                    vals[a] = str(c)
                    self.status['Exception'].append(a)
                else:
                    ev = fn.tango.check_attribute_events(a)
                    if not ev:
                        vals[a] = 'NoEvents'
                        self.status['NoEvents'].append(a)
                    else:
                        d = self.get_attribute_archiver(a)
                        e = self.get_archiver_errors(d)
                        if a in e:
                            vals[a] = e[a]
                            self.status['ArchiverError'].append(a)
                        else:
                            rv = fn.read_attribute(a)
                            if v and str(rv) == str(v[1]):
                                # Value unchanged: archiving is not to blame
                                vals[a] = 'NotChanged'
                                self.status['NotChanged'].append(a)
                            else:
                                self.status['NotUpdated'].append(a)
        if regexp == '*':
            for k,v in self.status.items():
                print('%s: %s' % (k,len(v)))
        return vals
    else:
        # Should inspect the Subscribers Error Lists
        vals = dict()
        for d in self.get_archivers():
            err = self.get_archiver_errors(d)
            for a,e in err.items():
                if fn.clmatch(regexp,a):
                    vals[a] = e
        return vals
def check_archiving_performance(schema='hdb', attributes=[], period=24*3600*90,
        exclude=['*/waveid', '*/wavename', '*/elotech-*'],
        action=False, trace=True):
    """
    Audit the archiving state of a schema: which attributes are archived,
    readable, updated, lost or missing, and print a summary report.

    :param schema: archiving schema name (e.g. 'hdb')
    :param attributes: explicit attribute list or matching string; empty
        means "all attributes in the schema" (default list is never mutated)
    :param period: look-back window; values < 1000 are taken as days,
        values < 3600 as hours, otherwise seconds
    :param exclude: clmatch patterns of attributes to skip
    :param action: if True, only prints the recommended recovery commands
        (nothing is executed)
    :param trace: print the summary report
    :returns: fn.Struct with the computed attribute lists and ratios
    """
    import PyTangoArchiving as pta
    import fandango as fn
    ti = fn.now()
    api = pta.api(schema)
    check = dict()
    # Normalize period to seconds (see docstring for the unit heuristic)
    period = 24*3600*period if period < 1000 else (
        24*period if period < 3600 else period)
    # BUGFIX: list() the py3 map() iterator; the original one-shot iterator
    # is always truthy and is exhausted by the first membership test below
    attributes = (fn.get_matching_attributes(attributes)
                  if fn.isString(attributes)
                  else list(map(str.lower, attributes)))
    tattrs = [a for a in api if not attributes or a in attributes]
    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    # Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs if api[t].archiver)
    # BUGFIX: list() the py3 filter() iterator for repeated 'in' tests
    tdevs = list(filter(fn.check_device, alldevs))
    nodevs = [d for d in alldevs if d not in tdevs]

    # Updating data from archiving config tables
    if not attributes:
        tattrs = sorted(a for a in api if a.rsplit('/', 1)[0] in tdevs)
        tattrs = [a for a in tattrs
                  if not any(fn.clmatch(e, a) for e in exclude)]
    print('%d attributes will not be checked (excluded or device not running)'
          % (len(api) - len(tattrs)))
    tarch = sorted(a for a in api if api[a].archiver)
    tnoread = sorted(t for t in tarch if t not in tattrs)
    check.update((t, None) for t in tnoread)

    # Attributes archived in the past and not currently active
    tmiss = [t for t in tattrs if not api[t].archiver]
    check.update((t, fn.check_attribute(t, readable=True)) for t in tmiss)
    tmiss = [t for t in tmiss if check[t]]
    tmarray = [t for t in tmiss if fn.isString(check[t].value)
               or fn.isSequence(check[t].value)]
    tmscalar = [t for t in tmiss if t not in tmarray]

    # Updated tables from database
    tups = pta.utils.get_table_updates(schema)
    # Some tables do not update MySQL index tables
    t0 = [a for a in tarch if a in tattrs and not tups[api[a].table]]
    # BUGFIX: original called check_attribute(a, ...) with a stale loop
    # variable 'a' instead of the generator variable 't'
    check.update((t, fn.check_attribute(t, readable=True))
                 for t in t0 if not check.get(t))
    t0 = [t for t in t0 if check[t]]
    print('%d/%d archived attributes have indexes not updated ...'
          % (len(t0), len(tarch)))
    if t0 and len(t0) < 100:
        vs = api.load_last_values(t0)
        tups.update((api[t].table, api[t].last_date) for t in t0)
    # Stale means the table was not updated within the last 30 minutes
    tnotup = [a for a in tarch if tups[api[a].table] < fn.now() - 1800]
    check.update((t, 1) for t in tarch if t not in tnotup)

    # Updating readable attributes (all updated are considered as readable)
    for t in tattrs:
        if t not in check:
            check[t] = fn.check_attribute(t, readable=True)
    # (dead pre-loop assignment of tread removed; it was overwritten here)
    tread = sorted(t for t in tattrs if check[t])
    tnoread.extend(t for t in tread if not check[t])
    tnoread = sorted(set(tnoread))

    # tread: readable attributes from devices with some attribute archived
    # tnoread: unreadable attributes from already archived
    # tok: all archivable attributes that are archived and updated
    tok = [t for t in tarch if t not in tnotup]
    readarch = [a for a in tread if a in tarch]
    treadnotup = [t for t in readarch if t in tnotup]  # only from tarch
    # Useless, all archived are considered readable:
    tokread = [t for t in readarch if t not in tnotup]
    tarray = [t for t in tarch
              if check[t] and get_attribute_pytype(t) in (str, list)]
    removed = [a for a in tattrs if not api[a].archiver
               and tups[api[a].table] > fn.now() - period]

    result = fn.Struct()
    result.Excluded = excluded
    result.Schema = schema
    result.All = api.keys()
    result.Archived = tarch
    result.Readable = tread
    result.ArchivedAndReadable = readarch
    result.Updated = tok  # tokread
    result.Lost = treadnotup
    result.Removed = removed
    result.TableUpdates = tups
    result.NotUpdated = tnotup
    result.Missing = tmiss
    result.MissingScalars = tmscalar
    result.MissingArrays = tmarray
    result.ArchivedArray = tarray
    result.Unreadable = tnoread
    result.DeviceNotRunning = nodevs

    get_ratio = lambda a, b: float(len(a))/float(len(b))
    result.ArchRatio = get_ratio(
        [t for t in readarch if t not in tnotup], readarch)
    result.ReadRatio = get_ratio(result.Readable, tattrs)
    result.LostRatio = get_ratio([a for a in tread if a in tnotup], tread)
    result.MissRatio = get_ratio([a for a in tread if a not in tarch], tread)
    result.OkRatio = 1.0 - result.LostRatio - result.MissRatio

    result.Summary = '\n'.join((
        ('Checking archiving of %s attributes'
         % (len(attributes) if attributes else schema)),
        ('%d attributes in %s, %d are currently active'
         % (len(api), schema, len(tarch))),
        # BUGFIX: original compared a.rsplit('/',1) (a 2-item list) against
        # nodevs (device name strings), so the count was always 0
        ('%d devices with %d archived attributes are not running'
         % (len(nodevs),
            len([a for a in api if a.rsplit('/', 1)[0] in nodevs]))),
        ('%d archived attributes (%2.1f %%) are unreadable! (check and remove)'
         % (len(tnoread), 1e2*get_ratio(tnoread, tarch))),
        ('%d readable attributes are not archived' % (len(tmiss))),
        ('%d attributes (readable or not) are updated (%2.1f %% of all readables)'
         % (len(tok), 1e2*result.OkRatio)),
        ('-'*80),
        ('%d archived attributes (readable or not) are not updated!'
         % len(tnotup)),
        ('%d archived and readable attributes are not updated! (check and restart?)'
         % len(treadnotup)),
        ('-'*80),
        ('%d readable attributes have been removed in the last %d days!'
         % (len(removed), period/(24*3600))),
        ('%d readable scalar attributes are not being archived (not needed anymore?)'
         % len(tmscalar)),
        ('%d readable array attributes are not being archived (Ok)'
         % len(tmarray)),
        ('%d readable array attributes are archived (Expensive)'
         % len(tarray)),
        ('')))
    if trace:
        print(result.Summary)
        print('%d readable lost,Ok = %2.1f%%, %2.1f %% over all Readables (%2.1f %% of total)'
              % (len(treadnotup), 1e2*result.ArchRatio,
                 1e2*result.OkRatio, 1e2*result.ReadRatio))

    if action:
        print('NO ACTIONS ARE GONNA BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
        print("""
        api = PyTangoArchiving.ArchivingAPI('%s')
        lostdevs = sorted(set(api[a].archiver for a in result.NotUpdated))
        print(lostdevs)
        if lostdevs < a_reasonable_number:
          astor = fn.Astor()
          astor.load_from_devs_list(lostdevs)
          astor.stop_servers()
          fn.time.sleep(10.)
          astor.start_servers()
        """ % schema)

    if trace:
        print('finished in %d seconds' % (fn.now()-ti))
    return result
def get_attributes_errors(self, regexp='*', timeout=3 * 3600,
                          from_db=False, extend=False):
    """
    Returns a dictionary {attribute, error/last value}

    If from_db=True and extend=True, it performs a full attribute check

    :param regexp: attribute name filter (fn.filtersmart / fn.clmatch)
    :param timeout: age in seconds beyond which a stored value is stale
    :param from_db: check last values stored in the database
    :param extend: additionally diagnose each stale attribute (readability,
        events, archiver error lists); implies the from_db code path
    """
    if regexp == '*':
        # Full check: reset the per-category summary counters
        self.status = fn.defaultdict(list)
    if from_db or extend:
        # Convert "max age" into the oldest acceptable timestamp
        timeout = fn.now() - timeout
        attrs = self.get_attributes(True)
        attrs = fn.filtersmart(attrs, regexp)
        print('get_attributes_errors([%d/%d])' % (len(attrs),
                                                  len(self.attributes)))
        vals = self.load_last_values(attrs)
        # BUGFIX: iterate over a snapshot; vals.pop() inside the loop
        # raises RuntimeError in Python 3 when iterating the live dict
        for a, v in list(vals.items()):
            if v and v[0] > timeout:
                self.status['Updated'].append(a)
                if v[1] is not None:
                    self.status['Readable'].append(a)
                else:
                    # Stored None: distinguish a wrongly-archived None
                    # from an attribute that really reads None
                    rv = fn.read_attribute(a)
                    if rv is not None:
                        self.status['WrongNone'].append(a)
                    else:
                        self.status['None'].append(a)
                # Up-to-date attributes are removed; vals keeps only stale
                vals.pop(a)
        if not extend:
            # BUGFIX: materialize the keys; a live dict view would keep
            # aliasing vals after this method returns
            self.status['NotUpdated'] = list(vals.keys())
        else:
            # Diagnose each stale attribute in order of likely cause
            for a, v in vals.items():
                c = fn.check_attribute(a)
                if c is None:
                    vals[a] = 'Unreadable'
                    self.status['Unreadable'].append(a)
                elif isinstance(c, Exception):
                    vals[a] = str(c)
                    self.status['Exception'].append(a)
                else:
                    ev = fn.tango.check_attribute_events(a)
                    if not ev:
                        vals[a] = 'NoEvents'
                        self.status['NoEvents'].append(a)
                    else:
                        d = self.get_attribute_archiver(a)
                        e = self.get_archiver_errors(d)
                        if a in e:
                            vals[a] = e[a]
                            self.status['ArchiverError'].append(a)
                        else:
                            # Readable, with events, no archiver error:
                            # value simply did not change, or is lost
                            rv = fn.read_attribute(a)
                            if v and str(rv) == str(v[1]):
                                vals[a] = 'NotChanged'
                                self.status['NotChanged'].append(a)
                            else:
                                self.status['NotUpdated'].append(a)
        if regexp == '*':
            for k, v in self.status.items():
                print('%s: %s' % (k, len(v)))
        return vals
    else:
        # Fast path: should inspect the Subscribers Error Lists
        vals = dict()
        for d in self.get_archivers():
            err = self.get_archiver_errors(d)
            for a, e in err.items():
                if fn.clmatch(regexp, a):
                    vals[a] = e
        return vals