Пример #1
0
def export_attributes_to_pck(filein='ui_exported_devices.txt',
                             fileout='ui_attribute_values.pck'):
    """
    Export device properties, commands and attribute values to a pickle file.

    :param filein: filename with one device name per line, or a sequence
        of device names
    :param fileout: path of the pickle file to write
    :return: the fileout path
    """
    print('export_attributes:' + str((filein, fileout)))
    if fandango.isSequence(filein):
        devs = filein
    else:
        # read device names, one per line; close the file deterministically
        with open(filein) as fin:
            devs = [l.strip() for l in fin.readlines()]
    proxies = dict((d, PyTango.DeviceProxy(d)) for d in devs)
    devs = defaultdict(Struct)

    for d, dp in sorted(proxies.items()):
        print('%s (%d/%d)' % (d, 1 + len(devs), len(proxies)))
        obj = devs[d]
        obj.dev_class, obj.attrs, obj.comms = '', defaultdict(Struct), {}
        # device properties; Tango "vector" values are converted to lists,
        # DynamicAttributes properties are skipped
        obj.props = dict(
            (k, v if 'vector' not in str(type(v)).lower() else list(v))
            for k, v in fandango.tango.get_matching_device_properties(
                d, '*').items() if 'dynamicattributes' not in k.lower())
        if fandango.check_device(d):
            devs[d].name = d
            devs[d].dev_class = dp.info().dev_class
            for c in dp.command_list_query():
                if c.cmd_name.lower() not in ('state', 'status', 'init'):
                    obj.comms[c.cmd_name] = (str(c.in_type), str(c.out_type))
            for a in dp.get_attribute_list():
                if a.lower() == 'status':
                    continue
                obj.attrs[a] = fandango.tango.export_attribute_to_dict(
                    d, a, as_struct=True)

    # binary mode is required by pickle on py3 and harmless on py2;
    # the original opened in 'w' and leaked the file handle
    with open(fileout, 'wb') as fout:
        pickle.dump(devs, fout)
    return fileout
Пример #2
0
def tango2table(filters,opts=[]):
    """
    New method to generate .csv using export_device_to_dict and dict2array
    
    Valid options are:
        --hosts, --text, --print, --skip-attributes
        
    Calling tango2table(filters,['--hosts','--skip-attributes']) will 
        return same output that the old tango2csv scripts

    :param filters: list of device-name regexps to export
    :param opts: list of option flags (see above); read-only
    :return: a table (list of rows), or its tab-separated text if
        '--text' is in opts
    """
    import fandango as fd
    import fandango.tango as ft
    devices = []
    for f in filters:
        devices.extend(ft.find_devices(f))
    output = fd.defaultdict(lambda :fd.defaultdict(dict))
    attr_info = ('display_unit','standard_unit','label','unit','min_alarm',
        'events','description','format','max_alarm','polling','alarms',)
    # placeholder values filtered out of the table; fixed: a missing comma
    # merged 'Not specified' and 'nada' into a single string, so 'nada'
    # was never excluded
    excluded = ('None','No standard unit','No display unit','Not specified',
                'nada',)

    for d in devices:
        v = ft.export_device_to_dict(d)
        # group by host/server or just by server depending on --hosts
        if '--hosts' in opts:
            d = output[v['host']][v['server']]
        else:
            d = output[v['server']]

        if '--skip-attributes' not in opts:
            d[v['name']] = {'properties':v['properties']}

            da = d[v['name']]['attributes'] = fd.defaultdict(dict)
            for a,t in v['attributes'].items():
                # keep only the configuration fields listed in attr_info
                da[a] = dict((i,t.get(i)) for i in attr_info)

        else:
            d[v['name']] = v['properties']

    table = fd.arrays.dict2array(output,
                branch_filter = (lambda x: 
                    str(x)!='extensions'),
                leave_filter = (lambda y: 
                    y and str(y).split()[0] not in excluded),
                empty='')

    if '--text' in opts or '--print' in opts:
        text = '\n'.join('\t'.join(map(str,r)) for r in table)

    if '--print' in opts: 
        print(text)

    return text if '--text' in opts else table
Пример #3
0
    def __init__(self,db_name='',host='',user='',
                 passwd='', manager='',
                 other=None, port = '3306'):
        """
        Configuration can be loaded from PyTangoArchiving.Schemas,
        an HdbConfigurationManager or another DB object.

        api = pta.HDBpp(db_name,host,user,passwd,manager)

        :param db_name: database name; required unless manager is given
        :param host: MySQL host (may be completed from other sources)
        :param user: MySQL user
        :param passwd: MySQL password
        :param manager: HdbConfigurationManager device name
        :param other: another DB-like object to copy the config from
        :param port: MySQL port, '3306' by default
        """
        assert db_name or manager, 'db_name/manager argument is required!'
        self.tango = get_database()

        # complete missing credentials from, in order: another DB object,
        # the manager device, the Schemas module, or a manager searched
        # for in the Tango database
        if not all((db_name,host,user,passwd)):
            if other:
                print('HDBpp(): Loading from DB object')
                db_name,host,user,passwd = \
                    other.db_name,other.host,other.user,other.passwd
            elif manager:
                print('HDBpp(): Loading from manager')
                d,h,u,p = HDBpp.get_db_config(manager=manager,db_name=db_name)
                # explicit arguments take precedence over manager config
                db_name = db_name or d
                host = host or h    
                user = user or u
                passwd = passwd or p                
            else:
                sch = Schemas.getSchema(db_name)
                if sch:
                    print('HDBpp(): Loading from Schemas')
                    db_name = sch.get('dbname',sch.get('db_name'))
                    host = host or sch.get('host')
                    user = user or sch.get('user')
                    passwd = passwd or sch.get('passwd')
                    port = port or sch.get('port')
                elif not manager:
                    print('HDBpp(): Searching for manager')
                    m = self.get_manager(db_name)
                    t = HDBpp.get_db_config(manager=m,db_name=db_name)
                    host,user,passwd = t[1],t[2],t[3]

        self.port = port
        self.archivers = []
        self.attributes = fn.defaultdict(fn.Struct)
        self.dedicated = {}
        self.status = fn.defaultdict(list)
        ArchivingDB.__init__(self,db_name,host,user,passwd,)
        try:
            self.get_manager()
            self.get_attributes()
        except:
            # best-effort: an unreachable manager should not prevent
            # construction of the API object
            traceback.print_exc()
            print('Unable to get manager')
Пример #4
0
    def add_periodic_attributes(self, attributes, periods, wait=3.):
        """
        attributes must be a list, periods a number, list or dict
        """
        # normalize attribute names to full lower-case tango models
        attributes = sorted(
            parse_tango_model(attr, fqdn=True).fullname.lower()
            for attr in attributes)
        # accept a single period, a parallel sequence, or a ready-made dict
        if fn.isNumber(periods):
            periods = dict((attr, periods) for attr in attributes)
        elif fn.isSequence(periods):
            periods = dict(zip(attributes, periods))

        # group attributes per device, so each device is assigned to a
        # single periodic archiver
        grouped = fn.defaultdict(list)
        for attr in attributes:
            grouped[fn.tango.get_dev_name(attr)].append(attr)

        done = []
        for dev, attrs in grouped.items():
            archiver = self.get_next_periodic_archiver(attrexp=dev + '/*')
            for attribute in attrs:
                try:
                    period = periods[attribute]
                    self.info('add_periodic_attribute(%s,%s,%s)' %
                              (attribute, period, archiver))
                    self.add_periodic_attribute(attribute,
                                                period=period,
                                                archiver=archiver,
                                                wait=wait)
                    done.append((attribute, period, archiver))
                except:
                    # keep going; failures are reported but not fatal
                    self.warning(fn.except2str())

        return done
Пример #5
0
    def __init__(self,
                 db_name='',
                 host='',
                 user='',
                 passwd='',
                 manager='',
                 other=None,
                 port='3306'):
        """
        Configuration can be loaded from PyTangoArchiving.Schemas,
        an HdbConfigurationManager or another DB object.

        :param db_name: database name; required unless manager is given
        :param host: MySQL host (may be completed from other sources)
        :param user: MySQL user
        :param passwd: MySQL password
        :param manager: HdbConfigurationManager device name
        :param other: another DB-like object to copy the config from
        :param port: MySQL port, '3306' by default
        """
        assert db_name or manager, 'db_name/manager argument is required!'
        self.tango = get_database()

        # complete missing credentials from, in order: another DB object,
        # the manager device, the Schemas module, or a manager searched
        # for in the Tango database
        if not all((db_name, host, user, passwd)):
            if other:
                print('HDBpp(): Loading from DB object')
                db_name,host,user,passwd = \
                    other.db_name,other.host,other.user,other.passwd
            elif manager:
                print('HDBpp(): Loading from manager')
                d, h, u, p = self.get_db_config(manager=manager,
                                                db_name=db_name)
                # explicit arguments take precedence over manager config
                db_name = db_name or d
                host = host or h
                user = user or u
                passwd = passwd or p
            else:
                sch = Schemas.getSchema(db_name)
                if sch:
                    print('HDBpp(): Loading from Schemas')
                    db_name = sch.get('dbname', sch.get('db_name'))
                    host = host or sch.get('host')
                    user = user or sch.get('user')
                    passwd = passwd or sch.get('passwd')
                    port = port or sch.get('port')
                elif not manager:
                    print('HDBpp(): Searching for manager')
                    m = self.get_manager(db_name)
                    t = self.get_db_config(manager=m, db_name=db_name)
                    host, user, passwd = t[1], t[2], t[3]

        self.port = port
        self.archivers = []
        self.attributes = fn.defaultdict(fn.Struct)
        self.dedicated = {}
        ArchivingDB.__init__(
            self,
            db_name,
            host,
            user,
            passwd,
        )
        try:
            self.get_manager()
            self.get_attributes()
        except:
            # best-effort: an unreachable manager should not prevent
            # construction of the API object
            traceback.print_exc()
            print('Unable to get manager')
Пример #6
0
def get_hdbpp_for_attributes(attrlist):
    """Group the given attributes by the hdbpp database whose filter matches."""
    db_filters = get_hdbpp_filters()
    matched = fn.defaultdict(list)
    for attr in attrlist:
        for db, regexp in db_filters.items():
            if fn.clmatch(regexp, attr, extend=True):
                matched[db].append(attr)
    return matched
Пример #7
0
def export_attributes_to_pck(filein='ui_exported_devices.txt',
                             fileout='ui_attribute_values.pck'):
    """
    Export device properties, commands and attribute values to a pickle file.

    :param filein: filename (or sequence) of device names or source files
    :param fileout: path of the pickle file to write (must end in .pck)
    :return: the fileout path
    """
    print('export_attributes from:' + str((filein, fileout)))
    assert fileout.endswith('.pck'), 'output must be a pickle file!'

    all_devs = fd.tango.get_all_devices()
    filein = fd.toList(filein)
    # if every entry is a known device name, use them directly;
    # otherwise treat the entries as source files to be parsed
    if all(d.lower() in all_devs for d in filein):
        devs = filein
    else:
        devs = export_devices_from_sources(*filein, check=True)

    print('devices to export: %s' % str(devs))

    proxies = dict((d, PyTango.DeviceProxy(d)) for d in devs)
    devs = defaultdict(Struct)

    for d, dp in sorted(proxies.items()):
        print('%s (%d/%d)' % (d, 1 + len(devs), len(proxies)))
        obj = devs[d]
        obj.dev_class, obj.attrs, obj.comms = '', defaultdict(Struct), {}
        # device properties; Tango "vector" values are converted to lists,
        # DynamicAttributes properties are skipped
        obj.props = dict(
            (k, v if 'vector' not in str(type(v)).lower() else list(v))
            for k, v in fd.tango.get_matching_device_properties(
                d, '*').items() if 'dynamicattributes' not in k.lower())
        if fd.check_device(d):
            devs[d].name = d
            devs[d].dev_class = dp.info().dev_class
            for c in dp.command_list_query():
                if c.cmd_name.lower() not in ('state', 'status', 'init'):
                    obj.comms[c.cmd_name] = (str(c.in_type), str(c.out_type))
            for a in dp.get_attribute_list():
                if a.lower() == 'status':
                    continue
                obj.attrs[a] = fd.tango.export_attribute_to_dict(
                    d, a, as_struct=True)

    # binary mode is required by pickle on py3 and harmless on py2;
    # the original opened in 'w' and leaked the file handle
    with open(fileout, 'wb') as fout:
        pickle.dump(devs, fout)
    return fileout
Пример #8
0
    def __init__(self, mask=None):
        """
        Build a dict of proxies for all FolderDS devices (plus any device
        listed in the 'ExtraDevices' class property), optionally filtered.

        :param mask: fandango filter expression to restrict device names
        """
        #mask = mask or 'FolderDS/*'
        self.mask = mask
        ProxiesDict.__init__(self)
        devs = tango.get_class_devices('FolderDS')
        extra = fn.get_database().get_class_property('FolderDS',
                                                     ['ExtraDevices'])
        devs.extend(extra.get('ExtraDevices', []))
        if mask: devs = fn.filtersmart(devs, mask)
        self.hosts = fn.defaultdict(list)

        # item access on self creates/caches the proxy for each device
        # (ProxiesDict semantics — result intentionally discarded)
        for d in devs:
            self[d]
Пример #9
0
def main(args):
    """
    Launch a VaccaPanel for a Tango device or a JDraw synoptic viewer.

    args is a sys.argv-like list: args[1] is a device name or a .jdw
    file, optionally followed by --attrs/--comms regexp filters.
    NOTE: this is Python 2 code (print statements).
    """
    import sys, re, traceback, taurus
    assert len(
        args
    ) > 1, '\n\nUsage:\n\t> python panel.py [a/device/name or synoptic.jdw] [--attrs] [attr_regexps] --comms [comm_regexps]'

    model = args[1]
    taurus.setLogLevel(taurus.core.util.Logger.Debug)
    app = Qt.QApplication(args)
    form = None

    # a device-like name: open a device panel with attr/comm filters
    if re.match('[\w-]+/[\w-]+/[\w-]+.*', model):

        print 'loading a device panel'
        # flags switch the current filter key; bare args are regexps
        # appended under the last seen key ('--attrs' by default)
        k, filters = '--attrs', fandango.defaultdict(list)
        for f in args[2:]:
            if f.startswith('--'): k = f.strip('-')
            else: filters[k].append(f)  #(f,()) if k=='comms' else f)

        form = VaccaPanel(filters=filters)  #palette=get_fullWhite_palette()
        form.setModel(model)

    elif model.lower().endswith('.jdw'):
        print 'loading a synoptic'
        form = taurus.qt.qtgui.graphic.TauJDrawSynopticsView(
            designMode=False,
            updateMode=taurus.qt.qtgui.graphic.TauJDrawSynopticsView.
            NoViewportUpdate)
        #FullViewportUpdate, : AWFUL CPU USAGE!!!!!!!!
        #MinimalViewportUpdate, : Qt Defaults
        #SmartViewportUpdate, : ?
        #BoundingRectViewportUpdate, : ?
        #NoViewportUpdate : Tau defaults
        form.setModel(model)
        models = form.get_item_list()
        # slow down polling for every item shown in the synoptic
        for m in models:
            m = str(m)
            if m.count('/') == 2: m += '/state'
            period = 120000.
            try:
                taurus.Attribute(m).changePollingPeriod(period)
            except:
                print '(%s).changePollingPeriod(%s): Failed: %s' % (
                    m, period, traceback.format_exc())

    print 'showing ...'
    form.show()
    sys.exit(app.exec_())
Пример #10
0
def restart_attributes_archivers(schema,attributes,action=False):
    """
    Restart the archiver devices in charge of the given attributes.
    Returns {archiver: number_of_attributes}; with action=False it only
    reports what would be restarted.
    """
    import PyTangoArchiving
    api = PyTangoArchiving.api(schema)
    # group attributes by the archiver device in charge of each one
    by_archiver = fandango.defaultdict(list)
    for attr in attributes:
        by_archiver[api[attr].archiver].append(attr)
    if not action:
        print('%d archivers to restart, call with action=True to execute it'%len(by_archiver))
    else:
        print('Restarting %d archivers'%len(by_archiver))
        astor = fandango.Astor()
        astor.load_from_devs_list(by_archiver.keys())
        astor.stop_servers()
        time.sleep(10.)
        astor.start_servers()
    return dict((k,len(v)) for k,v in by_archiver.items())
Пример #11
0
def restart_attributes_archivers(schema,attributes,action=False):
    """
    Restart the archiver devices in charge of the given attributes.
    Returns {archiver: number_of_attributes}; with action=False it only
    reports what would be restarted.
    """
    import PyTangoArchiving
    api = PyTangoArchiving.api(schema)
    # group attributes by the archiver device in charge of each one
    by_archiver = fn.defaultdict(list)
    for attr in attributes:
        by_archiver[api[attr].archiver].append(attr)
    if not action:
        print('%d archivers to restart, call with action=True to execute it'%len(by_archiver))
    else:
        print('Restarting %d archivers'%len(by_archiver))
        astor = fn.Astor()
        astor.load_from_devs_list(by_archiver.keys())
        astor.stop_servers()
        time.sleep(10.)
        astor.start_servers()
    return dict((k,len(v)) for k,v in by_archiver.items())
Пример #12
0
def merge_csv_attrs(exported = True, currents = True, check_dups = True):
    """
    OJU! Correctors are not exported but should be archived anyway!

    Parses every .csv file found in the PyTangoArchiving 'CSVFolder'
    free property into ParseCSV objects.

    NOTE(review): this snippet appears truncated -- 'archattrs', 'alldevs'
    and the 'currents'/'check_dups' arguments are never used in the
    visible code; confirm against the original module. Python 2 syntax
    ('except Exception,e').
    """
    folder = fn.tango.get_free_property('PyTangoArchiving','CSVFolder')
    csvs = [f for f in fn.listdir(folder) if f.endswith('csv')]
    print('Parsing %d files from %s' % (len(csvs),folder))
    archattrs = fn.defaultdict(dict)

    alldevs = fn.tango.get_all_devices(exported = exported)
    
    sources = dict()
    for f in csvs:
        try:
            sources[f] = pta.ParseCSV(folder+f)
        except Exception,e:
            # a single broken CSV should not abort the whole parse
            print('%s failed: %s\n'%(f,e))
def get_panic_status(*args):
    """
    Return a multi-line text summary of a panic alarm database: alarm and
    device counts, plus devices grouped by their current state.

    Accepts either an existing panic.AlarmAPI as first argument, or the
    arguments needed to build one via panic.api().
    """
    if args and isinstance(args[0], panic.AlarmAPI):
        api = args[0]
    else:
        api = panic.api(*args)

    lines = ['Panic(%s) Status:' % str(api.filters)]
    lines.append('\t%d alarms in db' % len(api))
    lines.append('\t%d devices in db' % len(api.devices))
    lines.append('')

    # group devices by their checked state
    by_state = defaultdict(list)
    for dev in api.devices:
        by_state[check_device(dev)].append(dev)
    for state, devlist in sorted(by_state.items()):
        lines.append('%d devices in %s state' % (len(devlist), state))
        summary = ['%s (%d)' % (dev, len(api.devices[dev].alarms))
                   for dev in sorted(devlist)]
        lines.append('\t%s' % ', '.join(summary))

    return '\n'.join(lines)
Пример #14
0
    def get_archivers_attributes(self, archs=None, from_db=True, full=False):
        """
        Return {archiver: [attributes]} for the given archiver devices.
        With from_db=True the lists come from the Tango database property;
        otherwise each running archiver device is queried directly.
        """
        archs = archs or self.get_archivers()
        dedicated = fn.defaultdict(list)
        if from_db:
            for arch in archs:
                props = get_device_property(arch, 'AttributeList')
                dedicated[arch] = [str(p) for p in props]
                if not full:
                    # keep only the attribute name, drop the ';...' config
                    dedicated[arch] = [str(p).split(';')[0]
                                       for p in dedicated[arch]]
        else:
            for arch in archs:
                try:
                    dedicated[arch].extend(get_device(arch).AttributeList)
                except:
                    # unreachable archiver: record an empty list
                    dedicated[arch] = []

        self.dedicated.update(dedicated)
        return dedicated
Пример #15
0
 def get_archivers_attributes(self,archs=None,from_db=True,full=False):
     """
     Return {archiver: [attributes]} for the given archiver devices.

     If not got from_db, the manager may limit the list available
     """        
     archs = archs or self.get_archivers()
     dedicated = fn.defaultdict(list)
     if from_db:
         # read the AttributeList property straight from the Tango database
         for a in archs:
             dedicated[a] = [str(l) for l in 
                 get_device_property(a,'AttributeList')]
             if not full:
                 # keep only the attribute name, drop the ';...' config part
                 dedicated[a] = [str(l).split(';')[0] for l in 
                     dedicated[a]]
     else:
         # ask each running archiver device for its attribute list
         for a in archs:
             try:
                 dedicated[a].extend(get_device(a, keep=True).AttributeList)
             except:
                 # unreachable archiver: record an empty list
                 dedicated[a] = []
                 
     self.dedicated.update(dedicated)
     return dedicated    
Пример #16
0
    def add_attributes(self, attributes, *args, **kwargs):
        """
        Call add_attribute sequentially with a 1s pause between calls
        :param start: True by default, will force Start() in related archivers
        See add_attribute? for more help on arguments
        """
        try:
            attributes = sorted(attributes)
            start = kwargs.get('start', True)
            # group attributes by device so each device goes to one archiver
            devs = fn.defaultdict(list)
            for a in attributes:
                devs[fn.tango.get_dev_name(a)].append(a)
            for dev, attrs in devs.items():
                arch = self.get_next_archiver(attrexp=dev + '/*')
                for a in attrs:
                    kwargs['start'] = False  #Avoid recursive start
                    try:
                        kwargs['clear'] = False
                        self.add_attribute(a, archiver=arch, *args, **kwargs)
                    except:
                        self.warning('add_attribute(%s) failed!\n%s' %
                                     (a, traceback.format_exc()))
                    time.sleep(3.)

            self.clear_caches()

            if start:
                # start every archiver that received new attributes
                self.get_archivers_attributes()
                archs = set(map(self.get_attribute_archiver, attributes))
                for h in archs:
                    try:
                        if h:
                            self.info('%s.Start()' % h)
                            fn.get_device(h, keep=True).Start()
                    except:
                        traceback.print_exc()

        # 'except Exception:' replaces the py2-only 'except Exception, e'
        except Exception:
            # fixed: format_exc() instead of print_exc() — print_exc()
            # returns None, so the error message used to log 'None'
            self.error('add_attributes(%s) failed!: %s' %
                       (attributes, traceback.format_exc()))
Пример #17
0
    def get_attributes_errors(self,
                              regexp='*',
                              timeout=3 * 3600,
                              from_db=False,
                              extend=False):
        """
        Returns a dictionary {attribute, error/last value}
        
        If from_db=True and extend=True, it performs a full attribute check

        :param regexp: filter of attribute names to check
        :param timeout: age (s) above which a stored value is "not updated"
        :param from_db: check the last values stored in the database
        :param extend: also read attributes and inspect archiver errors
        """
        if regexp == '*':
            self.status = fn.defaultdict(list)
        if from_db or extend:
            timeout = fn.now() - timeout
            attrs = self.get_attributes(True)
            attrs = fn.filtersmart(attrs, regexp)
            print('get_attributes_errors([%d/%d])' %
                  (len(attrs), len(self.attributes)))
            vals = self.load_last_values(attrs)
            # fixed: iterate a copy — the loop pops updated attributes from
            # vals, which raises RuntimeError on py3 when iterating a view
            for a, v in list(vals.items()):
                if v and v[0] > timeout:
                    self.status['Updated'].append(a)
                    if v[1] is not None:
                        self.status['Readable'].append(a)
                    else:
                        # stored None: verify against a live read
                        rv = fn.read_attribute(a)
                        if rv is not None:
                            self.status['WrongNone'].append(a)
                        else:
                            self.status['None'].append(a)
                    vals.pop(a)

            if not extend:
                # fixed: materialize the keys (py3 returns a live view)
                self.status['NotUpdated'] = list(vals.keys())
            else:
                # classify each stale attribute by probing it directly
                for a, v in list(vals.items()):
                    c = fn.check_attribute(a)
                    if c is None:
                        vals[a] = 'Unreadable'
                        self.status['Unreadable'].append(a)
                    elif isinstance(c, Exception):
                        vals[a] = str(c)
                        self.status['Exception'].append(a)
                    else:
                        ev = fn.tango.check_attribute_events(a)
                        if not ev:
                            vals[a] = 'NoEvents'
                            self.status['NoEvents'].append(a)
                        else:
                            d = self.get_attribute_archiver(a)
                            e = self.get_archiver_errors(d)
                            if a in e:
                                vals[a] = e[a]
                                self.status['ArchiverError'].append(a)
                            else:
                                rv = fn.read_attribute(a)
                                if v and str(rv) == str(v[1]):
                                    # value readable but unchanged since
                                    # last stored sample
                                    vals[a] = 'NotChanged'
                                    self.status['NotChanged'].append(a)
                                else:
                                    self.status['NotUpdated'].append(a)

            if regexp == '*':
                for k, v in self.status.items():
                    print('%s: %s' % (k, len(v)))

            return vals
        else:
            # Should inspect the Subscribers Error Lists
            vals = dict()
            for d in self.get_archivers():
                err = self.get_archiver_errors(d)
                for a, e in err.items():
                    if fn.clmatch(regexp, a):
                        vals[a] = e
            return vals
Пример #18
0
def start_attributes_for_archivers(target,attr_regexp='',event_conf={},
            load=False, by_class=False, min_polling = 100, overwrite = False, 
            check = True):
    """
    Target may be an attribute list or a device regular expression
    if by_class = True, config will be loaded from Tango class properties

    :param target: attribute list or device name regexp
    :param attr_regexp: optional regexp to filter attribute names
    :param event_conf: event config dict applied to every attribute
    :param load: if True, actually configure events and start archiving
    :param by_class: load event config from Tango class properties
    :param min_polling: minimum allowed polling period
    :param overwrite: reconfigure attributes already archived in the db
    :param check: assert that start_archiving() succeeded
    :return: {db: {archiver: {attribute: mode}}}
    """
    import PyTangoArchiving.hdbpp as ptah

    # build the list of attributes to work on
    if fn.isSequence(target):
        if attr_regexp:
            attrs = [a for a in target if fn.clmatch(attr_regexp,a.rsplit('/')[-1])]
        else:
            attrs = target
    else:
        dev_regexp = target
        attrs = fn.find_attributes(dev_regexp+'/'+(attr_regexp or '*'))

    if by_class:
        # group devices by Tango class, then take the archiving config
        # from the class properties of one device per class
        classes = fn.defaultdict(dict)
        devs = fn.defaultdict(list)
        for a in attrs:
            devs[a.rsplit('/',1)[0]].append(a)

        for d,v in devs.items():
            classes[fn.tango.get_device_class(d)][d] = v

        attrs = {}
        for c,devs in classes.items():
            # list() keeps this working on py3, where keys() is a view
            cfg = get_class_archiving(list(devs.keys())[0])
            for d in devs:
                raw = devs[d]
                for a,v in cfg.items():
                    for aa in raw:
                        if fn.clmatch(a,aa.split('/')[-1],terminate=True):
                            if not attr_regexp or fn.clmatch(attr_regexp,aa):
                                attrs[aa] = v

    elif event_conf:
        # fixed: copy per attribute — the original shared one dict for all
        # attributes, so per-attribute mutations below leaked across them
        attrs = dict((a,dict(event_conf)) for a in attrs)
    else:
        attrs = dict((a,get_current_conf(a)) for a in attrs)

    print('Starting %d attributes' % (len(attrs)))

    archs = ptah.multi.match_attributes_and_archivers(attrs.keys())
    rd = PyTangoArchiving.Reader()
    alldbs = ptah.multi.get_hdbpp_databases()
    dbs = ptah.multi.get_hdbpp_databases(archs,alldbs)

    for db,rcs in dbs.items():
        api = PyTangoArchiving.Schemas.getApi(db)
        dbs[db] = dict.fromkeys(rcs)
        for d in rcs:
            dbs[db][d] = ts = dict.fromkeys(archs[d])
            for a in ts:
                try:
                    m = fn.parse_tango_model(a,fqdn=True)
                    dbs[db][d][a] = mode = attrs[a]
                    if not overwrite and db in rd.is_attribute_archived(a):
                        print('%s already archived in %s' % (a,db))
                        continue
                    events = ft.check_attribute_events(a,ft.EventType.ARCHIVE_EVENT)
                    ep = events.get(ft.EventType.ARCHIVE_EVENT,False)
                    if ep is True:
                        # device pushes archive events: no polling needed
                        if 'polling' in mode: 
                            mode.pop('polling')
                    elif isinstance(ep,(int,float)):
                        # fixed: original called isinstance() with a single
                        # argument (TypeError); ep is the event poll period
                        mode['polling'] = min((ep,mode.get('polling',10000)))
                        mode['polling'] = max((mode['polling'],min_polling))

                    if not events.get(ft.EventType.CHANGE_EVENT,False):
                        # map archiving config to event config when the
                        # device provides no change events
                        if mode.get('archive_abs_change',0):
                            mode['abs_event'] = mode['archive_abs_change']
                        if mode.get('archive_rel_change',0):
                            mode['rel_event'] = mode['archive_rel_change']    
                        # fixed: guard used 'arch_per_event' but the value
                        # was read from 'archive_per_event' (KeyError risk);
                        # both now use the same key
                        if mode.get('archive_per_event',0):
                            mode['per_event'] = mode['archive_per_event']

                    print('%s.start_archiving(%s,%s,%s): %s' % (db,d,m.fullname,mode,load))
                    if load:
                        fn.tango.set_attribute_events(a,**mode)
                        r = api.start_archiving(m.fullname,d,code_event=True)
                        assert not check or r
                except:
                    print('%s failed!'%a)
                    traceback.print_exc()

    return dbs
Пример #19
0
def main(args):
    """
    Launch a VaccaPanel for a Tango device or a JDraw synoptic viewer,
    loading the vacca configuration from the VACCA_CONFIG environment
    variable or the 'vaccagui' module.

    args is a sys.argv-like list: args[1] is a device name or a .jdw
    file, optionally followed by --attrs/--comms regexp filters.
    NOTE: this is Python 2 code (print statements).
    """
    import sys, re, traceback, taurus
    print('args: %s' % str(args))
    assert len(args)>1, '\n\nUsage:\n\t'\
        '> python panel.py [a/device/name or synoptic.jdw] [--attrs] '\
            '[attr_regexps] --comms [comm_regexps]'

    model = args[1]
    taurus.setLogLevel(taurus.core.util.Logger.Debug)
    app = Qt.QApplication(args)
    form = None

    # locate the vacca configuration, either from the environment or by
    # searching for a 'vaccagui' module
    VACCA_CONFIG = os.getenv('VACCA_CONFIG')
    if not VACCA_CONFIG:
        VACCA_CONFIG = fandango.objects.find_module('vaccagui')
    print('VACCA_CONFIG: %s' % VACCA_CONFIG)
    if VACCA_CONFIG:
        import vacca.utils as vu
        PROPS = vu.get_config_properties(VACCA_CONFIG)
        VACCA_DIR = WDIR = PROPS.get('VACCA_DIR', vu.VACCA_DIR)
        try:
            import default
        except:
            # fall back to the default.py shipped with the vacca package
            try:
                default = get_config_file(
                    imp.find_module('vacca')[1] + '/default.py')
            except:
                traceback.print_exc()
        CONFIG = vu.get_config_file()

    # a device-like name: open a device panel with attr/comm filters
    if re.match(fandango.tango.retango, model):

        print 'loading a device panel'
        # flags switch the current filter key; bare args are regexps
        # appended under the last seen key ('--attrs' by default)
        k, filters = '--attrs', fandango.defaultdict(list)
        for f in args[2:]:
            if f.startswith('--'): k = f.strip('-')
            else: filters[k].append(f)  #(f,()) if k=='comms' else f)

        form = VaccaPanel(filters=filters)  #palette=get_fullWhite_palette()
        form.setModel(model)

    elif model.lower().endswith('.jdw'):
        print 'loading a synoptic'
        import taurus.qt.qtgui.graphic as tqqg
        form = tqqg.TauJDrawSynopticsView(
            designMode=False,
            updateMode=tqqg.TauJDrawSynopticsView.NoViewportUpdate)
        #FullViewportUpdate, : AWFUL CPU USAGE!!!!!!!!
        #MinimalViewportUpdate, : Qt Defaults
        #SmartViewportUpdate, : ?
        #BoundingRectViewportUpdate, : ?
        #NoViewportUpdate : Tau defaults
        form.setModel(model)
        models = form.get_item_list()
        # slow down polling for every item shown in the synoptic
        for m in models:
            m = str(m)
            if m.count('/') == 2: m += '/state'
            period = 120000.
            try:
                taurus.Attribute(m).changePollingPeriod(period)
            except:
                print '(%s).changePollingPeriod(%s): Failed: %s' % (
                    m, period, traceback.format_exc())

    else:
        print('Unknown model: %s' % model)

    print 'showing ...'
    form.show()
    sys.exit(app.exec_())
Пример #20
0
def check_archiving_schema(
        schema='hdb',
        attributes=None, values=None,
        ti=None,
        period=7200,
        old_period=24*3600*90,
        exclude=['*/waveid','*/wavename','*/elotech-*'],
        use_index=True,
        loads=True,
        action=False,
        trace=True,
        export=None):
    """
    Check the archiving status of all (or a subset of) attributes of a
    PyTangoArchiving schema and print a detailed report.

    :param schema: schema name passed to pta.api() (e.g. 'hdb')
    :param attributes: list of attributes or a regexp; empty checks all
    :param values: dict of {attr: last_value}, or a .pck/.json filename
        to load them from; if empty they are queried from the database
    :param ti: reference time (epoch number or time string); defaults to now
    :param period: attrs updated within [ti - period, ti] count as updated
    :param old_period: age threshold for "removed" attributes; values
        < 1000 are interpreted as days, < 3600 as hours, else seconds
    :param exclude: fn.clmatch patterns of attributes to skip
    :param use_index: use MySQL index tables to obtain last update times
    :param loads: if True, print the attribute load of each archiver
    :param action: 'start_devices' or 'restart_all' to act on the result
    :param export: 'json'/'pck'/'pickle'/'txt' (or comma-separated list,
        or True for 'txt') to dump the result to /tmp/<schema>.<ext>
    :return: fn.Struct with the classified attribute lists
    """
    # FIX: avoid shared mutable default arguments; 'values' is written to
    # below and would otherwise accumulate entries across calls.
    attributes = [] if attributes is None else attributes
    values = {} if values is None else values

    ti = fn.now() if ti is None else str2time(ti) if isString(ti) else ti

    api = pta.api(schema)
    is_hpp = isinstance(api, pta.HDBpp)
    check = dict()
    # Normalize old_period to seconds (days if < 1000, hours if < 3600)
    old_period = 24*3600*old_period if old_period < 1000 \
        else (24*old_period if old_period<3600 else old_period)

    allattrs = api.get_attributes() if hasattr(
        api, 'get_attributes') else api.keys()
    print('%s contains %d attributes' % (schema, len(allattrs)))

    if attributes:
        if fn.isString(attributes) and fn.isRegexp(attributes):
            tattrs = [a for a in allattrs if clsearch(attributes, a)]
        else:
            attributes = list(map(fn.tango.get_normal_name,
                                  fn.toList(attributes)))
            # FIX: was "... in allattrs", which made the filter a no-op;
            # the intent is to keep only the requested attributes.
            tattrs = [
                a for a in allattrs
                if fn.tango.get_normal_name(a) in attributes
            ]

    else:
        tattrs = allattrs

    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    print('%d attributes to check' % len(tattrs))
    if not len(tattrs):
        return

    if excluded:
        print('\t%d attributes excluded' % len(excluded))

    # Map each checked attribute to its archiver device (if any)
    archived = {}
    for a in tattrs:
        if hasattr(api, 'get_attribute_archiver'):
            arch = api.get_attribute_archiver(a)
        else:
            arch = api[a].archiver
        if arch:
            archived[a] = arch

    print('\t%d attributes are archived' % len(archived))

    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs)
    #tdevs = filter(fn.check_device,alldevs)
    #nodevs = [fn.tango.get_normal_name(d) for d in alldevs if d not in tdevs]
    #if nodevs:
    #print('\t%d devices are not running' % len(nodevs))

    archs = sorted(set(archived.values()))
    if loads:
        # Report how many attributes each archiver server carries
        astor = fn.Astor()
        astor.load_from_devs_list(archs)
        loads = fn.defaultdict(list)
        for k, s in astor.items():
            for d in s.get_device_list():
                d = fn.tango.get_normal_name(d)
                for a in archived:
                    if fn.tango.get_normal_name(archived[a]) == d:
                        loads[k].append(a)
        for k, s in sorted(loads.items()):
            print('\t%s archives %d attributes' % (k, len(s)))

    noarchs = [
        fn.tango.get_normal_name(d) for d in archs if not fn.check_device(d)
    ]
    if noarchs:
        print('\t%d archivers are not running: %s' % (len(noarchs), noarchs))

    ###########################################################################

    if isString(values) and values.endswith('.pck'):
        print('\nLoading last values from %s file\n' % values)
        import pickle
        values = pickle.load(open(values))

    elif isString(values) and values.endswith('.json'):
        print('\nLoading last values from %s file\n' % values)
        values = fn.json2dict(values)

    elif not use_index or is_hpp:
        print('\nGetting last values ...\n')
        for a in tattrs:
            values[a] = api.load_last_values(a)

    else:
        print('\nGetting updated tables from database ...\n')
        tups = pta.utils.get_table_updates(schema)
        # Some tables do not update MySQL index tables
        tarch = sorted(archived)  # FIX: 'tarch' was previously undefined
        t0 = [a for a in tarch if a in tattrs and not tups[api[a].table]]
        # FIX: was check_attribute(a, ...) using a leaked loop variable
        check.update((t, check_attribute(t, readable=True)) for t in t0
                     if not check.get(t))
        t0 = [t for t in t0 if check[t]]
        print('%d/%d archived attributes have indexes not updated ...' %
              (len(t0), len(tarch)))
        if t0 and len(t0) < 100:
            vs = api.load_last_values(t0)
            tups.update((api[t].table, api[t].last_date) for t in t0)

        for a in tattrs:
            if a in tups:
                values[a] = [tups[api[a].table], 0]

    # Normalize every value to a [time, value] pair (or None/[])
    for k, v in values.items():
        if (len(v) if isSequence(v) else v):
            if isinstance(v, dict):
                v = v.values()[0]  # Python 2: .values() returns a list
            if isSequence(v) and len(v) == 1:
                v = v[0]
            if v and not isNumber(v[0]):
                v = [date2time(v[0]), v[1]]
            values[k] = v
        else:
            values[k] = [] if isSequence(v) else None

    print('%d values obtained' % len(values))

    ###########################################################################

    now = fn.now()
    result = fn.Struct()
    times = [t[0] for t in values.values() if t]
    futures = [t for t in times if t > now]
    times = [t for t in times if t < now]
    tmiss = []
    tfutures = [k for k, v in values.items() if v and v[0] in futures]
    # NOTE(review): raises ValueError if 'times' is empty (no valid values)
    tmin, tmax = min(times), max(times)
    print('\toldest update was %s' % time2str(tmin))
    print('\tnewest update was %s' % time2str(tmax))
    if futures:
        print('\t%d attributes have values in the future!' % len(futures))

    tnovals = [a for a in archived if not values.get(a, None)]
    if tnovals:
        print('\t%d archived attributes have no values' % len(tnovals))
    try:
        tmiss = [
            a for a, v in values.items()
            if v and old_period < v[0] < ti - period and a not in archived
        ]
    except:
        print(values.items()[0])
    if tmiss:
        print('\t%d/%d attrs with values are not archived anymore' %
              (len(tmiss), len(tattrs)))

    result.Excluded = excluded
    result.Schema = schema
    result.All = tattrs
    result.Archived = values

    result.NoValues = tnovals
    result.MissingOrRemoved = tmiss

    result.TMin = tmin
    result.TMax = tmax
    result.Futures = tfutures

    tup = sorted(a for a in values if values[a] and values[a][0] > ti - period)
    tok = [a for a in tup if values[a][1] not in (None, [])]
    print('\n%d/%d archived attributes are updated since %s - %s' %
          (len(tup), len(archived), ti, period))
    print('%d archived attributes are fully ok\n' % (len(tok)))

    tnotup = sorted(a for a in values
                    if values[a] and values[a][0] < ti - period)
    print('\t%d archived attrs are not updated' % len(tnotup))
    tupnoread = [
        a for a in tup if not values[a][1] and fn.read_attribute(a) is None
    ]

    reads = dict((a, fn.read_attribute(a)) for a in tnotup)
    tnotupread = [a for a in tnotup if reads[a] is not None]
    print('\t%d not updated attrs are readable (Lost)' % len(tnotupread))
    print('\t%d of them are not floats' %
          len([t for t in tnotupread if not isinstance(reads[t], float)]))
    print('\t%d of them are states' %
          len([t for t in tnotupread if t.lower().endswith('/state')]))
    print('\t%d of them seem motors' %
          len([t for t in tnotupread if t.lower().endswith('/position')]))

    tnotupevs = [a for a in tnotupread if fn.tango.check_attribute_events(a)]
    print('\t%d not updated attrs are readable and have events (LostEvents)' %
          len(tnotupevs))

    tnotupnotread = [a for a in tnotup if a not in tnotupread]
    print('\t%d not updated attrs are not readable' % len(tnotupnotread))

    result.Lost = tnotupread
    result.LostEvents = tnotupevs

    losts = (tnotupevs if is_hpp else tnotupread)

    # Compare last archived value against current read for "lost" attrs
    diffs = dict()
    for a in losts:
        try:
            v, vv = values.get(a, (None, ))[1], reads[a]
            if fn.isSequence(v): v = fn.toList(v)
            if fn.isSequence(vv): vv = fn.toList(vv)
            diffs[a] = v != vv
            if fn.isSequence(diffs[a]):
                diffs[a] = any(diffs[a])
            else:
                diffs[a] = bool(diffs[a])
        except:
            diffs[a] = None

    # Group not-updated attributes by device family (domain/family)
    fams = fn.defaultdict(list)
    for a in tnotupread:
        fams['/'.join(a.split('/')[-4:-2])].append(a)
    for f in sorted(fams):
        print('\t%s: %d attrs not updated' % (f, len(fams[f])))

    print()

    differ = [a for a in losts if diffs[a]]  #is True]
    print('\t%d/%d not updated attrs have also wrong values!!!' %
          (len(differ), len(losts)))

    rd = pta.Reader()
    only = [a for a in tnotupread if len(rd.is_attribute_archived(a)) == 1]
    print('\t%d/%d not updated attrs are archived only in %s' %
          (len(only), len(losts), schema))
    result.LostDiff = differ
    print()

    archs = sorted(set(archived.values()))
    astor = fn.Astor()
    astor.load_from_devs_list(archs)
    badloads = fn.defaultdict(list)
    for k, s in astor.items():
        for d in s.get_device_list():
            d = fn.tango.get_normal_name(d)
            for a in losts:
                if fn.tango.get_normal_name(archived[a]) == d:
                    badloads[k].append(a)
    for k, s in badloads.items():
        if len(s):
            print('\t%s archives %d lost attributes' % (k, len(s)))

    print('\t%d updated attrs are not readable' % len(tupnoread))

    result.ArchivedAndReadable = tok
    result.Updated = tup
    result.NotUpdated = tnotup
    result.Unreadable = tnotupnotread
    #result.DeviceNotRunning = nodevs
    result.ArchiverNotRunning = noarchs

    result.LostFamilies = fams

    # Tnones is for readable attributes not being archived
    tnones = [
        a for a in archived
        if (a not in values or values[a] and values[a][1] in (None, []))
        and a not in tupnoread and a not in tnotupread
    ]
    tupnones = [a for a in tnones if a in tup]

    if tupnones:
        print('\t%d archived readable attrs record empty values' %
              len(tupnones))

    result.Nones = tnones

    # (a dead 'if 0:' block computing Arch/Read/Lost ratio summaries was
    #  removed here; recover it from VCS history if those metrics are needed)

    if action:
        if action == 'start_devices':
            print('Executing action %s' % action)
            api.start_devices()

        if action == 'restart_all':
            print('Executing action %s' % action)
            devs = api.get_archivers()
            astor = fn.Astor()
            print('Restarting %d devs:' % (len(devs), devs))
            astor.load_from_devs_list(devs)
            astor.stop_servers()
            fn.wait(10.)
            astor.start_servers()

    # NOTE(review): elapsed time is measured against 'ti'; correct only when
    # ti was left at its default (now), otherwise this prints the offset.
    print('\nfinished in %d seconds\n\n' % (fn.now() - ti))

    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, result.keys()))
            if 'json' in x:
                fn.dict2json(result.dict(), x)
            else:
                f = open(x, 'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(result.dict(), f)
                else:
                    f.write(fn.dict2str(result.dict()))
                f.close()

    return result
Пример #21
0
    def get_attributes_errors(self, regexp='*', timeout=3*3600, 
                              from_db=False, extend = False):
        """
        Returns a dictionary {attribute, error/last value}
        
        If from_db=True and extend=True, it performs a full attribute check

        :param regexp: attribute name filter ('*' resets self.status)
        :param timeout: age in seconds before a value counts as stale
        :param from_db: load last values from the database instead of
            inspecting only the Subscribers' error lists
        :param extend: classify each stale attribute further (readability,
            events, archiver errors); implies the from_db code path

        Side effect: fills self.status buckets ('Updated', 'Readable',
        'WrongNone', 'None', 'NotUpdated', 'Unreadable', 'Exception',
        'NoEvents', 'ArchiverError', 'NotChanged').

        NOTE(review): iterates vals.items() while calling vals.pop(a);
        valid under Python 2 (items() returns a list) but would raise
        RuntimeError under Python 3.
        """
        if regexp == '*':
            self.status = fn.defaultdict(list)
        if from_db or extend:
            # 'timeout' becomes the oldest acceptable update timestamp
            timeout = fn.now()-timeout
            attrs = self.get_attributes(True)
            attrs = fn.filtersmart(attrs,regexp)
            print('get_attributes_errors([%d/%d])' 
                  % (len(attrs),len(self.attributes)))
            vals = self.load_last_values(attrs)
            for a,v in vals.items():
                if v and v[0] > timeout:
                    # Recently updated; classify by whether a value exists
                    self.status['Updated'].append(a)
                    if v[1] is not None:
                        self.status['Readable'].append(a)
                    else:
                        # Archived None: check whether the device disagrees
                        rv = fn.read_attribute(a)
                        if rv is not None:
                            self.status['WrongNone'].append(a)
                        else:
                            self.status['None'].append(a)
                    vals.pop(a)

            # Whatever remains in vals was not updated within 'timeout'
            if not extend:
                self.status['NotUpdated'] = vals.keys()
            else:
                # Diagnose each stale attribute in depth
                for a,v in vals.items():
                    c = fn.check_attribute(a)
                    if c is None:
                        vals[a] = 'Unreadable'
                        self.status['Unreadable'].append(a)
                    elif isinstance(c,Exception):
                        vals[a] = str(c)
                        self.status['Exception'].append(a)
                    else:
                        ev = fn.tango.check_attribute_events(a)
                        if not ev:
                            vals[a] = 'NoEvents'
                            self.status['NoEvents'].append(a)
                        else:
                            # Readable and evented: blame the archiver or
                            # conclude the value simply did not change
                            d = self.get_attribute_archiver(a)
                            e = self.get_archiver_errors(d)
                            if a in e:
                                vals[a] = e[a]
                                self.status['ArchiverError'].append(a)
                            else:
                                rv = fn.read_attribute(a)
                                if v and str(rv) == str(v[1]):
                                    vals[a] = 'NotChanged'
                                    self.status['NotChanged'].append(a)
                                else:
                                    self.status['NotUpdated'].append(a)
                                
            if regexp == '*':
                for k,v in self.status.items():
                    print('%s: %s' % (k,len(v)))
            
            return vals
        else:
            # Should inspect the Subscribers Error Lists
            vals = dict()
            for d in self.get_archivers():
                err = self.get_archiver_errors(d)
                for a,e in err.items():
                    if fn.clmatch(regexp,a):
                        vals[a] = e
            return vals    
Пример #22
0
def decimate(db_name, keys, tstart, tend, period=10, dry=False):
    """
    Decimate archived data of db_name between tstart and tend.

    time arguments are strings
    BUT!, this method seems to not work anymore to free space in TDB
    Maybe a mysqld restart is needed, I don't know; 
    but up to now space is not freed

    :param keys: attribute names (containing '/') to decimate by attribute,
        or data_type names to decimate whole tables; empty means all types
    :param period: decimation period in seconds
    :param dry: if True, only print the intervals; no data is modified
    """
    import re
    import time

    # FIX: 'tt0' was never defined, so the final elapsed-time print
    # raised NameError after all the work was already done.
    tt0 = time.time()

    api = pta.api(db_name)

    if '/' in keys[0]:
        print('Decimating by attribute names')
        tables = fn.defaultdict(list)
        for a in keys:
            api.get_attr_id_type_table(a)
            tables[api[a].table].append(a)

        print('tables: %s' % (tables.keys()))
        for table, attrs in tables.items():
            for a in attrs:
                # Optimize the table only after the last attribute
                pta.dbs.decimate_db_table_by_time(api,
                                                  table,
                                                  api[a].id,
                                                  tstart,
                                                  tend,
                                                  period,
                                                  optimize=(a == attrs[-1]))

    if not '/' in keys[0]:
        print('Decimating by data_type')

        data_types = keys
        if not data_types:
            data_types = [
                r[0]
                for r in api.Query('select data_type from att_conf_data_type')
            ]
        else:
            data_types = [d.replace('att_', '') for d in data_types]

        print('Decimating %s types between %s and %s: %s' %
              (db_name, tstart, tend, data_types))

        for data_type in data_types:

            attrs = api.Query(
                'select att_conf_id from att_conf,att_conf_data_type '
                'where att_conf.att_conf_data_type_id = att_conf_data_type.att_conf_data_type_id '
                'and data_type = "%s"' % data_type)
            attrs = [r[0] for r in attrs]

            q = ("select partition_name,table_name"
                 " from information_schema.partitions where"
                 " partition_name is not NULL"
                 " and table_schema = '%s'" % db_name +
                 " and table_name like '%" + data_type + "'")
            print(q)
            partitions = api.Query(q)
            if partitions:
                table = partitions[0][1]
            else:
                table = 'att_' + data_type
            print('%s has %d attributes in %d partitions' %
                  (table, len(attrs), len(partitions)))
            c0 = api.Query('select count(*) from %s ' % table)

            intervals = []

            # Each partition is named ...YYYYMM...; convert it to a
            # [month_start, next_month_start) time interval and keep only
            # those overlapping [tstart, tend]
            for p in partitions:
                p = p[0]
                r = '(?P<year>[0-9][0-9][0-9][0-9])(?P<month>[0-9][0-9])'
                md = re.search(r, p).groupdict()
                t0 = '%s-%s-01 00:00:00' % (md['year'], md['month'])
                m, y = int(md['month']), int(md['year'])

                if m == 12:
                    m, y = 1, y + 1
                else:
                    m += 1

                t1 = '%04d-%02d-01 00:00:00' % (y, m)
                if fn.str2time(t0)<fn.str2time(tend) and \
                fn.str2time(t1)>fn.str2time(tstart):
                    intervals.append((t0, t1, p))

            if not partitions:
                # Unpartitioned table: split the range in 10 equal chunks
                ts, te = fn.str2time(tstart), fn.str2time(tend)
                tinc = (te - ts) / 10.
                for i in range(1, 11):
                    intervals.append((fn.time2str(ts + (i - 1) * tinc),
                                      fn.time2str(ts + i * tinc), None))

            print('%d intervals in %s' % (len(intervals), table))

            for t0, t1, p in intervals:

                print((t0, t1))
                if dry: continue
                for a in attrs:
                    c0 = api.getTableSize(table)
                    pta.dbs.decimate_db_table(
                        db=api,
                        table=table,
                        start=fn.str2time(t0),
                        end=fn.str2time(t1),
                        # Strings change rarely; use a longer period
                        period=600 if 'string' in table else 300,
                        condition=' att_conf_id = %s ' % a,
                        iteration=2000,
                        cols=['data_time', 'value_r'],
                        us=True,
                        repeated=True)

                if p:
                    api.Query('alter table %s optimize partition %s' %
                              (table, p))

            if not dry:
                q = 'repair table %s;' % table
                print('\n' + q)
                api.Query(q)
                c1 = api.getTableSize(table)
                print('\n\n%s size reduced from %s to %s' % (table, c0, c1))

        print('ellapsed %d seconds' % (time.time() - tt0))
Пример #23
0
def check_db_schema(schema, attributes = None, values = None,
                    tref = -12*3600, n = 1, filters = '*', export = 'json',
                    restart = False, subscribe = False):
    """
    Check the archiving status of a schema and classify its attributes.

    tref is the time that is considered updated (e.g. now()-86400)
    n is used to consider multiple values
    
    attrs: all attributes in db
    on: archived
    off: in db but not currently archived
    ok: updated   
    
    known error causes (attrs not lost but not updated):
    
    nok: attributes are not currently readable
    noevs: attributes not sending events
    novals: attributes never recorded a value
    stall: not updated, but current value matches archiving
    lost: not updated, and values doesn't match with current

    :param schema: schema name, or an api object exposing a .schema attr
    :param restart: restart stopped archivers/attributes before and after
    :param export: 'json'/'pck'/'txt' (or list) to dump result to /tmp
    :return: fn.Struct with the classified lists (sequences sorted)
    """
    
    t0 = fn.now()
    if hasattr(schema,'schema'):
        # FIX: was "api,schema = schema,api.schema" which read the local
        # 'api' before assignment (UnboundLocalError)
        api, schema = schema, schema.schema
    else:
        api = pta.api(schema)

    r = fn.Struct(api=api,schema=schema)    
    if isString(tref): 
        tref = fn.str2time(tref)
    # Negative tref is relative to now
    r.tref = fn.now()+tref if tref < 0 else tref
    r.attrs = [a for a in (attributes or api.get_attributes())
                if fn.clmatch(filters,a)]
    print('check_db_schema(%s,attrs[%s],tref="%s",export as %s)' 
          % (schema,len(r.attrs),fn.time2str(r.tref),export))
    
    if restart and schema!='hdbpc':
        # Restart dead archivers and stopped attributes before checking
        archs = [a for a in api.get_archivers() if not fn.check_device(a)]
        if archs:
            try:
                print('Restarting archivers: %s' % str(archs))
                astor = fn.Astor(archs)
                astor.stop_servers()
                astor.start_servers()
            except:
                traceback.print_exc()
        
        stopped = api.get_stopped_attributes()
        print('Restarting %d stopped attributes' % len(stopped))
        api.restart_attributes(stopped)
    
    r.on = [a for a in api.get_archived_attributes() if a in r.attrs]
    r.off = [a for a in r.attrs if a not in r.on]
    
    r.archs = fn.defaultdict(list)
    r.pers = fn.defaultdict(list)
    r.values = load_schema_values(api,r.on,values,n,tref=tref)
    
    if schema in ('tdb','hdb'):
        [r.archs[api[k].archiver].append(k) for k in r.on]
    else:
        # HDB++-like schema: normalize multi-value results and collect
        # archiver/periodic-archiver assignments
        r.rvals = r.values
        r.freq, r.values = {}, {}
        for k,v in r.rvals.items():
            try:
                if n > 1:
                    v = v[0] if isSequence(v) and len(v) else v
                    r.values[k] = v[0] if isSequence(v) and len(v) else v
                    # Average event frequency over the returned window
                    r.freq[k] = v and float(len(v))/abs(v[0][0]-v[-1][0])
                else:
                    r.values[k] = v
            except Exception as e:
                print(k,v)
                print(fn.except2str())
                
        for k in api.get_archivers():
            r.archs[k] = api.get_archiver_attributes(k)
        for k in api.get_periodic_archivers():
            r.pers[k] = api.get_periodic_archivers_attributes(k)

    # Get all updated attributes
    r.ok = [a for a,v in r.values.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a,fn.check_attribute(a)
                    ) for a in r.on if a not in r.ok)
    #r.novals = [a for a,v in r.values.items() if not v]
    r.nok, r.stall, r.noevs, r.lost, r.novals, r.evs, r.rem = [],[],[],[],[],{},[]
    # Method to compare numpy values
    
    for a,v in r.check.items():
        state = check_archived_attribute(a, v, default=CheckState.LOST, 
            cache=r, tref=r.tref, 
            check_events = subscribe and not api.is_periodic_archived(a))
        {
            #CheckState.ON : r.on,
            #CheckState.OFF : r.off,
            CheckState.OK : r.ok, #Shouldn't be any ok in check list               
            CheckState.NO_READ : r.nok,
            CheckState.STALL : r.stall,
            CheckState.NO_EVENTS : r.noevs,
            CheckState.LOST : r.lost,
            CheckState.UNK : r.novals,
         }[state].append(a)
                
    # SUMMARY
    r.summary = schema +'\n'
    r.summary += ','.join(
        """on: archived
        off: not archived
        ok: updated   
        nok: not readable
        noevs: no events
        novals: no values
        stall: not changing
        lost: not updated
        """.split('\n'))+'\n'
    
    getline = lambda k,v,l: '\t%s:\t:%d\t(%s)' % (k,len(v),l)
    
    r.summary += '\n\t%s:\t:%d\tok+stall: %2.1f %%' % (
        'attrs',len(r.attrs),
        (100.*(len(r.ok)+len(r.stall))/(len(r.on) or 1e12)))
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'on','off',len(r.on),len(r.off))
    #if r.off > 20: r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'ok','nok',len(r.ok),len(r.nok))
    if len(r.nok) > 10: 
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'noevs','novals',len(r.noevs),len(r.novals))
    if len(r.novals) > 1: 
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'lost','stall',len(r.lost),len(r.stall))
    if len(r.lost) > 1: 
        r.summary+=' !!!'
    r.summary += '\n'
        
    # Per-archiver breakdown; only archivers with >2 problem attrs listed
    r.archivers = dict.fromkeys(api.get_archivers())
    for d in sorted(r.archivers):
        r.archivers[d] = api.get_archiver_attributes(d)
        novals = [a for a in r.archivers[d] if a in r.novals]   
        lost = [a for a in r.archivers[d] if a in r.lost]
        if (len(novals)+len(lost)) > 2:
            r.summary += ('\n%s (all/novals/lost): %s/%s/%s' 
                % (d,len(r.archivers[d]),len(novals),len(lost)))
            
    if hasattr(api,'get_periodic_archivers'):
        r.periodics = dict.fromkeys(api.get_periodic_archivers())
        for d in sorted(r.periodics):
            r.periodics[d] = api.get_periodic_archiver_attributes(d)
            novals = [a for a in r.periodics[d] if a in r.novals]
            lost = [a for a in r.periodics[d] if a in r.lost]
            if len(novals)+len(lost) > 2:
                r.summary += ('\n%s (all/novals/lost): %s/%s/%s' % 
                    (d,len(r.periodics[d]),len(novals),len(lost)))
        
        r.perattrs = [a for a in r.on if a in api.get_periodic_attributes()]
        r.notper = [a for a in r.on if a not in r.perattrs]
        
        
    r.summary += '\nfinished in %d seconds\n\n'%(fn.now()-t0)
    print(r.summary)
    
    if restart:
        try:
            retries = r.lost+r.novals+r.nok
            print('restarting %d attributes' % len(retries))
            api.restart_attributes(retries)
        except:
            traceback.print_exc()
    
    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json','pck','pickle','txt'):
                x = '/tmp/%s.%s' % (schema,x)
            print('Saving %s file with keys:\n%s' % (x,r.keys()))
            if 'json' in x:
                fn.dict2json(r.dict(),x)
            else:
                f = open(x,'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(r.dict(),f)
                else:
                    f.write(fn.dict2str(r.dict()))
                f.close()     
                
    # Sort every list in the result for stable, readable output
    for k,v in r.items():
        if fn.isSequence(v):
            r[k] = sorted(v)
                
    return r
Пример #24
0
__doc__ = """
script to generate all test_method or test_class functions for a module

if a module is passed, docs and test cases are added to the resulting file

Usage:
    gen_test_file.py [generate|check] [src/module] [dest]
    
"""

import sys, os, traceback, fandango

try:
    outs = []
    docs = fandango.defaultdict(str)
    tests = fandango.defaultdict(str)
    action = sys.argv[1]
    fname = sys.argv[2]
    modname = ''

    if not os.path.exists(fname):
        print('loading %s module' % fname)
        mod = fandango.objects.loadModule(sys.argv[1])
        fname = fandango.objects.findModule(sys.argv[1])
        modname = mod.__name__
        docs.update(
            (k, getattr(v, '__doc__', '')) for k, v in mod.__dict__.items())
        tests = getattr(mod, '__test__', {})

    else:
Пример #25
0
# Fields shown in one-line alarm summaries
SUMMARY_FIELDS = 'tag', 'state', 'priority', 'time', 'formula', 'message'

# Must be lists, not tuples
# NOTE(review): the comment above says lists, yet tuples are used below —
# confirm whether consumers mutate these before changing either side.
# Static alarm configuration fields
DATA_FIELDS = ('tag', 'device', 'priority', 'formula', 'message',
               'annunciators')
# Dynamic/runtime alarm state fields
STATE_FIELDS = ('state', 'time', 'counter', 'active', 'disabled',
                'acknowledged', 'updated', 'last_sent', 'last_error')

#ALARM_ROW = ['tag','get_state','get_time','device','description']
#DEFAULT_COLUMNS = ['tag','get_state','active','get_time','severity']
# Columns shown in tabular alarm views
VIEW_FIELDS = ['tag', 'device', 'state', 'priority', 'time']

# Column order used when importing/exporting alarms as CSV
CSV_FIELDS = 'tag,device,description,severity,receivers,formula'.split(',')

FORMATTERS = fd.defaultdict(lambda: str)
FORMATTERS.update({
    'tag':
    lambda s, l=10: ('{0:<%d}' % (l or 4)).format(s),
    #'time' : lambda s,l=25: ('{:^%d}'%l).format(s),
    'device':
    lambda s, l=25: ('{0:^%d}' % (l or 4)).format(s),
    'description':
    lambda s, l=50: ('{0:<}').format(s),
    'message':
    lambda s, l=50: ('{0:<}').format(s),
    'severity':
    lambda s, l=10: ('{0:^%d}' % (l or 4)).format(s),
    'priority':
    lambda s, l=10: ('{0:^%d}' % (l or 4)).format(s),
    'get_state':
Пример #26
0
def _write_section(dev_class, section, items):
    # Echo *items* to stdout and dump them to '<dev_class>_<section>.txt',
    # one per line (factored out of three identical copy-pasted loops).
    print('\nWriting %s %s ([%d])\n' % (dev_class, section, len(items)))
    with open('%s_%s.txt' % (dev_class, section), 'w') as f:
        for line in items:
            print('%s' % line)
            f.write('%s\n' % line)


def generate_class_properties(filein='ui_attribute_values.pck', all_rw=False):
    """
    Generate simulation-ready attribute/command/state declarations for every
    device class found in a pickled device export (as produced by
    export_attributes_to_pck).

    For each class (not filtered out interactively) three files are written:
    <Class>_attributes.txt, <Class>_commands.txt and <Class>_states.txt.

    :param filein: pickle file containing a {device: Struct} mapping
    :param all_rw: if True, every attribute formula is made writable
    :return: filein, unchanged
    """
    print('generate_class_properties:' + str(filein))
    # fix: original leaked the file handle (pickle.load(open(filein)))
    with open(filein) as fobj:
        devs = pickle.load(fobj)

    classes = defaultdict(Struct)
    print('classes in %s are: %s' %
          (filein, sorted(set(s.dev_class for s in devs.values()))))
    filters = raw_input(
        'Do you want to filter out some classes? [PyStateComposer]'
    ) or 'PyStateComposer'

    # Initialize one entry per (not filtered) device class
    for d, s in devs.items():
        if s.dev_class in filters:
            continue
        classes[s.dev_class].attrs = {}
        classes[s.dev_class].comms = {}
        classes[s.dev_class].values = defaultdict(set)

    # First pass: collect the scalar values seen for each attribute; they
    # are used below to build realistic random-choice formulas.
    for d, s in devs.items():
        if s.dev_class in filters:
            continue
        for a, t in s.attrs.items():
            t['datatype'] = t.get('data_type', 'DevDouble')
            if not isinstance(t, Struct):
                t = Struct(t)
            if t.value is not None and not any(
                    x in t.datatype.lower() for x in ('array', )):
                try:
                    classes[s.dev_class].values[a].add(t.value)
                except Exception:
                    # best-effort: unhashable values are just reported
                    # (fix: was a Python-2-only bare print statement)
                    print('%s %s %s %s' % (d, s.dev_class, a, t))

    # Second pass: build the declarations for attributes and commands
    for d, s in devs.items():

        if s.dev_class in filters:
            continue

        # Iterate attributes
        for a, t in s.attrs.items():

            if a.lower() in ('state', 'status'):
                continue

            # keep a previously generated declaration over a valueless one
            if t.value is None and a in classes[s.dev_class].attrs:
                continue

            if t.value is None:
                datatype, formula = 'DevDouble', 'NaN'
            else:
                datatype = (t.datatype if t.data_format == 'SCALAR' else
                            t.datatype.replace('Dev', 'DevVar') + 'Array')
                if 'bool' in datatype.lower():
                    formula = DEFAULT_BOOL()
                elif 'state' in datatype.lower():
                    formula = DEFAULT_STATE(
                        f='choice(%s or [0])' %
                        list(classes[s.dev_class].values[a]))
                elif 'string' in datatype.lower():
                    formula = DEFAULT_STRING(
                        d=d,
                        a=a,
                        f='choice(%s or [0])' %
                        list(classes[s.dev_class].values[a]))
                elif ('double' in datatype.lower()
                      or 'float' in datatype.lower()):
                    formula = DEFAULT_DOUBLE(f=random.choice(
                        list(classes[s.dev_class].values[a]) or [0]))
                else:
                    formula = DEFAULT_INT(
                        f='choice(%s or [0])' %
                        list(classes[s.dev_class].values[a]))
                if 'Array' in datatype:
                    formula = "[%s for i in range(10)]" % formula
                if all_rw or 'WRITE' in t.writable:
                    formula = DEFAULT_WRITE(a=a, f=formula)
            # NOTE(review): in the original this assignment was only reached
            # when t.value was not None, leaving the 'DevDouble'/'NaN'
            # defaults above dead code; dedented so valueless attributes get
            # a declaration too, as the duplicate check above implies.
            classes[s.dev_class].attrs[a] = '%s = %s(%s)' % (
                a, datatype, formula)

        # Iterate commands: simulated through their output type
        for c, t in s.comms.items():

            if fd.isMapping(t):
                t = t['in_type'], t['out_type']
            datatype = t[1] if t[1] != 'DevVoid' else 'DevString'
            if 'bool' in datatype.lower():
                formula = DEFAULT_BOOL()
            elif 'state' in datatype.lower():
                formula = DEFAULT_STATE()
            elif 'string' in datatype.lower():
                formula = DEFAULT_STRING(d=d, a=c)
            elif 'double' in datatype.lower() or 'float' in datatype.lower():
                formula = DEFAULT_DOUBLE()
            else:
                formula = DEFAULT_INT()
            if 'Array' in datatype:
                formula = "[%s for i in range(10)]" % formula
            if 'DevVoid' not in t[0]:
                formula = DEFAULT_ARGS(f=formula)
            classes[s.dev_class].comms[c] = '%s = %s(%s)' % (c, datatype,
                                                             formula)

        classes[s.dev_class].states = DEFAULT_STATES

    for k, t in classes.items():
        _write_section(k, 'attributes', sorted(t.attrs.values()))
        _write_section(k, 'commands', sorted(t.comms.values()))
        _write_section(k, 'states', t.states)

    return (filein)
Пример #27
0
def decimate(db_name, keys, tstart, tend, period=10, dry=False):
    """
    Decimate archived values between tstart and tend (time strings).

    :param db_name: archiving schema name, passed to pta.api()
    :param keys: attribute names (if they contain '/') or data_type names;
        an empty data_type list means "all types in att_conf_data_type"
    :param period: seconds between kept values (attribute-name mode only;
        the data_type mode hardcodes 600 s for strings, 300 s otherwise)
    :param dry: if True, only print the intervals, do not modify the DB

    BUT!, this method seems to not work anymore to free space in TDB.
    Maybe a mysqld restart is needed, I don't know;
    but up to now space is not freed.
    """
    import re  # hoisted: was re-imported on every data_type iteration
    tt0 = time.time()  # fix: read in the final report but never assigned
    api = pta.api(db_name)

    if '/' in keys[0]:
        print('Decimating by attribute names')
        # group the attributes by the table that stores them
        tables = fn.defaultdict(list)
        for a in keys:
            api.get_attr_id_type_table(a)
            tables[api[a].table].append(a)

        print('tables: %s' % (tables.keys()))
        for table, attrs in tables.items():
            for a in attrs:
                # optimize the table only after the last attribute is done
                pta.dbs.decimate_db_table_by_time(api,
                    table, api[a].id, tstart, tend, period,
                    optimize=(a == attrs[-1]))

    if not '/' in keys[0]:
        print('Decimating by data_type')

        data_types = keys
        if not data_types:
            data_types = [r[0] for r in api.Query(
                'select data_type from att_conf_data_type')]
        else:
            data_types = [d.replace('att_', '') for d in data_types]

        print('Decimating %s types between %s and %s: %s'
              % (db_name, tstart, tend, data_types))

        for data_type in data_types:

            # ids of the attributes stored with this data type
            attrs = api.Query('select att_conf_id from att_conf,att_conf_data_type '
                    'where att_conf.att_conf_data_type_id = att_conf_data_type.att_conf_data_type_id '
                    'and data_type = "%s"' % data_type)
            attrs = [r[0] for r in attrs]

            q = ("select partition_name,table_name"
                    " from information_schema.partitions where"
                    " partition_name is not NULL"
                    " and table_schema = '%s'" % db_name +
                    " and table_name like '%" + data_type + "'")
            print(q)
            partitions = api.Query(q)
            if partitions:
                table = partitions[0][1]
            else:
                table = 'att_' + data_type
            print('%s has %d attributes in %d partitions'
                  % (table, len(attrs), len(partitions)))
            c0 = api.Query('select count(*) from %s ' % table)

            intervals = []

            # Convert each monthly YYYYMM partition name into a (t0, t1, p)
            # interval, keeping only those overlapping [tstart, tend]
            for p in partitions:
                p = p[0]
                r = '(?P<year>[0-9][0-9][0-9][0-9])(?P<month>[0-9][0-9])'
                md = re.search(r, p).groupdict()
                t0 = '%s-%s-01 00:00:00' % (md['year'], md['month'])
                m, y = int(md['month']), int(md['year'])

                if m == 12:
                    m, y = 1, y + 1
                else:
                    m += 1

                t1 = '%04d-%02d-01 00:00:00' % (y, m)
                if fn.str2time(t0) < fn.str2time(tend) and \
                fn.str2time(t1) > fn.str2time(tstart):
                    intervals.append((t0, t1, p))

            if not partitions:
                # no partitions: split the requested range in 10 equal slices
                ts, te = fn.str2time(tstart), fn.str2time(tend)
                tinc = (te - ts) / 10.
                for i in range(1, 11):
                    intervals.append((fn.time2str(ts + (i - 1) * tinc),
                                    fn.time2str(ts + i * tinc), None))

            print('%d intervals in %s' % (len(intervals), table))

            for t0, t1, p in intervals:

                print((t0, t1))
                if dry:
                    continue
                for a in attrs:
                    c0 = api.getTableSize(table)
                    # string tables are decimated with a longer period
                    pta.dbs.decimate_db_table(db=api, table=table,
                        start=fn.str2time(t0), end=fn.str2time(t1),
                        period=600 if 'string' in table else 300,
                        condition=' att_conf_id = %s ' % a,
                        iteration=2000, cols=['data_time', 'value_r'],
                        us=True, repeated=True)

                if p:
                    api.Query('alter table %s optimize partition %s'
                              % (table, p))

            if not dry:
                q = 'repair table %s;' % table
                print('\n' + q)
                api.Query(q)
                c1 = api.getTableSize(table)
                print('\n\n%s size reduced from %s to %s' % (table, c0, c1))

        print('ellapsed %d seconds' % (time.time() - tt0))
Пример #28
0
def check_db_schema(schema, tref=None):
    """
    Check the archiving status of a schema and print/return a summary.

    :param schema: archiving schema name, passed to pta.api()
    :param tref: reference epoch; attributes updated after it count as 'ok'
        (defaults to one hour ago)
    :return: fn.Struct with fields:
        attrs: all known attributes; on/off: archived or not
        dups: attributes assigned to more than one dedicated archiver
        ok: updated after tref; nok: currently unreadable
        stall: readable but value/timestamp unchanged since last archived
        noev: not sending events; lost: events ok but archiving failed
    """
    r = fn.Struct()
    r.api = api = pta.api(schema)
    r.tref = fn.notNone(tref, fn.now() - 3600)

    r.attrs = api.keys()
    r.on = api.get_archived_attributes()
    r.off = [a for a in r.attrs if a not in r.on]
    if schema in ('tdb', 'hdb'):
        # legacy schemas: use table update times instead of stored values
        ups = api.db.get_table_updates()
        r.vals = dict((k, (ups[api[k].table], None)) for k in r.on)
    else:
        r.vals = dict(fn.kmap(api.load_last_values, r.on))
        # keep only the (time, value) entry of each single-key result
        # (list(...) keeps this valid on both py2 and py3)
        r.vals = dict((k, v and list(v.values())[0])
                      for k, v in r.vals.items())

    # Detect attributes assigned to more than one dedicated archiver
    # (fix: replaced side-effect list comprehensions with explicit loops)
    dups = fn.defaultdict(list)
    if getattr(api, 'dedicated', None):
        for a in r.on:
            for k, v in api.dedicated.items():
                if a in v:
                    dups[a].append(k)
        for a in [a for a, v in dups.items() if len(v) <= 1]:
            dups.pop(a)
    r.dups = dict(dups)

    # Get all updated attributes
    r.ok = [a for a, v in r.vals.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a, fn.check_attribute(a)) for a in r.on if a not in r.ok)
    r.nok, r.stall, r.noev, r.lost, r.evs = [], [], [], [], {}
    # Method to compare numpy values (arrays need all())
    fbool = lambda x: all(x) if fn.isSequence(x) else bool(x)

    for a, v in r.check.items():
        # Get current value/timestamp
        vv, t = getattr(v, 'value', v), getattr(v, 'time', 0)
        t = t and fn.ctime2time(t)

        if isinstance(vv, (type(None), Exception)):
            # attribute is not readable
            r.nok.append(a)
        elif r.vals[a] and 0 < t <= r.vals[a][0]:
            # attribute timestamp doesnt change
            r.stall.append(a)
        elif r.vals[a] and fbool(vv == r.vals[a][1]):
            # attribute value doesnt change
            r.stall.append(a)
        else:
            r.evs[a] = fn.tango.check_attribute_events(a)
            if not r.evs[a]:
                # attribute doesnt send events
                r.noev.append(a)
            else:
                # archiving failure (events or polling)
                r.lost.append(a)

    # SUMMARY
    print(schema)
    for k in 'attrs on off dups ok nok noev stall lost'.split():
        print('\t%s:\t:%d' % (k, len(r.get(k))))

    return r