Example #1
    def trigger_callbacks(self, regs=None):
        """
        regs = list of addresses that changed
        """
        if not self.callbacks:
            return
        for key, cb in self.callbacks.items():
            try:
                rs, push, org = regs, False, cb
                if fn.isSequence(cb):
                    rs = regs and [r for r in regs if r in cb]
                    if not regs or rs or (len(cb) == 1
                                          and fn.isCallable(cb[0])):
                        cb = cb[-1]
                    else:
                        continue
                msg = ('%s: %s.trigger_callbacks(%s,%s): %s:%s' %
                       (fn.time2str(), self.name, fn.shortstr(
                           regs, 40), rs, key, org))
                if self.plc_obj is not None:
                    self.plc_obj.debug(msg)
                else:
                    print(msg)
                if fn.isCallable(cb):
                    cb(key)  #,push=push)
                else:
                    cb = getattr(cb, 'push_event',
                                 getattr(cb, 'event_received', None))
                    cb and cb(key)

                fn.wait(1.e-4)

            except Exception as e:
                print(fn.except2str())
                print('%s.callback(%s,%s): %s' % (self, key, cb, e))
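
A minimal sketch of the callback registry the dispatcher above assumes; the entry shapes follow the fn.isSequence/fn.isCallable branches in the code, but the key names and register addresses are made up for illustration:

def on_change(key):
    # fired with the registry key, as cb(key) is called above
    print('%s changed' % key)

# Two entry styles accepted by trigger_callbacks(), per the branches above:
callbacks = {
    'any_change': on_change,            # plain callable: fired on every trigger
    'temps': [100, 101, on_change],     # register addresses + callable as last item
}
# trigger_callbacks(regs=[101]) would fire both entries: 'any_change' because
# it is directly callable, and 'temps' because address 101 is in its list.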
Example #2
    def start_devices(self,regexp = '*', force = False, 
                      do_init = False, do_restart = False):
        #devs = fn.tango.get_class_devices('HdbEventSubscriber')
        devs = self.get_archivers()
        if regexp:
            devs = fn.filtersmart(devs,regexp)
        off = sorted(set(d for d in devs if not fn.check_device(d)))

        if off and do_restart:
            print('Restarting %s Archiving Servers ...'%self.db_name)
            astor = fn.Astor()
            astor.load_from_devs_list(list(off))
            astor.stop_servers()
            fn.wait(3.)
            astor.start_servers()
            fn.wait(3.)

        for d in devs:
            try:
                dp = fn.get_device(d, keep=True)
                if do_init:
                    dp.init()
                if force or dp.attributenumber != dp.attributestartednumber:
                    off.append(d)
                    print('%s.Start()' % d)
                    dp.start()
            except Exception as e:
                self.warning('start_archivers(%s) failed: %s' % (d,e))
Example #3
    def __test__(*args):
        t0 = fd.now()
        args = args or ['*value*', '20']
        opts = dict.fromkeys(a.strip('-') for a in args if a.startswith('-'))
        args = [a for a in args if not a.startswith('-')]
        scope = args[0]
        tlimit = int((args[1:] or ['20'])[0])

        if opts:
            opts = dict(o.split('=') if '=' in o else (o, True) for o in opts)
            opts.update((o, fd.str2type(v)) for o, v in opts.items())

        print('AlarmView(Test,\t'
              '\tscope=%s,\n\ttlimit=%s,\n\t**%s)\n' % (scope, tlimit, opts))

        if opts.get('d', False):
            th = TangoAttribute.get_thread()
            th.set_period_ms(500)
            th.setLogLevel('DEBUG')

        verbose = opts.get('v', 2)

        view = AlarmView('Test', scope=scope, verbose=verbose, **opts)
        print('\n'.join('>' * 80 for i in range(4)))

        cols = 'sortkey', 'tag', 'state', 'active', 'time', 'severity'
        while fd.now() < (t0 + tlimit):
            fd.wait(3.)
            print('\n' + '<' * 80)
            l = view.sort(as_text={'cols': cols})
            print('\n'.join(l))

        print('AlarmView.__test__(%s) finished after %d seconds' %
              (args[0], fd.now() - t0))
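
A small worked example of the argument parsing above; the scope pattern and option values are hypothetical:

# Hypothetical invocation: __test__('*pressure*', '60', '-v=4', '-d')
args = ['*pressure*', '60', '-v=4', '-d']
opts = dict.fromkeys(a.strip('-') for a in args if a.startswith('-'))  # {'v=4': None, 'd': None}
args = [a for a in args if not a.startswith('-')]                      # ['*pressure*', '60']
opts = dict(o.split('=') if '=' in o else (o, True) for o in opts)     # {'v': '4', 'd': True}
# after fd.str2type(): {'v': 4, 'd': True}; scope = '*pressure*', tlimit = 60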
Example #4
def run():
    ### LAUNCH DEVICES
    dspath = path.join(path.dirname(path.abspath(panic.__file__)),
                       'ds','PyAlarm.py')
    fn.log.info('MAIL_TEST: RUN '+'>'*40)
    os.system(dspath+' test_mail -v2 &')
    fn.wait(30.)
Example #5
    def trigger_callbacks(self, regs=None):
        """
        regs = list of addresses that changed
        """
        if not self.callbacks:
            return
        for key, cb in self.callbacks.items():
            try:
                push = False
                if fun.isSequence(cb):
                    if not regs or any(r in cb for r in regs):
                        #print('%s: %s.trigger(%s): callback(%s):%s' %
                        #(fun.time2str(),self,key,cb,regs))
                        cb, push = cb[-1], True
                    else:
                        continue
                if fun.isCallable(cb):
                    cb(key, push=push)
                else:
                    cb = getattr(cb, 'push_event',
                                 getattr(cb, 'event_received', None))
                    cb and cb(key)

                fandango.wait(1.e-4)

            except Exception as e:
                print(fandango.except2str())
                print('%s.callback(%s): %s' % (self, cb, e))
Example #6
    def start_devices(self,
                      regexp='*',
                      force=False,
                      do_init=False,
                      do_restart=False):
        #devs = fn.tango.get_class_devices('HdbEventSubscriber')
        devs = self.get_archivers()
        if regexp:
            devs = fn.filtersmart(devs, regexp)
        off = sorted(set(d for d in devs if not fn.check_device(d)))

        if off and do_restart:
            print('Restarting %s Archiving Servers ...' % self.db_name)
            astor = fn.Astor()
            astor.load_from_devs_list(list(off))
            astor.stop_servers()
            fn.wait(3.)
            astor.start_servers()
            fn.wait(3.)

        for d in devs:
            try:
                dp = fn.get_device(d)
                if do_init:
                    dp.init()
                if force or dp.attributenumber != dp.attributestartednumber:
                    off.append(d)
                    print('%s.Start()' % d)
                    dp.start()
            except Exception as e:
                self.warning('start_archivers(%s) failed: %s' % (d, e))
Example #7
    def readComm(self, commCode, READ=True, emulation=False):
        ## A wait time is necessary before reading the buffer.
        # The wait is divided into smaller periods; in each period we check
        # what has been received from the serial port, and the wait finishes
        # when, after receiving some data, there is silence again.
        t0, result, retries = fandango.now(), '', 0
        if not hasattr(self, '_Dcache'):
            self._Dcache = {}
        if emulation and commCode in self._Dcache:
            return self._Dcache[commCode]

        wtime = 0.0
        result, rec, lastrec = "", "", ""
        div = 4.
        before = time.time()
        after = before + 0.001

        while wtime < self.waitTime and not (not len(rec) and len(
                lastrec.replace(commCode, '').replace('\r', '').replace('\n', ''))):
            
            #if self.trace and retries:
            #    print('In readComm(%s)(%d) Waiting %fs for answer ...'
            #        %(commCode,retries,self.waitTime))
            retries += 1

            # The wait aborts after waitTime, or when nothing more is read
            # after having received something other than \r or \n.
            # Between reads a pause of waitTime/div is performed; shorter
            # pauses did not help, due to the imprecision of time.sleep().

            last = before
            after = time.time()
            pause = self.waitTime/div - (after - before)
            fandango.wait(max(pause, 0))  #time.sleep(max(pause,0))
            before = time.time()
            lastrec = lastrec + rec
            
            if self.getSerialClass() == 'PySerial':
                nchars = self.dp.read_attribute('InputBuffer').value
                rec = self.dp.command_inout("Read",nchars)
            else: #Class is 'Serial'
                #rec = self.dp.command_inout("DevSerReadRaw")
                rec = self.dp.command_inout("DevSerReadString",0)
                
            rec = rec.strip()
            
            lrclean = lastrec.replace(commCode, '').replace('\r', '').replace('\n', '')
            rrclean = rec.replace('\r', '\\r').replace('\n', '\\n')
            if self.trace and rrclean:
                #self.debug
                print('received(' + str(wtime) + ';' + str(after - last) + ';'
                      + str(len(lrclean)) + ';' + str(len(rec)) + "): '" + rrclean + "'")

            result += rec
            wtime += self.waitTime/div

        self._Dcache[commCode] = result
        readtime = fandango.now()-t0
        self.maxreadtime = max((self.maxreadtime,readtime))
        if self.trace:
            print('ReadComm(%s) = %s done in %f seconds (max = %f, + %f)' % 
                (commCode,result.strip(),readtime,self.maxreadtime,fandango.now()-self.lasttime))

        return result
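
The wait strategy described in the comments above, reduced to a standalone sketch; read_chunk and the timing constants are stand-ins for illustration, not the device's real API:

import time

def wait_for_silence(read_chunk, wait_time=1.0, div=4.):
    """Poll read_chunk() until wait_time expires, or until data was received
    and a whole polling period then passes with nothing new (silence)."""
    result, rec, lastrec, wtime = '', '', '', 0.0
    while wtime < wait_time:
        time.sleep(wait_time / div)
        lastrec += rec
        rec = read_chunk()              # stand-in for the Serial/PySerial read
        result += rec
        wtime += wait_time / div
        if not rec and lastrec.strip():
            break                       # something arrived earlier, now silence
    return result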
Example #8
 def run(self):
     ### LAUNCH DEVICES
     dspath = path.join(path.dirname(path.abspath(panic.__file__)), 'ds',
                        'PyAlarm.py')
     self.log('RUN ' + '>' * 40)
     for s in self.servers:
         os.system(dspath + ' ' + s + ' -v2 &')
     fn.wait(30.)
Example #9
def restart_servers(servers=[], host=''):
    if not servers:
        servers = get_servers_status()['restart']
    astor = fandango.Astor()
    astor.load_from_servers_list(servers)
    astor.stop_servers()
    print('waiting ...')
    fandango.wait(10.)
    for s in astor:
        host = host or astor[s].host
        print('Starting %s at %s' % (s, host))
        astor.start_servers(s, host=host)
    return
Example #10
 def delete_device(self):
     # PROTECTED REGION ID(SchemaManager.delete_device) ENABLED START #
     try:
         print('-'*80)
         print("[Device delete_device method] for %s"%self.get_name())
         self.set_state(PyTango.DevState.INIT)
         if self.threadDict and self.threadDict.alive():
             self.threadDict.stop()
             print('waiting ...')
             #Waiting longer times does not avoid segfault (sigh)
             fn.wait(3.) 
     except: 
         traceback.print_exc()
     print('-'*80)
Example #11
 def delete_device(self):
     # PROTECTED REGION ID(SchemaManager.delete_device) ENABLED START #
     try:
         print('-' * 80)
         print("[Device delete_device method] for %s" % self.get_name())
         self.set_state(PyTango.DevState.INIT)
         if self.threadDict and self.threadDict.alive():
             self.threadDict.stop()
             print('waiting ...')
             #Waiting longer times does not avoid segfault (sigh)
             fn.wait(3.)
     except:
         traceback.print_exc()
     print('-' * 80)
Example #12
    def restart_attribute(self, attr, d=''):
        try:
            d = self.get_attribute_archiver(attr)
            print('%s.restart_attribute(%s)' % (d, attr))
            dp = fn.get_device(d, keep=True)

            if not fn.check_device(dp):
                self.start_devices('(.*/)?' + d, do_restart=True)

            dp.AttributeStop(attr)
            fn.wait(.1)
            dp.AttributeStart(attr)
        except:
            print('%s.AttributeStart(%s) failed!' % (d, attr))
Example #13
    def restart_attribute(self,attr, d=''):
        try:
            d = self.get_attribute_archiver(attr)
            print('%s.restart_attribute(%s)' % (d,attr))
            dp = fn.get_device(d, keep=True)

            if not fn.check_device(dp):
                self.start_devices('(.*/)?'+d,do_restart=True)
                
            dp.AttributeStop(attr)
            fn.wait(.1)
            dp.AttributeStart(attr)
        except:
            print('%s.AttributeStart(%s) failed!'%(d,attr))
Example #14
 def stop_periodic_archiving(self, attribute, wait=3.):
     try:
         attribute = parse_tango_model(attribute,
                                       fqdn=True).fullname.lower()
         arch = self.get_periodic_attribute_archiver(attribute)
         if not arch:
             self.warning('%s is not archived!' % attribute)
         else:
             self.info('Removing %s from %s' % (attribute, arch))
             period = self.get_periodic_attribute_period(attribute)
             dp = fn.get_device(arch)
             v = dp.AttributeRemove([attribute, str(int(float(period)))])
             dp.UpdateAttributeList()
             fn.wait(wait)
             return v
     except:
         self.warning('stop_periodic_archiving(%s) failed!: %s' %
                      (attribute, traceback.format_exc()))
Example #15
 def restart_periodic_archiving(self, attribute, wait=3.):
     try:
         attribute = parse_tango_model(attribute,
                                       fqdn=True).fullname.lower()
         arch = self.get_periodic_attribute_archiver(attribute)
         if not arch:
             self.warning('%s is not archived!' % attribute)
         else:
             self.info('Restarting %s at %s' % (attribute, arch))
             dp = fn.get_device(arch)
             v = dp.AttributeStop(attribute)
             dp.ResetErrorAttributes()
             fn.wait(wait)
             v = dp.AttributeStart(attribute)
             return v
     except:
         self.warning('restart_periodic_archiving(%s) failed!: %s' %
                      (attribute, traceback.format_exc()))
Example #16
    def GetAttDataBetweenDates(self, argin):
        """
        Arguments to be AttrName, StartDate, StopDate, Synchronous
        
        If Synchronous is missing or False, data is buffered into attributes, which names are returned
        If True or Yes, all the data is returned when ready
        
        Data returned will be (rows,[t0,v0,t1,v1,t2,v2,...])
        """
        print(time.ctime() + " In " + self.get_name() +
              "::GetAttDataBetweenDates(%s)" % argin)
        #    Add your own code here
        size = 0
        aname = argin[0]
        tag = self.attr2tag(aname)
        dates = self.dates2times(argin[1:3])
        RW = False
        synch = fn.searchCl('yes|true', str(argin[3:4]))
        attrs = [tag, tag + '_r', tag + '_w', tag +
                 '_t'] if RW else [tag, tag + '_r', tag + '_w', tag + '_t']

        self.reader.get_attribute_values(
            aname, (lambda v: self.reader_hook(aname, v)),
            dates[0],
            dates[1],
            decimate=True,
            cache=self.UseApiCache)
        self.counter += 1
        print(self.counter)

        argout = [fn.shape(attrs), [a for a in attrs]]

        if not synch:
            print('\t%s' % argout)
            return argout

        else:
            while not self.IsDataReady(aname):
                fandango.wait(0.1)
            data = self.AttrData[aname][-1]
            for t, v in data:
                argout.append(t)
                argout.extend(fn.toSequence(v))
            return [fn.shape(data), argout]
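
A hedged client-side sketch of calling the command above; the extractor device name, attribute and dates are examples, and only generic PyTango calls are used:

import PyTango

dp = PyTango.DeviceProxy('archiving/extractor/01')      # hypothetical device
argin = ['sys/tg_test/1/double_scalar',                  # AttrName (example)
         '2023-01-01 00:00:00', '2023-01-02 00:00:00',   # StartDate, StopDate
         'Yes']                                          # Synchronous
argout = dp.command_inout('GetAttDataBetweenDates', argin)
# Per the docstring, argout packs the shape plus either the buffer attribute
# names (asynchronous) or the interleaved [t0, v0, t1, v1, ...] data.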
Example #17
    def restart_attribute(self, attr, d=''):
        """
        execute AttributeStop/Start on subscriber device
        """
        try:
            a = self.is_attribute_archived(attr)
            if not a:
                raise Exception('%s is not archived!' % attr)
            attr = a
            d = self.get_attribute_archiver(attr)
            print('%s.restart_attribute(%s)' % (d, attr))
            dp = fn.get_device(d, keep=True)

            if not fn.check_device(dp):
                self.start_devices('(.*/)?' + d, do_restart=True)

            dp.AttributeStop(attr)
            fn.wait(10.)
            dp.AttributeStart(attr)
        except:
            print('%s.AttributeStart(%s) failed!' % (d, attr))
Example #18
    def restart_attributes(self, attributes=None, timewait=0.5):
        if attributes is None:
            attributes = self.get_attributes_not_updated()

        todo = []
        for a in attributes:
            aa = self.is_attribute_archived(a)
            if aa:
                todo.append(aa)
            else:
                self.warning('%s is not archived!' % a)

        devs = dict(fn.kmap(self.get_attribute_archiver, todo))

        for a, d in fn.randomize(sorted(devs.items())):
            if not fn.check_device(d):
                self.start_devices('(.*/)?' + d, do_restart=True)
            else:
                dp = fn.get_device(d, keep=True)
                dp.AttributeStop(a)
            fn.wait(timewait)

        fn.wait(10. * timewait)

        for a, d in devs.items():
            dp = fn.get_device(d, keep=True)
            dp.AttributeStart(a)
            fn.wait(timewait)

        print('%d attributes restarted' % len(attributes))
Example #19
    def add_periodic_attribute(self,
                               attribute,
                               period,
                               archiver=None,
                               wait=3.,
                               force=False):

        if not force and period < 500:
            raise Exception('periods below 500 ms are not allowed!')

        attribute = parse_tango_model(attribute, fqdn=True).fullname.lower()

        arch = self.get_periodic_attribute_archiver(attribute)
        if arch:
            print('%s is already archived by %s!' % (attribute, arch))
            p = self.get_periodic_attribute_period(attribute)
            if p == period:
                return False
            else:
                archiver = arch

        archiver = archiver or self.get_next_periodic_archiver(
            attrexp=fn.tango.get_dev_name(attribute) + '/*')

        if not self.is_attribute_archived(attribute):
            self.info('Attribute %s does not exist in %s database, adding it' %
                      (attribute, self.db_name))
            subs = [d for d in self.get_subscribers() if 'null' in d.lower()]
            self.add_attribute(attribute,
                               archiver=(subs[0] if subs else None),
                               code_event=True,
                               context='SERVICE')

        self.info('%s.AttributeAdd(%s,%s)' % (archiver, attribute, period))
        dp = fn.get_device(archiver, keep=True)
        dp.set_timeout_millis(30000)
        v = dp.AttributeAdd([attribute, str(int(float(period)))])
        fn.wait(wait)
        return v
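
A hedged usage sketch of the method above, assuming the schema object returned by pta.api() (the entry point used elsewhere in this document) exposes add_periodic_attribute; the schema and attribute names are examples:

import PyTangoArchiving as pta

api = pta.api('hdbpp')                         # schema name is an example
attr = 'sys/tg_test/1/double_scalar'           # hypothetical attribute
try:
    added = api.add_periodic_attribute(attr, period=3000)   # period in ms, >= 500
    print('%s: %s' % (attr, added))
except Exception as e:
    print('add_periodic_attribute(%s) failed: %s' % (attr, e))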
Example #20
 def GetAttDataBetweenDates(self, argin):
     """
     Arguments to be AttrName, StartDate, StopDate, Synchronous
     
     If Synchronous is missing or False, data is buffered into attributes, which names are returned
     If True or Yes, all the data is returned when ready
     
     Data returned will be (rows,[t0,v0,t1,v1,t2,v2,...])
     """
     print(time.ctime() + " In " + self.get_name()
           + "::GetAttDataBetweenDates(%s)" % argin)
     #    Add your own code here
     size = 0
     aname = argin[0]
     tag = self.attr2tag(aname)
     dates = self.dates2times(argin[1:3])
     RW = False
     synch = fn.searchCl('yes|true',str(argin[3:4]))
     attrs = [tag,tag+'_r',tag+'_w',tag+'_t'] if RW else [tag,tag+'_r',tag+'_w',tag+'_t']
     
     self.reader.get_attribute_values(aname,
         (lambda v: self.reader_hook(aname,v)),dates[0],dates[1],
         decimate=True, cache=self.UseApiCache)
     self.counter+=1
     print(self.counter)
     
     argout = [fn.shape(attrs),[a for a in attrs]]
     
     if not synch:
         print('\t%s' % argout)
         return argout
   
     else:
       while not self.IsDataReady(aname):
         fandango.wait(0.1)
       data = self.AttrData[aname][-1]
       for t,v in data:
         argout.append(t)
         argout.extend(fn.toSequence(v))
       return [fn.shape(data),argout]
Example #21
def hook(src, t, value):
    cache[src.normal_name] = (value)


el = fc.EventListener('A Queue for All')
el.set_value_hook(hook)
print('Subscribing %d attributes' % len(attrs))
[s.addListener(el) for k, s in sorted(sources.items())]
print('Subscription took %f seconds' % (fn.now() - t0))

t0 = fn.now()
print('Waiting ...')
while len(cache) < len(attrs):
    if fn.now() > t0 + 18.:
        break
    fn.wait(1.)
print('Attributes updated in %f seconds' % (fn.now() - t0))


def print_all():
    for i, t in enumerate(sorted(cache.items())):
        k, v = t
        print('%s/%s: %s = %s' % (i, len(attrs), k, str(v)[:40]))


print_all()

print('%d attributes were not read' % (len(attrs) - len(cache)))
print(sorted(a for a in attrs if a not in cache.keys()))
print('%d Nones' % len([v for v in cache.values() if v is None]))
Example #22
def check_archiving_schema(
        schema='hdb',
        attributes=[],
        values={},
        ti=None,
        period=7200,
        old_period=24*3600*90,
        exclude=['*/waveid', '*/wavename', '*/elotech-*'],
        use_index=True,
        loads=True,
        action=False,
        trace=True,
        export=None):

    ti = fn.now() if ti is None else str2time(ti) if isString(ti) else ti

    api = pta.api(schema)
    is_hpp = isinstance(api, pta.HDBpp)
    check = dict()
    old_period = 24*3600*old_period if old_period < 1000 \
        else (24*old_period if old_period<3600 else old_period)

    allattrs = api.get_attributes() if hasattr(
        api, 'get_attributes') else api.keys()
    print('%s contains %d attributes' % (schema, len(allattrs)))

    if attributes:
        if fn.isString(attributes) and fn.isRegexp(attributes):
            tattrs = [a for a in allattrs if clsearch(attributes, a)]
        else:
            attributes = map(fn.tango.get_normal_name, fn.toList(attributes))
            tattrs = [
                a for a in allattrs
                if fn.tango.get_normal_name(a) in attributes
            ]

    else:
        tattrs = allattrs

    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    print('%d attributes to check' % len(tattrs))
    if not len(tattrs):
        return

    if excluded:
        print('\t%d attributes excluded' % len(excluded))

    archived = {}
    for a in tattrs:
        if hasattr(api, 'get_attribute_archiver'):
            arch = api.get_attribute_archiver(a)
        else:
            arch = api[a].archiver
        if arch:
            archived[a] = arch

    print('\t%d attributes are archived' % len(archived))

    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs)
    #tdevs = filter(fn.check_device,alldevs)
    #nodevs = [fn.tango.get_normal_name(d) for d in alldevs if d not in tdevs]
    #if nodevs:
    #print('\t%d devices are not running' % len(nodevs))

    archs = sorted(set(archived.values()))
    if loads:
        astor = fn.Astor()
        astor.load_from_devs_list(archs)
        loads = fn.defaultdict(list)
        for k, s in astor.items():
            for d in s.get_device_list():
                d = fn.tango.get_normal_name(d)
                for a in archived:
                    if fn.tango.get_normal_name(archived[a]) == d:
                        loads[k].append(a)
        for k, s in sorted(loads.items()):
            print('\t%s archives %d attributes' % (k, len(s)))

    noarchs = [
        fn.tango.get_normal_name(d) for d in archs if not fn.check_device(d)
    ]
    if noarchs:
        print('\t%d archivers are not running: %s' % (len(noarchs), noarchs))

    ###########################################################################

    if isString(values) and values.endswith('.pck'):
        print('\nLoading last values from %s file\n' % values)
        import pickle
        values = pickle.load(open(values))

    elif isString(values) and values.endswith('.json'):
        print('\nLoading last values from %s file\n' % values)
        values = fn.json2dict(values)

    elif not use_index or is_hpp:
        print('\nGetting last values ...\n')
        for a in tattrs:
            values[a] = api.load_last_values(a)

    else:
        print('\nGetting updated tables from database ...\n')
        tups = pta.utils.get_table_updates(schema)
        # Some tables do not update MySQL index tables
        t0 = [a for a in archived if a in tattrs and not tups[api[a].table]]
        check.update((t, check_attribute(t, readable=True)) for t in t0
                     if not check.get(t))
        t0 = [t for t in t0 if check[t]]
        print('%d/%d archived attributes have indexes not updated ...' %
              (len(t0), len(archived)))
        if t0 and len(t0) < 100:
            vs = api.load_last_values(t0)
            tups.update((api[t].table, api[t].last_date) for t in t0)

        for a in tattrs:
            if a in tups:
                values[a] = [tups[api[a].table], 0]

    for k, v in values.items():
        if (len(v) if isSequence(v) else v):
            if isinstance(v, dict):
                v = v.values()[0]
            if isSequence(v) and len(v) == 1:
                v = v[0]
            if v and not isNumber(v[0]):
                v = [date2time(v[0]), v[1]]
            values[k] = v
        else:
            values[k] = [] if isSequence(v) else None

    print('%d values obtained' % len(values))

    ###########################################################################

    now = fn.now()
    result = fn.Struct()
    times = [t[0] for t in values.values() if t]
    futures = [t for t in times if t > now]
    times = [t for t in times if t < now]
    tmiss = []
    tfutures = [k for k, v in values.items() if v and v[0] in futures]
    tmin, tmax = min(times), max(times)
    print('\toldest update was %s' % time2str(tmin))
    print('\tnewest update was %s' % time2str(tmax))
    if futures:
        print('\t%d attributes have values in the future!' % len(futures))

    tnovals = [a for a in archived if not values.get(a, None)]
    if tnovals:
        print('\t%d archived attributes have no values' % len(tnovals))
    try:
        tmiss = [
            a for a, v in values.items()
            if v and old_period < v[0] < ti - period and a not in archived
        ]
    except:
        print(values.items()[0])
    if tmiss:
        print('\t%d/%d attrs with values are not archived anymore' %
              (len(tmiss), len(tattrs)))

    result.Excluded = excluded
    result.Schema = schema
    result.All = tattrs
    result.Archived = values

    result.NoValues = tnovals
    result.MissingOrRemoved = tmiss

    result.TMin = tmin
    result.TMax = tmax
    result.Futures = tfutures

    tup = sorted(a for a in values if values[a] and values[a][0] > ti - period)
    tok = [a for a in tup if values[a][1] not in (None, [])]
    print('\n%d/%d archived attributes are updated since %s' %
          (len(tup), len(archived), time2str(ti - period)))
    print('%d archived attributes are fully ok\n' % (len(tok)))

    tnotup = sorted(a for a in values
                    if values[a] and values[a][0] < ti - period)
    print('\t%d archived attrs are not updated' % len(tnotup))
    tupnoread = [
        a for a in tup if not values[a][1] and fn.read_attribute(a) is None
    ]

    reads = dict((a, fn.read_attribute(a)) for a in tnotup)
    tnotupread = [a for a in tnotup if reads[a] is not None]
    print('\t%d not updated attrs are readable (Lost)' % len(tnotupread))
    print('\t%d of them are not floats' %
          len([t for t in tnotupread if not isinstance(reads[t], float)]))
    print('\t%d of them are states' %
          len([t for t in tnotupread if t.lower().endswith('/state')]))
    print('\t%d of them seem motors' %
          len([t for t in tnotupread if t.lower().endswith('/position')]))

    tnotupevs = [a for a in tnotupread if fn.tango.check_attribute_events(a)]
    print('\t%d not updated attrs are readable and have events (LostEvents)' %
          len(tnotupevs))

    tnotupnotread = [a for a in tnotup if a not in tnotupread]
    print('\t%d not updated attrs are not readable' % len(tnotupnotread))

    result.Lost = tnotupread
    result.LostEvents = tnotupevs

    losts = (tnotupevs if is_hpp else tnotupread)

    diffs = dict()
    for a in losts:
        try:
            v, vv = values.get(a, (None, ))[1], reads[a]
            if fn.isSequence(v): v = fn.toList(v)
            if fn.isSequence(vv): vv = fn.toList(vv)
            diffs[a] = v != vv
            if fn.isSequence(diffs[a]):
                diffs[a] = any(diffs[a])
            else:
                diffs[a] = bool(diffs[a])
        except:
            diffs[a] = None

    fams = fn.defaultdict(list)
    for a in tnotupread:
        fams['/'.join(a.split('/')[-4:-2])].append(a)
    for f in sorted(fams):
        print('\t%s: %d attrs not updated' % (f, len(fams[f])))

    print()

    differ = [a for a in losts if diffs[a]]  #is True]
    print('\t%d/%d not updated attrs have also wrong values!!!' %
          (len(differ), len(losts)))

    rd = pta.Reader()
    only = [a for a in tnotupread if len(rd.is_attribute_archived(a)) == 1]
    print('\t%d/%d not updated attrs are archived only in %s' %
          (len(only), len(losts), schema))
    result.LostDiff = differ
    print()

    archs = sorted(set(archived.values()))
    astor = fn.Astor()
    astor.load_from_devs_list(archs)
    badloads = fn.defaultdict(list)
    for k, s in astor.items():
        for d in s.get_device_list():
            d = fn.tango.get_normal_name(d)
            for a in losts:
                if fn.tango.get_normal_name(archived[a]) == d:
                    badloads[k].append(a)
    for k, s in badloads.items():
        if len(s):
            print('\t%s archives %d lost attributes' % (k, len(s)))

    print('\t%d updated attrs are not readable' % len(tupnoread))

    result.ArchivedAndReadable = tok
    result.Updated = tup
    result.NotUpdated = tnotup
    result.Unreadable = tnotupnotread
    #result.DeviceNotRunning = nodevs
    result.ArchiverNotRunning = noarchs

    result.LostFamilies = fams

    # Tnones is for readable attributes not being archived
    tnones = [
        a for a in archived
        if (a not in values or values[a] and values[a][1] in (None, []))
        and a not in tupnoread and a not in tnotupread
    ]
    tupnones = [a for a in tnones if a in tup]

    if tupnones:
        print('\t%d archived readable attrs record empty values' %
              len(tupnones))

    result.Nones = tnones

    if 0:

        get_ratio = lambda a, b: float(len(a)) / float(len(b))

        #result.ArchRatio = get_ratio([t for t in readarch if t not in tnotup],readarch)
        #result.ReadRatio = get_ratio(result.Readable,tattrs)
        #result.LostRatio = get_ratio([a for a in tread if a in tnotup],tread)
        #result.MissRatio = get_ratio([a for a in tread if a not in tarch],tread)
        #result.OkRatio = 1.0-result.LostRatio-result.MissRatio

        #result.Summary = '\n'.join((
        #('Checking archiving of %s attributes'%(len(attributes) if attributes else schema))
        #,('%d attributes in %s, %d are currently active'%(len(api),schema,len(tarch)))
        #,('%d devices with %d archived attributes are not running'%(len(nodevs),len([a for a in api if a.rsplit('/',1) in nodevs])))
        #,('%d archived attributes (%2.1f %%) are unreadable! (check and remove)'%(len(tnoread),1e2*get_ratio(tnoread,tarch)))
        #,('%d readable attributes are not archived'%(len(tmiss)))
        #,('%d attributes (readable or not) are updated (%2.1f %% of all readables)'%(len(tok),1e2*result.OkRatio))
        #,('-'*80)
        #,('%d archived attributes (readable or not) are not updated!'%len(tnotup))
        #,('%d archived and readable attributes are not updated! (check and restart?)'%len(treadnotup))
        #,('-'*80)
        #,('%d readable attributes have been removed in the last %d days!'%(len(removed),old_period/(24*3600)))
        #,('%d readable scalar attributes are not being archived (not needed anymore?)'%len(tmscalar))
        #,('%d readable array attributes are not being archived (Ok)'%len(tmarray))
        #,('%d readable array attributes are archived (Expensive)'%len(tarray))
        #,('')))

        #if trace: print(result.Summary)
        #print('%d readable lost,Ok = %2.1f%%, %2.1f %% over all Readables (%2.1f %% of total)'%\
        #(len(treadnotup),1e2*result.ArchRatio,1e2*result.OkRatio,1e2*result.ReadRatio))

    if action:
        if action == 'start_devices':
            print('Executing action %s' % action)
            api.start_devices()

        if action == 'restart_all':
            print('Executing action %s' % action)
            devs = api.get_archivers()
            astor = fn.Astor()
            print('Restarting %d devs: %s' % (len(devs), devs))
            astor.load_from_devs_list(devs)
            astor.stop_servers()
            fn.wait(10.)
            astor.start_servers()

        #print('NO ACTIONS ARE GONNA BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
        #print("""
        #api = PyTangoArchiving.HDBpp(schema)
        #api.start_devices()

        #or

        #api = PyTangoArchiving.ArchivingAPI('%s')
        #lostdevs = sorted(set(api[a].archiver for a in result.NotUpdated))
        #print(lostdevs)
        #if lostdevs < a_reasonable_number:
        #astor = fn.Astor()
        #astor.load_from_devs_list(lostdevs)
        #astor.stop_servers()
        #fn.time.sleep(10.)
        #astor.start_servers()
        #"""%schema)

    print('\nfinished in %d seconds\n\n' % (fn.now() - ti))

    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, result.keys()))
            if 'json' in x:
                fn.dict2json(result.dict(), x)
            else:
                f = open(x, 'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(result.dict(), f)
                else:
                    f.write(fn.dict2str(result.dict()))
                f.close()

    return result
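
A hedged usage sketch of check_archiving_schema(); the schema name, attribute pattern and options are examples only:

if __name__ == '__main__':
    result = check_archiving_schema(
        schema='hdb',            # example schema
        attributes='*/vc/*',     # regexp filter, handled above
        period=7200,             # "updated" means newer than 2 hours
        action=False,            # report only, do not restart servers
        export='json')           # writes /tmp/hdb.json as in the export branch
    print('%d attributes not updated' % len(result.NotUpdated))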
Example #23
    def UpdateValues(self):
        # PROTECTED REGION ID(SchemaManager.UpdateValues) ENABLED START #
        try:

            t0 = t1 = fn.now()
            self.info_stream('UpdateValues()')

            if (self.ValuesFile or '').strip():
                self.info_stream('Load values from: %s ...' % self.ValuesFile)
                if self.ValuesFile.endswith('json'):
                    self.values = fn.json2dict(self.ValuesFile)
                else:
                    with open(self.ValuesFile) as f:
                        self.values = pickle.load(f)

                self.values = dict((a, self.get_last_value(a, v))
                                   for a, v in self.values.items())
                t1 = max(v[0] for v in self.values.values() if v)
                t1 = min((t1, fn.now()))
                self.info_stream('reference time is %s' % fn.time2str(t1))

            elif self.Threaded:
                self.info_stream('Loading values from thread cache ...')
                self.values = dict((a, v) for a, v in self.threadDict.items()
                                   if self.threadDict._updates.get(a, 0))
            else:
                self.info_stream('Loading values from db ...')
                self.values = self.api.load_last_values(self.attr_on)

            self.info_stream(
                'Updating %d values: %s' %
                (len(self.values),
                 str(len(self.values) and self.values.items()[0])))
            self.attr_ok = []
            self.attr_nok = []
            self.attr_lost = []
            self.attr_err = []
            for a, v in sorted(self.values.items()):
                try:
                    a = fn.tango.get_full_name(a)
                    if self.Threaded:
                        t1 = self.threadDict._updates.get(a, 0)
                    self.check_attribute_ok(a, v, t=t1)
                except Exception as e:
                    self.attr_err.append(a)
                    traceback.print_exc()
                    m = str("%s: %s: %s" % (a, str(v), str(e)))
                    #self.error_stream(m)
                    print('*' * 80)
                    print(fn.time2str() + ' ' + self.get_name() + '.ERROR!:' +
                          m)
                    fn.wait(1e-6)

            for a in [
                    'AttributeValues', 'AttributeOkList', 'AttributeNokList',
                    'AttributeWrongList', 'AttributeLostList',
                    'AttributeNoevList', 'AttributeStalledList'
            ]:
                self.push_change_event(a, getattr(self, 'read_%s' % a)())

            self.update_time = fn.now()
            self.state_machine()
            self.info_stream(self.get_status())
            self.info_stream('UpdateValues() took %f seconds' %
                             (fn.now() - t0))

        except Exception as e:
            traceback.print_exc()
            self.error_stream(fn.except2str())
            raise e
Example #24
    def UpdateValues(self):
        # PROTECTED REGION ID(SchemaManager.UpdateValues) ENABLED START #
        try:

            t0 = t1 = fn.now()
            self.info_stream('UpdateValues()')
            
            if (self.ValuesFile or '').strip():
                self.info_stream('Load values from: %s ...' % self.ValuesFile)
                if self.ValuesFile.endswith('json'):
                    self.values = fn.json2dict(self.ValuesFile)
                else:
                    with open(self.ValuesFile) as f:
                        self.values = pickle.load(f)
                        
                self.values = dict((a,self.get_last_value(a,v)) 
                                   for a,v in self.values.items())
                t1 = max(v[0] for v in self.values.values() if v)
                t1 = min((t1,fn.now()))
                self.info_stream('reference time is %s' % fn.time2str(t1))

            elif self.Threaded:
                self.info_stream('Loading values from thread cache ...')
                self.values = dict((a,v) for a,v in self.threadDict.items()
                    if self.threadDict._updates.get(a,0))
            else:
                self.info_stream('Loading values from db ...')
                self.values = self.api.load_last_values(self.attr_on)

            self.info_stream('Updating %d values: %s' % (
                len(self.values),str(len(self.values) 
                                     and self.values.items()[0])))
            self.attr_ok = []
            self.attr_nok = []
            self.attr_lost = []
            self.attr_err = []
            for a,v in sorted(self.values.items()):
                try:
                    a = fn.tango.get_full_name(a)
                    if self.Threaded:
                        t1 = self.threadDict._updates.get(a,0)
                    self.check_attribute_ok(a,v,t=t1)
                except Exception as e:
                    self.attr_err.append(a)
                    traceback.print_exc()
                    m = str("%s: %s: %s" % (a, str(v), str(e)))
                    #self.error_stream(m)
                    print('*'*80)
                    print(fn.time2str()+' '+self.get_name()+'.ERROR!:'+m)
                    fn.wait(1e-6)
                    
            for a in ['AttributeValues','AttributeOkList','AttributeNokList',
                    'AttributeWrongList','AttributeLostList',
                    'AttributeNoevList','AttributeStalledList']:
                self.push_change_event(a,getattr(self,'read_%s'%a)())
                
            self.update_time = fn.now()
            self.state_machine()
            self.info_stream(self.get_status())
            self.info_stream('UpdateValues() took %f seconds' % (fn.now()-t0))
            
        except Exception as e:
            traceback.print_exc()
            self.error_stream(fn.except2str())
            raise e