Example #1
    def __test__(*args):
        t0 = fd.now()
        args = args or ['*value*', '20']
        opts = dict.fromkeys(a.strip('-') for a in args if a.startswith('-'))
        args = [a for a in args if not a.startswith('-')]
        scope = args[0]
        tlimit = int((args[1:] or ['20'])[0])

        if opts:
            opts = dict(o.split('=') if '=' in o else (o, True) for o in opts)
            opts.update((o, fd.str2type(v)) for o, v in opts.items())

        print('AlarmView(Test,\n'
              '\tscope=%s,\n\ttlimit=%s,\n\t**%s)\n' % (scope, tlimit, opts))

        if opts.get('d', False):
            th = TangoAttribute.get_thread()
            th.set_period_ms(500)
            th.setLogLevel('DEBUG')

        verbose = opts.get('v', 2)

        view = AlarmView('Test', scope=scope, verbose=verbose, **opts)
        print('\n'.join('>' * 80 for i in range(4)))

        cols = 'sortkey', 'tag', 'state', 'active', 'time', 'severity'
        while fd.now() < (t0 + tlimit):
            fd.wait(3.)
            print('\n' + '<' * 80)
            l = view.sort(as_text={'cols': cols})
            print('\n'.join(l))

        print('AlarmView.__test__(%s) finished after %d seconds' %
              (args[0], fd.now() - t0))
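The option-parsing idiom used above (collecting -flag / -flag=value arguments into a dict, then converting the values) can be shown in isolation. A minimal runnable sketch, with plain strings kept where the original would apply fd.str2type:

def parse_args(args):
    # split flags ('-d', '-v=4') from positional arguments
    flags = [a.strip('-') for a in args if a.startswith('-')]
    positional = [a for a in args if not a.startswith('-')]
    opts = {}
    for f in flags:
        # '-v=4' becomes {'v': '4'}; a bare '-d' becomes {'d': True}
        key, _, value = f.partition('=')
        opts[key] = value if value else True
    return positional, opts

print(parse_args(['*value*', '20', '-d', '-v=4']))
# -> (['*value*', '20'], {'d': True, 'v': '4'})  (dict order may vary)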
Example #2
    def readComm(self, commCode, READ=True, emulation=False):
        ## A WAIT TIME IS NEEDED BEFORE READING THE BUFFER
        # The wait is divided into smaller periods.
        # In each period, whatever has been received from the serial port is read.
        # The wait finishes when, after receiving some data, there is silence again.
        t0, result, retries = fandango.now(), '', 0
        if not hasattr(self, '_Dcache'):
            self._Dcache = {}
        if emulation and commCode in self._Dcache:
            return self._Dcache[commCode]

        wtime, result, rec, lastrec, div = 0.0, '', '', '', 4.
        before = time.time()
        after = before + 0.001
        
        while wtime < self.waitTime and not (not len(rec)
                and len(lastrec.replace(commCode, '')
                        .replace('\r', '').replace('\n', ''))):
            
            #if self.trace and retries:
            #    print('In readComm(%s)(%d) Waiting %fs for answer ...'
            #        %(commCode,retries,self.waitTime))
            retries += 1

            # The wait condition aborts after waitTime, or when nothing is read
            # after something other than \r or \n.
            # Between reads a pause of waitTime/div is performed.
            # Shorter waits were tried to reduce the time the thread spends
            # waiting for an answer, but further attempts were ineffective
            # due to the imprecision of the time.sleep method.
            
            last = before
            after = time.time()
            pause = self.waitTime/div - (after - before)
            fandango.wait(max(pause, 0))  #time.sleep(max(pause,0))
            before = time.time()
            lastrec = lastrec + rec
            
            if self.getSerialClass() == 'PySerial':
                nchars = self.dp.read_attribute('InputBuffer').value
                rec = self.dp.command_inout("Read",nchars)
            else: #Class is 'Serial'
                #rec = self.dp.command_inout("DevSerReadRaw")
                rec = self.dp.command_inout("DevSerReadString",0)
                
            rec = rec.strip()  # must reassign: strip() returns a new string
            
            lrclean = lastrec.replace(commCode,'').replace('\r','').replace('\n','')
            rrclean = rec.replace('\r','\\r').replace('\n','\\n')
            if self.trace and rrclean:
                #self.debug
                print('received(' + str(wtime) + ';' + str(after - last) + ';'
                      + str(len(lrclean)) + ';' + str(len(rec))
                      + "): '" + rrclean + "'")
            result += rec
            wtime += self.waitTime/div

        self._Dcache[commCode] = result
        readtime = fandango.now()-t0
        self.maxreadtime = max((self.maxreadtime,readtime))
        if self.trace:
            print('ReadComm(%s) = %s done in %f seconds (max = %f, + %f)' % 
                (commCode,result.strip(),readtime,self.maxreadtime,fandango.now()-self.lasttime))

        return result
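The core of readComm is a polling loop: keep reading until some data has arrived and a later poll returns nothing (silence), or the total wait time expires. A stripped-down runnable sketch of that pattern, assuming a read_chunk() callable in place of the Tango serial device proxy:

import time

def read_until_silent(read_chunk, timeout=2.0, div=4.):
    # Poll read_chunk() until data was received and a poll returns nothing
    waited, result, rec, lastrec = 0.0, '', '', ''
    pause = timeout / div
    while waited < timeout:
        time.sleep(pause)
        waited += pause
        lastrec += rec
        rec = read_chunk()  # returns '' when nothing is pending
        result += rec
        if not rec and lastrec.strip():
            break  # silence after some data: the answer is complete
    return result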
Example #3
    def checkScales(self):
        bounds = getTrendBounds(self.trend,True)
        
        if self.on_check_scales:
            return False
        
        try:
            self.on_check_scales = True
            ## Check to be done before triggering anything else
            diff = bounds[0]-self.last_bounds[0], bounds[1]-self.last_bounds[-1]
            diff = max(map(abs,diff))
            td = fn.now()-self.last_bounds[-1]
            r = max((30.,0.5*(bounds[1]-bounds[0])))
            ## This avoids re-entrant calls into checkScales
            self.last_bounds = (bounds[0],bounds[1],fn.now())
            
            if self.trend.getXDynScale():
                if not getattr(self.trend,'_configDialog',None):
                    if (bounds[-1]<(time.time()-3600) 
                        or (bounds[-1]-bounds[0])>7200):
                        self.info('Disabling XDynScale when showing past data')
                        self.trend.setXDynScale(False) 

                if self.trend.isPaused(): # A paused trend will not load data
                    self.warning('resume plotting ...')
                    self.trend.setPaused(False)
                    self.trend._pauseAction.setChecked(False)

            self.debug('In checkScales(%s,%s,%s)'%(str(bounds),diff,r))
            self.checkBuffers()
            self.debug('Out of checkScales(%s,%s,%s)'%(str(bounds),diff,r))
        except:
            self.warning(traceback.format_exc())
        finally:
            self.on_check_scales = False
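The on_check_scales flag is a plain re-entrancy guard: it is set on entry and cleared in the finally clause, so a callback fired from inside checkBuffers() cannot recurse into checkScales(). The pattern in isolation, as a runnable sketch:

class Guarded(object):
    def __init__(self):
        self.busy = False
    def refresh(self):
        if self.busy:  # re-entrant call: reject immediately
            return False
        try:
            self.busy = True
            self.do_work()  # may indirectly call refresh() again
        finally:
            self.busy = False  # always release the guard
        return True
    def do_work(self):
        self.refresh()  # the nested call is harmlessly rejected

assert Guarded().refresh() is True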
Example #4
    def checkBuffers(self,*args,**kwargs):
        self.warning('In CheckBuffers(%s)'%str(self.trend.trendSets.keys()))
        #self.trend.doReplot()
        t0 = fn.now()
        if t0 - self.last_check_buffers < 1.:
            return
        
        self.show_dialog(not self.dialog()._checked)
        
        for n,ts in self.trend.trendSets.iteritems():
            try:
                model = ts.getModel()
                if model in self.last_args: self.last_args[model][-1] = 0
                self.debug('%s buffer has %d values' % 
                    (model, len(getattr(ts,'_xBuffer',[]))))
                
                # HOOK ADDED FROM CLIENT SIDE, getArchivedTrendValues
                self.value_setter(ts,model,
                    **{'insert':True,'forced':kwargs.get('forced')})
                
                if not fn.tango.check_attribute(model,readable=True):
                    ## THIS CODE MUST BE HERE, NEEDED FOR DEAD ATTRIBUTES
                    self.warning('checkBuffers(%s): attribute forced ...' % model)
                    ts.forceReading()
            except: 
                self.warning(traceback.format_exc())

        self.trend.doReplot()
        self.last_check_buffers = fn.now()
        #d = self.last_check_buffers - t0
        #if d > 0.2:
            #self.warning('checkBuffers is too intensive (%f), disable dynscale'%d)
            #self.stopPlotting()
        self.warning('Out of CheckBuffers(%s)'%str(self.trend.trendSets.keys()))
Example #5
    def get_table_timestamp(self,
                            table,
                            method='max',
                            epoch=None,
                            ignore_errors=False):  #, tref = -180*86400):
        """
        method should be min() for first value and max() for last
        this query goes directly to table indexes
        this doesn't access values (but it is much faster)
        
        if table is an attribute name, only these attribute is checked
        
        ignore_errors=True, it will ignore dates out of 1970-NOW interval
        
        epoch=timestamp, gets last timestamp before epoch
        
        Returns a tuple containing:
            (the first/last value stored, in epoch and date format, 
                size of table, time needed)
        """
        t0, last, size = fn.now(), 0, 0
        #print('get_last_value_in_table(%s, %s)' % (self.self_name, table))

        if table in self.get_data_tables():
            ids = self.get_attributes_by_table(table, as_id=True)
        else:
            aid, atype, table = self.get_attr_id_type_table(table)
            ids = [aid]

        int_time = any('int_time' in v
                       for v in self.getTableIndex(table).values())
        # If using UNIX_TIMESTAMP THE INDEXING FAILS!!
        field = 'int_time' if int_time else 'data_time'
        q = 'select %s(%s) from %s ' % (method, field, table)
        size = self.getTableSize(table)
        r = []
        part = None  #self.get_last_partition(table)
        if part is not None and method == 'max':
            q += 'partition (%s)' % part

        for i in ids:
            qi = q + ' where att_conf_id=%d' % i
            #if tref and int_time: where += ('int_time <= %d'% (tref))
            r.extend(self.Query(qi))

        method = {'max': max, 'min': min}[method]
        r = [
            self.mysqlsecs2time(l[0]) if int_time else fn.date2time(l[0])
            for l in r if l[0] not in (None, 0)
        ]
        r = [l for l in r if l and (ignore_errors or 1e9 < l < fn.now())]

        if len(r):
            last = method(r)
            date = fn.time2str(last)
        else:
            self.debug('No values in %s' % table)
            last, date = None, ''

        return (last, date, size, fn.now() - t0)
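A hedged usage sketch; the api object, schema and table name are assumptions standing for the HDB++ database class these methods belong to:

import PyTangoArchiving as pta
api = pta.api('hdbpp')  # schema name is an assumption
last, date, size, cost = api.get_table_timestamp('att_scalar_devdouble_ro')
print('last insert: %s (%s), %d rows, query took %.2f s'
      % (date, last, size, cost))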
Example #6
def decimate_db_table_by_time(db,table,att_id,tstart,tend,period=1,
        id_column="att_conf_id",time_column='data_time',min_to_delete=3,
        optimize = False):
    """
    This simplified method will remove all values in a table that are nearer than a given period
    It doesnt analyze values, it just gets the last value within the interval
    
    It is the most suitable for hdb++ and arrays
    
    Partition optimization and repair should be called afterwards
    
    https://dev.mysql.com/doc/refman/5.6/en/partitioning-maintenance.html
    
    ALTER TABLE t1 REBUILD PARTITION p0, p1;
    ALTER TABLE t1 OPTIMIZE PARTITION p0, p1;
    ALTER TABLE t1 REPAIR PARTITION p0,p1;
    """
    t0 = fn.now()
    s0 = db.getTableSize(table)
    if fn.isNumber(tstart):
        tstart,tend = fn.time2str(tstart),fn.time2str(tend)
    q = "select distinct CAST(UNIX_TIMESTAMP(%s) AS DOUBLE) from %s where %s = %s and %s between '%s' and '%s'" % (
        time_column, table, id_column, att_id, time_column, tstart, tend)
    partitions = get_partitions_from_query(db,q)
    print('Query: '+q)
    print('table size is %s, partitions affected: %s' % (s0, partitions))
    vals = db.Query(q)
    t1 = fn.now()
    print('query took %d seconds, %d rows returned' % ((t1-t0), len(vals)))
    if not vals: 
        return
    goods,p = [vals[0][0]],vals[0][0]
    for i,v in enumerate(vals):
        v = v[0]
        if v > period+goods[-1] and p!=goods[-1]:
            goods.append(p)
        p = v
        
    print(fn.now()-t1)
    print('%d rows to delete, %d to preserve' % (len(vals)-len(goods), len(goods))) 
    for i in range(len(goods)-1):
        s,e = goods[i],goods[i+1]
        s,e = fn.time2str(s,us=True),fn.time2str(e,us=True)
        dq = "delete from %s where %s = %s and %s > '%s' and %s < '%s'" % (
            table, id_column, att_id, time_column, s, time_column, e)
        if not i%1000: print(dq)
        db.Query(dq)
        
    t2 = fn.now()
    s1 = db.getTableSize(table)
    print('deleting %d rows took %d seconds' % (s0-s1, t2-t1))
    if optimize:# or (goods[-1] - goods[0]) > 86400*5:
        rq = 'alter table %s optimize partition %s' % (table,partitions)
        print(rq)
        db.Query(rq)
        print('Optimizing took %d seconds' % (fn.now()-t2))
        
    return s1-s0
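A hedged usage sketch; connection credentials, table name and attribute id are placeholders. This would keep at most one value every 10 seconds for the chosen attribute during the first week of January:

import fandango as fn
db = FriendlyDB('hdbpp', user='manager', passwd='secret')  # placeholders
decimate_db_table_by_time(
    db, 'att_scalar_devdouble_ro', att_id=112,
    tstart=fn.str2time('2023-01-01 00:00:00'),
    tend=fn.str2time('2023-01-08 00:00:00'),
    period=10, optimize=True)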
Example #7
def save_schema_values(schema, filename=None):
    t0 = fn.now()
    print('Saving %s attribute values' % schema)
    filename = filename or '%s_values.pck' % schema
    api = pta.api(schema)
    attrs = api.keys() if hasattr(api, 'keys') else api.get_attributes()
    print('%d attributes in %s' % (len(attrs), schema))
    values = dict.fromkeys(filter(api.is_attribute_archived, attrs))
    print('%d attributes archived' % (len(values)))
    values.update((a, api.load_last_values(a)) for a in values.keys())
    pickle.dump(values, open(filename, 'w'))
    print('%s written, %d seconds elapsed' % (filename, fn.now() - t0))
    print(os.system('ls -lah %s' % filename))
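The generated pickle can be read back for offline inspection; a small sketch assuming the default file name produced above for the 'hdb' schema:

import pickle
values = pickle.load(open('hdb_values.pck'))
# values maps attribute name -> last stored values for that attribute
print('%d attributes in report' % len(values))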
Example #8
    def always_executed_hook(self):
        self.debug_stream("In always_executed_hook()")
        #----- PROTECTED REGION ID(PanicViewDS.always_executed_hook) ENABLED START -----#
        now = fd.now()
        self.Update(force=False)
        n = len(self.attr_AlarmList_read)

        if not self.view.last_event_time:
            self.set_state(PyTango.DevState.INIT)
        elif now - self.view.last_event_time > 60.:
            self.set_state(PyTango.DevState.UNKNOWN)
        elif len(self.attr_DisabledAlarms_read) == n:
            self.set_state(PyTango.DevState.DISABLED)
        elif len(self.attr_FailedAlarms_read) == n:
            self.set_state(PyTango.DevState.FAULT)
        elif any((self.attr_ActiveAlarms_read, self.attr_FailedAlarms_read)):
            self.set_state(PyTango.DevState.ALARM)
        else:
            self.set_state(PyTango.DevState.ON)

        status = 'AlarmView(%s): %s alarms' % (self.Scope, n)
        status += '\nupdated at %s' % fd.time2str(self.view.last_event_time)
        status += '\nDescription: %s' % '\n'.join(self.Description)
        status += '\n\nActive Alarms:\n%s' % ('\n'.join(
            self.attr_ActiveAlarms_read))

        self.set_status(status)
Example #9
 def get_last_attribute_values(self,table,n=1,
                               check_table=False,epoch=None):
     if epoch is None:
         start, epoch = None, fn.now()+600
     elif epoch < 0:
         start, epoch = fn.now()+epoch, fn.now()+600
     else:
         start = None
     if start is None:
         #Rounding to the last month partition
         start = fn.str2time(
             fn.time2str().split()[0].rsplit('-',1)[0]+'-01')
     vals = self.get_attribute_values(table, N=n, human=True, desc=True,
                                         start_date=start, stop_date=epoch)
     if len(vals):
         return vals[0] if abs(n)==1 else vals
     else: 
         return vals
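The start-date fallback above rounds down to the first day of the current month by string surgery on fn.time2str(); it is equivalent to this plain-datetime version:

import datetime
today = datetime.date.today()
month_start = today.replace(day=1).strftime('%Y-%m-%d')
# same result as fn.time2str().split()[0].rsplit('-',1)[0]+'-01'
print(month_start)  # e.g. '2023-06-01'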
Example #10
    def get_last_attribute_values(self,
                                  attribute,
                                  n=1,
                                  check_attribute=False,
                                  epoch=None,
                                  period=90 * 86400):
        """
        load_last_values provided to comply with Reader API
        get_last_attribute_values provided to comply with CommonAPI
        
        returns last n values (or just one if n=1)
        """
        vals = []
        try:
            attribute, name = self.is_attribute_archived(attribute), attribute
            if attribute:

                if epoch is None:
                    epoch = self.get_attr_timestamp(attribute, method='max')[0]
                    epoch = (int(epoch) + 3600) if epoch else 0
                if epoch < 0:
                    epoch = fn.now() + epoch

                start = (epoch or fn.now()) - abs(period)

                vals = self.get_attribute_values(attribute,
                                                 N=n,
                                                 human=True,
                                                 desc=True,
                                                 start_date=start,
                                                 stop_date=epoch)

                v = vals and vals[0]
                self.attributes[attribute].last_date = v and v[0]
                self.attributes[attribute].last_value = v and v[1]

                if len(vals):
                    vals = vals[0] if abs(n) == 1 else vals
            else:
                self.warning('%s is not archived' % name)
        except:
            self.error('get_last_attribute_values(%s) failed!' % attribute)
            self.error(traceback.format_exc())
        finally:
            return vals
Example #11
    def get_attribute_names(self, active=False, regexp=''):
        t0 = fn.now()
        if not active:
            attributes = [
                a[0].lower()
                for a in self.Query('select att_name from att_conf')
            ]
            for a in attributes:
                if a not in self.attributes:
                    self.get_attr_id_type_table(a)
            r = self.attributes.keys()
        else:
            r = self.get_archived_attributes()

        r = sorted(fn.filtersmart(r, regexp) if regexp else r)
        self.debug('get attribute names took %d ms' % (1e3 * (fn.now() - t0)))
        return r
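A hedged usage sketch; schema and attribute pattern are assumptions:

import PyTangoArchiving as pta
api = pta.api('hdbpp')  # assumed schema
names = api.get_attribute_names(active=True, regexp='*/vc/*/pressure')
print('%d matching attributes' % len(names))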
Example #12
 def get_failed_attributes(self,t=7200):
     vals = self.load_last_values(self.get_attributes())
     nones = [k for k,v in vals.items() 
                 if (not v or v[1] is None)]
     nones = [k for k in nones if fn.read_attribute(k) is not None]
     lost = [k for k,v in vals.items() 
             if k not in nones and v[0] < fn.now()-t]
     lost = [k for k in lost if fn.read_attribute(k) is not None]
     failed = nones+lost
     return sorted(failed)    
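A hedged usage sketch, with api standing for a database instance exposing this method as in the previous examples:

# attributes silent for more than 2 hours but still readable
failed = api.get_failed_attributes(t=7200)
print('%d attributes seem lost by the archiving system' % len(failed))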
Example #14
 def get_attributes_failed(self, regexp='*', timeout=3600, from_db=True):
     if from_db:
         timeout = fn.now() - timeout
         attrs = self.get_attributes(True)
         attrs = fn.filtersmart(attrs, regexp)
         print('get_attributes_failed([%d])' % len(attrs))
         print(attrs)
         vals = self.load_last_values(attrs)
         return sorted(a for a, v in vals.items()
                       if not v or v[0] < timeout)
     else:
         # Should inspect the Subscribers Error Lists
         raise NotImplementedError()
Example #15
def save_schema_values(schema, filename='', folder=''):
    """
    This method saves all last values from a given schema into a file
    it can be called from crontab to generate daily reports
    """
    t0 = fn.now()
    print('Saving %s attribute values' % schema)
    date = fn.time2str().split()[0].replace('-','')
    filename = filename or '%s_%s_values.pck' % (schema,date)
    if folder: 
        filename = '/'.join((folder,filename))

    api = pta.api(schema)
    attrs = api.keys() if hasattr(api,'keys') else api.get_attributes()
    print('%d attributes in %s' % (len(attrs),schema))
    values = dict.fromkeys(filter(api.is_attribute_archived,attrs))
    print('%d attributes archived' % (len(values)))
    values.update((a,api.load_last_values(a)) for a in values.keys())
    pickle.dump(values,open(filename,'w'))

    print('%s written, %d seconds elapsed' % (filename,fn.now()-t0))
    print(os.system('ls -lah %s' % filename))
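As the docstring notes, this is convenient to run from cron; a hypothetical wrapper script (module path, schema and folder are assumptions):

#!/usr/bin/env python
# daily_report.py, e.g. crontab entry:
#   0 6 * * * /usr/bin/python /opt/scripts/daily_report.py
from reports import save_schema_values  # hypothetical module name
save_schema_values('hdbpp', folder='/data/reports')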
Example #17
def mysqldump_by_date(schema, user, passwd, folder, start, stop,
                      compress = True, delete = True):
    """
    This method creates a backup between selected dates for each table 
    of the selected database.
    
    All dump files are exported to the same folder, and a compressed file
    is created at the end.
    
    Deleting of temporary files created (folder/*dmp) must be done manually.
    """
    print('mysqldump_by_date(%s,,,folder=%s,%s,%s,compress=%s,delete=%s)'
          % (schema, folder, start, stop, compress, delete))
    db = FriendlyDB(schema,user=user,passwd=passwd)
    print(start, stop)
    start = start if fn.isString(start) else fn.time2str(start)
    stop = stop if fn.isString(stop) else fn.time2str(stop)
    tables = db.getTables()

    print('mysqldump_by_date(%s): %d tables to backup between %s and %s' 
          % (schema,len(tables),start,stop))

    if not os.path.isdir(folder):
        print('mkdir %s' % folder)
        os.mkdir(folder)
        
    for t in tables:
        filename = ('%s/%s-%s-%s-%s.dmp' 
            % (folder,schema,t,start.split()[0],stop.split()[0]))
        cols = db.getTableCols(t)
        col = [c for c in ('time','data_time') if c in cols] 
        if col:
            where = " %s >= '%s' and %s < '%s' " % (col[0],start,col[0],stop)
        else:
            where = ""
        mysqldump(schema,user,passwd,filename,t,where)
        
    ext = ('part.' if fn.str2time(stop) > fn.now() else '') + 'tgz'
    if compress:
        filename = ('%s/%s-%s-%s.%s' 
            % (folder,schema,start.split()[0],stop.split()[0],ext))
        cmd = 'tar zcvf %s %s/*.dmp' % (filename,folder)
        print(cmd)
        fn.linos.shell_command(cmd)
    if compress and delete:
        cmd = 'rm -rf %s/*.dmp' % folder
        print(cmd)
        fn.linos.shell_command(cmd)
    return filename
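A hedged usage sketch; credentials and paths are placeholders:

fname = mysqldump_by_date('hdbpp', 'manager', 'secret', '/data/backups',
                          start='2023-01-01 00:00:00',
                          stop='2023-01-08 00:00:00')
print('backup written to %s' % fname)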
Example #19
    def get_att_conf_table(self):
        t0 = fn.now()
        #types = self.Query('select att_conf_data_type_id, data_type from att_conf_data_type')
        #types = dict(types)
        q = "select att_name,att_conf_id,att_conf.att_conf_data_type_id,data_type "
        q += " from att_conf, att_conf_data_type where "
        q += "att_conf.att_conf_data_type_id = att_conf_data_type.att_conf_data_type_id"
        ids = self.Query(q)
        #self.debug(str((q, ids)))
        #ids = [list(t)+[types[t[-1]]] for t in ids]
        for i in ids:
            attr, aid, tid, table = i
            self.attributes[attr] = fn.Struct()
            self.attributes[attr].id = aid
            self.attributes[attr].tid = tid
            self.attributes[attr].type = table
            self.attributes[attr].table = 'att_' + table
            self.attributes[attr].modes = {'MODE_E': True}

        return ids
Example #20
    def check_attributes(self, attrs='', load=False, t0=0):

        db, t0, result, vals = self, t0 or fn.now(), {}, {}
        print('Checking %s' % str(db))

        if fn.isDictionary(attrs):
            attrs, vals = attrs.keys(), attrs
            if isinstance(vals.values()[0], dict):
                vals = dict((k, v.values()[0]) for k, v in vals.items())
        else:
            if fn.isString(attrs):
                attrs = fn.filtersmart(db.get_attributes(), attrs)
                load = True

        if load:
            for a in attrs:
                vals.update(db.load_last_values(a))

        print('\t%d attributes' % len(attrs))
        result['attrs'] = attrs
        result['vals'] = vals
        result['novals'] = [a for a, v in vals.items() if not v]
        result['nones'], result['down'], result['lost'] = [], [], []
        for a, v in vals.items():
            if not v or v[1] is None:
                if not fn.read_attribute(a):  #USE read not check!!
                    result['down'].append(a)
                else:
                    result['novals' if not v else 'nones'].append(a)
            elif v[0] < (t0 - 7200):
                result['lost'].append(a)

        print('\t%d attributes have no values' % len(result['novals']))
        print('\t%d attributes are not readable' % len(result['down']))
        print('\t%d attributes are not updated' % len(result['lost']))
        print('\t%d attributes have None values' % len(result['nones']))

        return result
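A hedged usage sketch; api stands for this database class and the attribute pattern is an assumption:

report = api.check_attributes('*/vc/*')
for key in ('novals', 'nones', 'down', 'lost'):
    print('%s: %d' % (key, len(report[key])))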
Example #22
 def check_attribute_ok(self, a, v, t=0):
     """
     arguments are attribute name and last value from db, plus ref. time
     """
     r = check_attribute_value(a)
     rv = getattr(r, 'value', None)
     if isinstance(rv, (type(None), Exception)):
         # Attribute not readable
         self.attr_nok.append(a)
     elif self.is_hpp and not check_attribute_events(a):
         self.attr_nevs.append(a)
     else:
         if v is None or fn.isSequence(v) and not len(v):
             # Attribute has no values in DB
             self.attr_lost.append(a)
         else:
             # Time is compared against last update, current or read time
             t = min((t or fn.now(), fn.ctime2time(r.time)))
             v = self.get_last_value(a, v)
             try:
                 diff = v[1] != rv
             except:
                 diff = 1
             if v[0] < t - 3600:
                 if any(diff) if fn.isSequence(diff) else bool(diff):
                     # Last value much older than current data
                     self.attr_lost.append(a)
                 else:
                     self.attr_stall.append(a)
                     self.attr_ok.append(a)
             elif v[1] is None:
                 # Value is readable but not from DB
                 self.attr_err.append(a)
             else:
                 self.attr_ok.append(a)
     return
Example #24
    def Update(self, force=True):
        now = fd.now()
        if not force and (now - self.last_active_alarms_check < self.Refresh):
            return
        try:
            self.last_active_alarms_check = now
            self.attr_ActiveAlarms_read = []
            self.attr_DisabledAlarms_read = []
            self.attr_FailedAlarms_read = []
            al = self.attr_Summary_read = self.view.sort(as_text=True)
            self.attr_AlarmList_read = list(
                a.to_str(VIEW_FIELDS) for a in reversed(self.view.ordered))

            for i, a in enumerate(reversed(self.view.ordered)):
                if a.disabled:
                    self.attr_DisabledAlarms_read.insert(0, al[i])
                elif a.get_state() == 'ERROR':
                    self.attr_FailedAlarms_read.insert(0, al[i])
                elif a.active:
                    self.attr_ActiveAlarms_read.insert(0, al[i])
        except:
            err = tb.format_exc()
            self.error_stream(err)
            self.attr_ActiveAlarms_read = err.split('\n')
Example #25
def transfer_table(db,
                   db2,
                   table,
                   bunch=16 * 16 * 1024,
                   is_str=False,
                   per_value=60,
                   min_tdelta=0.2,
                   ids=[]):

    t0 = fn.now()
    tq = 0
    cols = db.getTableCols(table)

    has_int = 'int_time' in cols

    cols = sorted(c for c in cols
                  if c not in ('recv_time', 'insert_time', 'int_time'))
    it, iv, ii = (cols.index('data_time'), cols.index('value_r'),
                  cols.index('att_conf_id'))
    ix = cols.index('idx') if 'idx' in cols else None

    is_float = 'double' in table or 'float' in table

    #if is_array:
    #print("%s: THIS METHOD IS NO SUITABLE YET FOR ARRAYS!" % table)
    ## dim_x/dim_y dim_x_r/dim_y_r columns should be taken into account
    ## when array should be stored?  only when value changes, or on time/fixed basis?
    #return

    lasts = dict()

    qcols = (','.join(cols)).replace(
        'data_time', 'CAST(UNIX_TIMESTAMP(data_time) AS DOUBLE)')
    query = 'select %s from %s' % (qcols, table)
    if has_int:
        where = " where int_time >= %d and int_time < %d "
    else:
        where = " where data_time >= '%s'"
        where += " and data_time < '%s'"

    order = ' order by data_time'
    if has_int:
        #order = ' order by int_time' #It may put NULL/error values FIRST!!
        if min_tdelta > 1:
            order = ' group by int_time DIV %d' % int(min_tdelta) + order
    else:
        if min_tdelta > 1:
            order = ' group by data_time DIV %d' % int(min_tdelta) + order

    limit = ' limit %s' % bunch

    print('inserting data ...')

    count, done, changed, periodic = 0, 0, 0, 0
    attr_ids = get_table_attr_ids(db, table)
    for aii, ai in enumerate(attr_ids):

        if ids and ai not in ids:
            continue

        print('attr: %s (%s/%s)' % (ai, aii, len(attr_ids)))

        print('getting limits ...')
        last = db2.Query('select UNIX_TIMESTAMP(data_time) from %s '
                         ' where att_conf_id = %d order by '
                         'att_conf_id, data_time desc limit 1' % (table, ai))
        last = last and last[0][0] or 0
        if not last:
            last = db.Query(
                'select CAST(UNIX_TIMESTAMP(data_time) AS DOUBLE) from %s '
                ' where att_conf_id = %d '
                'order by att_conf_id,data_time limit 1' % (table, ai))
            last = last and last[0][0] or 0
        last = fn.time2str(last)

        print(last)
        end = db.Query(
            'select CAST(UNIX_TIMESTAMP(data_time) AS DOUBLE) from %s '
            ' where att_conf_id = %d '
            'order by att_conf_id,data_time desc limit 1' % (table, ai))
        end = end and end[0][0] or fn.now()
        if end > fn.now(): end = fn.now()
        end = fn.time2str(end, us=True)
        print(end)

        #return
        while True:
            print('attr: %s (%s/%s)' % (ai, aii, len(attr_ids)))
            values = ''
            #.split('.')[0]
            prev = last
            print('last: %s' % last)
            nxt = fn.time2str(fn.str2time(last) + 4 * 86400)

            if fn.str2time(last) >= fn.now() or fn.str2time(nxt) >= fn.now():
                break
            if fn.str2time(last) + 60 >= fn.str2time(end):
                break
            if has_int:
                qr = query + (where %
                              (int(fn.str2time(last)), int(fn.str2time(nxt))))
            else:
                qr = query + (where % (last, nxt))

            qr += ' and att_conf_id = %s' % ai
            qr += order + limit
            print(qr)
            tq = fn.now()
            cursor = db.Query(qr, export=False)
            print(fn.now() - tq)
            v = cursor.fetchone()
            if v is None:
                last = nxt
            else:
                last = fn.time2str(v[it], us=True)

            if fn.str2time(last) + 60 >= fn.str2time(end):
                break  #It must be checked before and after querying
            if v is None:
                continue

            curr = 0
            for _i in range(bunch):
                #print(_i,bunch)
                curr += 1
                count += 1
                i, t, w = v[ii], v[it], v[iv]
                x = v[ix] if ix is not None else None

                last = fn.time2str(t, us=True)
                if i not in lasts:
                    diff = True
                elif t < lasts[i][0] + min_tdelta:
                    diff = False
                else:
                    diff = (w != lasts[i][1])
                    if is_float:
                        if w and None not in (w, lasts[i][1]):
                            diff = diff and abs((w - lasts[i][1]) / w) > 1e-12

                if ix is None and diff:
                    # changed scalar value
                    lasts[i] = (t, w)
                    v = map(str, v)
                    v[2] = repr(last)
                    if values:
                        values += ','
                    values += '(%s)' % ','.join(v)
                    changed += 1
                    done += 1
                    v = cursor.fetchone()
                    if v is None:
                        break

                elif ix is None and (t - lasts[i][0]) >= per_value:
                    # periodic scalar value
                    lasts[i] = (t, w)
                    v = map(str, v)
                    v[2] = repr(last)
                    if values:
                        values += ','
                    values += '(%s)' % ','.join(v)
                    periodic += 1
                    done += 1
                    v = cursor.fetchone()
                    if v is None:
                        break

                elif ix is not None and ((i, x) not in lasts or
                                         (t - lasts[(i, x)][0]) >= per_value):
                    # periodic array value
                    lasts[(i, x)] = (t, w)
                    v = map(str, v)
                    v[2] = repr(last)
                    if values:
                        values += ','
                    values += '(%s)' % ','.join(v)
                    done += 1
                    v = cursor.fetchone()
                    if v is None:
                        break

                else:
                    v = cursor.fetchone()
                    if v is None:
                        break

            if values:
                values = values.replace('None', 'NULL')
                insert = "insert into %s (%s) VALUES %s" % (
                    table, ','.join(cols), values)
                print(insert[:80], insert[-80:])
                db2.Query(insert)
            #else:
            #print('NO VALUES TO INSERT')
            #break

            print(curr, changed, periodic, done, count)
            #print(last,nxt,end)
            if last == prev:
                last = nxt
            if fn.str2time(last) >= fn.now():
                break

    print('%d/%d values inserted in %d seconds' % (done, count, fn.now() - t0))
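A hedged usage sketch for migrating one table between two servers; host names, credentials and the table name are placeholders:

src = FriendlyDB('hdbpp', host='old-host', user='manager', passwd='secret')
dst = FriendlyDB('hdbpp', host='new-host', user='manager', passwd='secret')
transfer_table(src, dst, 'att_scalar_devdouble_ro',
               per_value=60,    # keep at least one value per minute
               min_tdelta=0.2)  # skip values closer than 200 ms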
Example #27
    def get_attributes_errors(self, regexp='*', timeout=3*3600, 
                              from_db=False, extend = False):
        """
        Returns a dictionary {attribute, error/last value}
        
        If from_db=True and extend=True, it performs a full attribute check
        """
        if regexp == '*':
            self.status = fn.defaultdict(list)
        if from_db or extend:
            timeout = fn.now()-timeout
            attrs = self.get_attributes(True)
            attrs = fn.filtersmart(attrs,regexp)
            print('get_attributes_errors([%d/%d])' 
                  % (len(attrs),len(self.attributes)))
            vals = self.load_last_values(attrs)
            for a,v in vals.items():
                if v and v[0] > timeout:
                    self.status['Updated'].append(a)
                    if v[1] is not None:
                        self.status['Readable'].append(a)
                    else:
                        rv = fn.read_attribute(a)
                        if rv is not None:
                            self.status['WrongNone'].append(a)
                        else:
                            self.status['None'].append(a)
                    vals.pop(a)

            if not extend:
                self.status['NotUpdated'] = vals.keys()
            else:
                for a,v in vals.items():
                    c = fn.check_attribute(a)
                    if c is None:
                        vals[a] = 'Unreadable'
                        self.status['Unreadable'].append(a)
                    elif isinstance(c,Exception):
                        vals[a] = str(c)
                        self.status['Exception'].append(a)
                    else:
                        ev = fn.tango.check_attribute_events(a)
                        if not ev:
                            vals[a] = 'NoEvents'
                            self.status['NoEvents'].append(a)
                        else:
                            d = self.get_attribute_archiver(a)
                            e = self.get_archiver_errors(d)
                            if a in e:
                                vals[a] = e[a]
                                self.status['ArchiverError'].append(a)
                            else:
                                rv = fn.read_attribute(a)
                                if v and str(rv) == str(v[1]):
                                    vals[a] = 'NotChanged'
                                    self.status['NotChanged'].append(a)
                                else:
                                    self.status['NotUpdated'].append(a)
                                
            if regexp == '*':
                for k,v in self.status.items():
                    print('%s: %s' % (k,len(v)))
            
            return vals
        else:
            # Should inspect the Subscribers Error Lists
            vals = dict()
            for d in self.get_archivers():
                err = self.get_archiver_errors(d)
                for a,e in err.items():
                    if fn.clmatch(regexp,a):
                        vals[a] = e
            return vals    
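A hedged usage sketch; the attribute pattern and flags are assumptions:

errors = api.get_attributes_errors('*/vc/*', from_db=True, extend=True)
for attr, err in sorted(errors.items()):
    print('%s: %s' % (attr, err))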
Example #28
def check_archiving_schema(
        schema='hdb',
        attributes=[], values={},
        ti=None,
        period=7200,
        old_period=24*3600*90,
        exclude=['*/waveid', '*/wavename', '*/elotech-*'],
        use_index=True,
        loads=True,
        action=False,
        trace=True,
        export=None):

    ti = fn.now() if ti is None else str2time(ti) if isString(ti) else ti

    api = pta.api(schema)
    is_hpp = isinstance(api, pta.HDBpp)
    check = dict()
    old_period = 24*3600*old_period if old_period < 1000 \
        else (24*old_period if old_period<3600 else old_period)

    allattrs = api.get_attributes() if hasattr(
        api, 'get_attributes') else api.keys()
    print('%s contains %d attributes' % (schema, len(allattrs)))

    if attributes:
        if fn.isString(attributes) and fn.isRegexp(attributes):
            tattrs = [a for a in allattrs if clsearch(attributes, a)]
        else:
            attributes = map(fn.tango.get_normal_name, fn.toList(attributes))
            tattrs = [
                a for a in allattrs
                if fn.tango.get_normal_name(a) in attributes
            ]

    else:
        tattrs = allattrs

    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    print('%d attributes to check' % len(tattrs))
    if not len(tattrs):
        return

    if excluded:
        print('\t%d attributes excluded' % len(excluded))

    archived = {}
    for a in tattrs:
        if hasattr(api, 'get_attribute_archiver'):
            arch = api.get_attribute_archiver(a)
        else:
            arch = api[a].archiver
        if arch:
            archived[a] = arch

    print('\t%d attributes are archived' % len(archived))

    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs)
    #tdevs = filter(fn.check_device,alldevs)
    #nodevs = [fn.tango.get_normal_name(d) for d in alldevs if d not in tdevs]
    #if nodevs:
    #print('\t%d devices are not running' % len(nodevs))

    archs = sorted(set(archived.values()))
    if loads:
        astor = fn.Astor()
        astor.load_from_devs_list(archs)
        loads = fn.defaultdict(list)
        for k, s in astor.items():
            for d in s.get_device_list():
                d = fn.tango.get_normal_name(d)
                for a in archived:
                    if fn.tango.get_normal_name(archived[a]) == d:
                        loads[k].append(a)
        for k, s in sorted(loads.items()):
            print('\t%s archives %d attributes' % (k, len(s)))

    noarchs = [
        fn.tango.get_normal_name(d) for d in archs if not fn.check_device(d)
    ]
    if noarchs:
        print('\t%d archivers are not running: %s' % (len(noarchs), noarchs))

    ###########################################################################

    if isString(values) and values.endswith('.pck'):
        print('\nLoading last values from %s file\n' % values)
        import pickle
        values = pickle.load(open(values))

    elif isString(values) and values.endswith('.json'):
        print('\nLoading last values from %s file\n' % values)
        values = fn.json2dict(values)

    elif not use_index or is_hpp:
        print('\nGetting last values ...\n')
        for a in tattrs:
            values[a] = api.load_last_values(a)

    else:
        print('\nGetting updated tables from database ...\n')
        tups = pta.utils.get_table_updates(schema)
        # Some tables do not update MySQL index tables
        t0 = [a for a in archived if a in tattrs and not tups[api[a].table]]
        check.update((t, check_attribute(t, readable=True)) for t in t0
                     if not check.get(t))
        t0 = [t for t in t0 if check[t]]
        print('%d/%d archived attributes have indexes not updated ...' %
              (len(t0), len(archived)))
        if t0 and len(t0) < 100:
            vs = api.load_last_values(t0)
            tups.update((api[t].table, api[t].last_date) for t in t0)

        for a in tattrs:
            if a in tups:
                values[a] = [tups[api[a].table], 0]

    for k, v in values.items():
        if (len(v) if isSequence(v) else v):
            if isinstance(v, dict):
                v = v.values()[0]
            if isSequence(v) and len(v) == 1:
                v = v[0]
            if v and not isNumber(v[0]):
                v = [date2time(v[0]), v[1]]
            values[k] = v
        else:
            values[k] = [] if isSequence(v) else None

    print('%d values obtained' % len(values))

    ###########################################################################

    now = fn.now()
    result = fn.Struct()
    times = [t[0] for t in values.values() if t]
    futures = [t for t in times if t > now]
    times = [t for t in times if t < now]
    tmiss = []
    tfutures = [k for k, v in values.items() if v and v[0] in futures]
    tmin, tmax = min(times), max(times)
    print('\toldest update was %s' % time2str(tmin))
    print('\tnewest update was %s' % time2str(tmax))
    if futures:
        print('\t%d attributes have values in the future!' % len(futures))

    tnovals = [a for a in archived if not values.get(a, None)]
    if tnovals:
        print('\t%d archived attributes have no values' % len(tnovals))
    try:
        tmiss = [
            a for a, v in values.items()
            if v and old_period < v[0] < ti - period and a not in archived
        ]
    except:
        print(values.items()[0])
    if tmiss:
        print('\t%d/%d attrs with values are not archived anymore' %
              (len(tmiss), len(tattrs)))

    result.Excluded = excluded
    result.Schema = schema
    result.All = tattrs
    result.Archived = values

    result.NoValues = tnovals
    result.MissingOrRemoved = tmiss

    result.TMin = tmin
    result.TMax = tmax
    result.Futures = tfutures

    tup = sorted(a for a in values if values[a] and values[a][0] > ti - period)
    tok = [a for a in tup if values[a][1] not in (None, [])]
    print('\n%d/%d archived attributes are updated since %s - %s' %
          (len(tup), len(archived), ti, period))
    print('%d archived attributes are fully ok\n' % (len(tok)))

    tnotup = sorted(a for a in values
                    if values[a] and values[a][0] < ti - period)
    print('\t%d archived attrs are not updated' % len(tnotup))
    tupnoread = [
        a for a in tup if not values[a][1] and fn.read_attribute(a) is None
    ]

    reads = dict((a, fn.read_attribute(a)) for a in tnotup)
    tnotupread = [a for a in tnotup if reads[a] is not None]
    print('\t%d not updated attrs are readable (Lost)' % len(tnotupread))
    print('\t%d of them are not floats' %
          len([t for t in tnotupread if not isinstance(reads[t], float)]))
    print('\t%d of them are states' %
          len([t for t in tnotupread if t.lower().endswith('/state')]))
    print('\t%d of them seem motors' %
          len([t for t in tnotupread if t.lower().endswith('/position')]))

    tnotupevs = [a for a in tnotupread if fn.tango.check_attribute_events(a)]
    print('\t%d not updated attrs are readable and have events (LostEvents)' %
          len(tnotupevs))

    tnotupnotread = [a for a in tnotup if a not in tnotupread]
    print('\t%d not updated attrs are not readable' % len(tnotupnotread))

    result.Lost = tnotupread
    result.LostEvents = tnotupevs

    losts = (tnotupevs if is_hpp else tnotupread)

    diffs = dict()
    for a in losts:
        try:
            v, vv = values.get(a, (None, None))[1], reads[a]
            if fn.isSequence(v): v = fn.toList(v)
            if fn.isSequence(vv): vv = fn.toList(vv)
            diffs[a] = v != vv
            if fn.isSequence(diffs[a]):
                diffs[a] = any(diffs[a])
            else:
                diffs[a] = bool(diffs[a])
        except Exception:
            diffs[a] = None

    fams = fn.defaultdict(list)
    for a in tnotupread:
        fams['/'.join(a.split('/')[-4:-2])].append(a)
    for f in sorted(fams):
        print('\t%s: %d attrs not updated' % (f, len(fams[f])))

    print()

    differ = [a for a in losts if diffs[a]]  # i.e. diffs[a] is True
    print('\t%d/%d not updated attrs have also wrong values!!!' %
          (len(differ), len(losts)))

    rd = pta.Reader()
    only = [a for a in tnotupread if len(rd.is_attribute_archived(a)) == 1]
    print('\t%d/%d not updated attrs are archived only in %s' %
          (len(only), len(losts), schema))
    result.LostDiff = differ
    print()

    archs = sorted(set(archived.values()))
    astor = fn.Astor()
    astor.load_from_devs_list(archs)
    badloads = fn.defaultdict(list)
    for k, s in astor.items():
        for d in s.get_device_list():
            d = fn.tango.get_normal_name(d)
            for a in losts:
                if fn.tango.get_normal_name(archived[a]) == d:
                    badloads[k].append(a)
    for k, s in badloads.items():
        if len(s):
            print('\t%s archives %d lost attributes' % (k, len(s)))

    print('\t%d updated attrs are not readable' % len(tupnoread))

    result.ArchivedAndReadable = tok
    result.Updated = tup
    result.NotUpdated = tnotup
    result.Unreadable = tnotupnotread
    #result.DeviceNotRunning = nodevs
    result.ArchiverNotRunning = noarchs

    result.LostFamilies = fams

    # Tnones is for readable attributes not being archived
    tnones = [
        a for a in archived
        if (a not in values or values[a] and values[a][1] in (None, []))
        and a not in tupnoread and a not in tnotupread
    ]
    tupnones = [a for a in tnones if a in tup]

    if tupnones:
        print('\t%d archived readable attrs record empty values' %
              len(tupnones))

    result.Nones = tnones

    if 0:

        get_ratio = lambda a, b: float(len(a)) / float(len(b))

        #result.ArchRatio = get_ratio([t for t in readarch if t not in tnotup],readarch)
        #result.ReadRatio = get_ratio(result.Readable,tattrs)
        #result.LostRatio = get_ratio([a for a in tread if a in tnotup],tread)
        #result.MissRatio = get_ratio([a for a in tread if a not in tarch],tread)
        #result.OkRatio = 1.0-result.LostRatio-result.MissRatio

        #result.Summary = '\n'.join((
        #('Checking archiving of %s attributes'%(len(attributes) if attributes else schema))
        #,('%d attributes in %s, %d are currently active'%(len(api),schema,len(tarch)))
        #,('%d devices with %d archived attributes are not running'%(len(nodevs),len([a for a in api if a.rsplit('/',1) in nodevs])))
        #,('%d archived attributes (%2.1f %%) are unreadable! (check and remove)'%(len(tnoread),1e2*get_ratio(tnoread,tarch)))
        #,('%d readable attributes are not archived'%(len(tmiss)))
        #,('%d attributes (readable or not) are updated (%2.1f %% of all readables)'%(len(tok),1e2*result.OkRatio))
        #,('-'*80)
        #,('%d archived attributes (readable or not) are not updated!'%len(tnotup))
        #,('%d archived and readable attributes are not updated! (check and restart?)'%len(treadnotup))
        #,('-'*80)
        #,('%d readable attributes have been removed in the last %d days!'%(len(removed),old_period/(24*3600)))
        #,('%d readable scalar attributes are not being archived (not needed anymore?)'%len(tmscalar))
        #,('%d readable array attributes are not being archived (Ok)'%len(tmarray))
        #,('%d readable array attributes are archived (Expensive)'%len(tarray))
        #,('')))

        #if trace: print(result.Summary)
        #print('%d readable lost,Ok = %2.1f%%, %2.1f %% over all Readables (%2.1f %% of total)'%\
        #(len(treadnotup),1e2*result.ArchRatio,1e2*result.OkRatio,1e2*result.ReadRatio))

    if action:
        if action == 'start_devices':
            print('Executing action %s' % action)
            api.start_devices()

        if action == 'restart_all':
            print('Executing action %s' % action)
            devs = api.get_archivers()
            astor = fn.Astor()
            print('Restarting %d devs: %s' % (len(devs), devs))
            astor.load_from_devs_list(devs)
            astor.stop_servers()
            fn.wait(10.)
            astor.start_servers()

        #print('NO ACTIONS ARE GONNA BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
        #print("""
        #api = PyTangoArchiving.HDBpp(schema)
        #api.start_devices()

        #or

        #api = PyTangoArchiving.ArchivingAPI('%s')
        #lostdevs = sorted(set(api[a].archiver for a in result.NotUpdated))
        #print(lostdevs)
        #if lostdevs < a_reasonable_number:
        #astor = fn.Astor()
        #astor.load_from_devs_list(lostdevs)
        #astor.stop_servers()
        #fn.time.sleep(10.)
        #astor.start_servers()
        #"""%schema)

    print('\nfinished in %d seconds\n\n' % (fn.now() - ti))

    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, result.keys()))
            if 'json' in x:
                fn.dict2json(result.dict(), x)
            else:
                with open(x, 'w') as f:
                    if 'pck' in x or 'pickle' in x:
                        pickle.dump(result.dict(), f)
                    else:
                        f.write(fn.dict2str(result.dict()))

    return result
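# A small, hypothetical follow-up sketch (not part of the original check):
# given the Struct returned above, map the Lost attributes to the archiver
# devices recorded for them, so the affected servers can be restarted by hand.
import PyTangoArchiving as pta

def archivers_of_lost(result, schema):
    api = pta.api(schema)
    # api[a].archiver holds the archiver device assigned to attribute a
    return sorted(set(api[a].archiver for a in result.Lost if a in api))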
Exemplo n.º 29
0
def check_archiving_performance(schema='hdb',attributes=[],period=24*3600*90,\
    exclude=['*/waveid','*/wavename','*/elotech-*'],action=False,trace=True):
    import PyTangoArchiving as pta
    import fandango as fn

    ti = fn.now()
    api = pta.api(schema)
    check = dict()
    period = 24*3600*period if period < 1000 else (24*period if period < 3600 else period)
    attributes = (fn.get_matching_attributes(attributes)
                  if fn.isString(attributes) else list(map(str.lower, attributes)))
    tattrs = [a for a in api if not attributes or a in attributes]
    excluded = [a for a in tattrs if any(fn.clmatch(e,a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/',1)[0] for t in tattrs if api[t].archiver)
    tdevs = list(filter(fn.check_device, alldevs))
    nodevs = [d for d in alldevs if d not in tdevs]

    #Updating data from archiving config tables
    if not attributes:
        tattrs = sorted(a for a in api if a.rsplit('/',1)[0] in tdevs)
        tattrs = [a for a in tattrs if not any(fn.clmatch(e,a) for e in exclude)]
    print('%d attributes will not be checked (excluded or device not running)'
          % (len(api)-len(tattrs)))
    
    tarch = sorted(a for a in api if api[a].archiver)
    tnoread = sorted(t for t in tarch if t not in tattrs)
    check.update((t,None) for t in tnoread)

    #Getting attributes archived in the past and not currently active
    tmiss = [t for t in tattrs if not api[t].archiver]
    check.update((t,fn.check_attribute(t,readable=True)) for t in tmiss)
    tmiss = [t for t in tmiss if check[t]]
    tmarray = [t for t in tmiss if fn.isString(check[t].value) or fn.isSequence(check[t].value)]
    tmscalar = [t for t in tmiss if t not in tmarray]
    
    #Getting updated tables from database
    tups = pta.utils.get_table_updates(schema)
    # Some tables do not update MySQL index tables
    t0 = [a for a in tarch if a in tattrs and not tups[api[a].table]]
    check.update((t, fn.check_attribute(t, readable=True)) for t in t0
                 if not check.get(t))
    t0 = [t for t in t0 if check[t]]
    print('%d/%d archived attributes have indexes not updated ...'
          % (len(t0), len(tarch)))
    if t0 and len(t0) < 100:
        vs = api.load_last_values(t0)
        tups.update((api[t].table, api[t].last_date) for t in t0)
    tnotup = [a for a in tarch if tups[api[a].table]<fn.now()-1800]
    check.update((t,1) for t in tarch if t not in tnotup)
    
    #Updating readable attributes (all updated are considered as readable)
    tread = sorted(t for t in tattrs if t not in tnoread)
    for t in tattrs:
        if t not in check:
            check[t] = fn.check_attribute(t,readable=True)
    tread = sorted(t for t in tattrs if check[t])
    tnoread.extend(t for t in tread if not check[t])
    tnoread = sorted(set(tnoread))
          
    #tread contains all readable attributes from devices with some attribute archived
    #tnoread contains all unreadable attributes from already archived

    #Calcullating all final stats
    #tok will be all archivable attributes that are archived
    #tnotup = [a for a in tnotup if check[a]]
    #tok = [t for t in tread if t in tarch and t not in tnotup]
    tok = [t for t in tarch if t not in tnotup]
    readarch = [a for a in tread if a in tarch]
    treadnotup = [t for t in readarch if t in tnotup] #tnotup contains only data from tarch
    tokread = [t for t in readarch if t not in tnotup] #Useless, all archived are considered readable
    tarray = [t for t in tarch if check[t] and get_attribute_pytype(t) in (str,list)]
    removed = [a for a in tattrs if not api[a].archiver and tups[api[a].table]>fn.now()-period]
    
    result = fn.Struct()
    result.Excluded = excluded
    result.Schema = schema
    result.All = api.keys()
    result.Archived = tarch
    result.Readable = tread
    result.ArchivedAndReadable = readarch
    result.Updated = tok #tokread
    result.Lost = treadnotup
    result.Removed = removed
    result.TableUpdates = tups
    result.NotUpdated = tnotup
    result.Missing = tmiss
    result.MissingScalars = tmscalar
    result.MissingArrays = tmarray
    result.ArchivedArray = tarray
    result.Unreadable = tnoread
    result.DeviceNotRunning = nodevs
    
    get_ratio = lambda a,b:float(len(a))/float(len(b))
    
    result.ArchRatio = get_ratio([t for t in readarch if t not in tnotup],readarch)
    result.ReadRatio = get_ratio(result.Readable,tattrs)
    result.LostRatio = get_ratio([a for a in tread if a in tnotup],tread)
    result.MissRatio = get_ratio([a for a in tread if a not in tarch],tread)
    result.OkRatio = 1.0-result.LostRatio-result.MissRatio
    
    result.Summary = '\n'.join((
      ('Checking archiving of %s attributes'%(len(attributes) if attributes else schema))
      ,('%d attributes in %s, %d are currently active'%(len(api),schema,len(tarch)))
      ,('%d devices with %d archived attributes are not running'%(len(nodevs),len([a for a in api if a.rsplit('/',1)[0] in nodevs])))
      ,('%d archived attributes (%2.1f %%) are unreadable! (check and remove)'%(len(tnoread),1e2*get_ratio(tnoread,tarch)))
      ,('%d readable attributes are not archived'%(len(tmiss)))
      ,('%d attributes (readable or not) are updated (%2.1f %% of all readables)'%(len(tok),1e2*result.OkRatio))
      ,('-'*80)
      ,('%d archived attributes (readable or not) are not updated!'%len(tnotup))
      ,('%d archived and readable attributes are not updated! (check and restart?)'%len(treadnotup))
      ,('-'*80)
      ,('%d readable attributes have been removed in the last %d days!'%(len(removed),period/(24*3600)))
      ,('%d readable scalar attributes are not being archived (not needed anymore?)'%len(tmscalar))
      ,('%d readable array attributes are not being archived (Ok)'%len(tmarray))
      ,('%d readable array attributes are archived (Expensive)'%len(tarray))
      ,('')))
    
    if trace: print(result.Summary)
    print('%d readable lost,Ok = %2.1f%%, %2.1f %% over all Readables (%2.1f %% of total)'%\
        (len(treadnotup),1e2*result.ArchRatio,1e2*result.OkRatio,1e2*result.ReadRatio))

    if action:
        print('NO ACTIONS WILL BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
        print("""
        api = PyTangoArchiving.ArchivingAPI('%s')
        lostdevs = sorted(set(api[a].archiver for a in result.NotUpdated))
        print(lostdevs)
        if lostdevs < a_reasonable_number:
          astor = fn.Astor()
          astor.load_from_devs_list(lostdevs)
          astor.stop_servers()
          fn.time.sleep(10.)
          astor.start_servers()
        """%schema)
        
    if trace: print('finished in %d seconds'%(fn.now()-ti))
        
    return result 
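# Hypothetical usage sketch of check_archiving_performance(), assuming a
# configured 'hdb' schema; the ratio attributes come from the Struct built above.
result = check_archiving_performance('hdb', trace=True)
print('ArchRatio=%2.1f%% LostRatio=%2.1f%%'
      % (1e2*result.ArchRatio, 1e2*result.LostRatio))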
Exemplo n.º 30
0
def mysqldump_by_date(schema, user, passwd, folder, start, stop, options = '',
                      tables = None, compress = True, delete = True):
    """
    This method creates a backup between selected dates for each table 
    of the selected database.
    
    All dump files are exported to the same folder, and a compressed file
    is created at the end.
    
    Deleting of temporary files created (folder/*dmp) must be done manually.
    """
    print('mysqldump_by_date(%s,,,folder=%s,%s,%s,compress=%s,delete=%s)'
          % (schema, folder, start, stop, compress, delete))
    db = FriendlyDB(schema, user=user, passwd=passwd)
    print(start, stop)
    start = start if fn.isString(start) else fn.time2str(start)
    stop = stop if fn.isString(stop) else fn.time2str(stop)
    tables = tables or db.getTables()

    print('mysqldump_by_date(%s): %d tables to backup between %s and %s' 
          % (schema,len(tables),start,stop))

    if not os.path.isdir(folder):
        print('mkdir %s' % folder)
        os.mkdir(folder)
        
    t0 = fn.now()
    filenames = []
    
    for t in sorted(tables):
        currops = options
        filename = ('%s/%s-%s-%s-%s.dmp' 
            % (folder,schema,t,start.split()[0],stop.split()[0]))
        cols = db.getTableCols(t)
        col = [c for c in ('int_time','time','data_time') if c in cols] 
        if col and col[0] == 'int_time':
            where = " %s >= %s and %s < %s " % (
                col[0],fn.str2time(start),col[0],fn.str2time(stop))
        elif col:
            where = " %s >= '%s' and %s < '%s' " % (col[0],start,col[0],stop)
        else:
            where = ""
        if t in CONFIG_TABLES:
            currops += " --add-drop-table "
        mysqldump(schema,user,passwd,filename,t,where,options=currops)
        filenames.append(filename)
        
    t1 = fn.now()
        
    ext = ('part.' if fn.str2time(stop) > fn.now() else '') + 'tgz'
    dext = '.dmp'
    if compress:
        # doing it on separate files ...
        #for f in filenames:
            #cmd = 'tar zcvf %s.tgz %s' % (f,f)
            #print(cmd)
            #fn.linos.shell_command(cmd)
        #dext+='.tgz' 

        filename = ('%s/%s-%s-%s.%s' 
            % (folder,schema,start.split()[0],stop.split()[0],ext))
        cmd = 'tar zcvf %s %s/*%s' % (filename,folder,dext)
        print(cmd)
        fn.linos.shell_command(cmd)

    if compress and delete:
        cmd = 'rm -rf %s/*.dmp*' % folder
        print(cmd)
        fn.linos.shell_command(cmd)
        
    t2 = fn.now()
    print('Backup took %d seconds' % int(t1-t0))
    print('Compression took %d seconds' % int(t2-t1))
        
    return filename
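# Illustrative usage sketch (schema, credentials and folder are placeholder
# values, not taken from any real deployment):
fname = mysqldump_by_date('hdb', 'archiver', 'secret', '/tmp/hdb-backup',
                          '2020-01-01 00:00:00', '2020-02-01 00:00:00',
                          compress=True, delete=True)
print('backup written to %s' % fname)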
Exemplo n.º 31
0
def check_db_schema(schema, attributes = None, values = None,
                    tref = -12*3600, n = 1, filters = '*', export = 'json',
                    restart = False, subscribe = False):
    """
    tref is the time that is considered updated (e.g. now()-86400)
    n is used to consider multiple values
    
    attrs: all attributes in db
    on: archived
    off: in db but not currently archived
    ok: updated   
    
    known error causes (attrs not lost but not updated):
    
    nok: attributes are not currently readable
    noevs: attributes not sending events
    novals: attributes never recorded a value
    stall: not updated, but the current value matches the archived one
    lost: not updated, and the current value does not match the archived one
    """
    
    t0 = fn.now()
    if hasattr(schema,'schema'):
        api,schema = schema,schema.schema
    else:
        api = pta.api(schema)

    r = fn.Struct(api=api,schema=schema)    
    if isString(tref): 
        tref = fn.str2time(tref)
    r.tref = fn.now()+tref if tref < 0 else tref
    r.attrs = [a for a in (attributes or api.get_attributes())
                if fn.clmatch(filters,a)]
    print('check_db_schema(%s,attrs[%s],tref="%s",export as %s)' 
          % (schema,len(r.attrs),fn.time2str(r.tref),export))
    
    if restart and schema!='hdbpc':
        archs = [a for a in api.get_archivers() if not fn.check_device(a)]
        if archs:
            try:
                print('Restarting archivers: %s' % str(archs))
                astor = fn.Astor(archs)
                astor.stop_servers()
                astor.start_servers()
            except Exception:
                traceback.print_exc()
        
        stopped = api.get_stopped_attributes()
        print('Restarting %d stopped attributes' % len(stopped))
        api.restart_attributes(stopped)
    
    r.on = [a for a in api.get_archived_attributes() if a in r.attrs]
    r.off = [a for a in r.attrs if a not in r.on]
    
    r.archs = fn.defaultdict(list)
    r.pers = fn.defaultdict(list)
    r.values = load_schema_values(api,r.on,values,n,tref=tref)
    
    if schema in ('tdb','hdb'):
        [r.archs[api[k].archiver].append(k) for k in r.on]
    else:
        r.rvals = r.values
        r.freq, r.values = {}, {}
        for k,v in r.rvals.items():
            try:
                if n > 1:
                    v = v[0] if isSequence(v) and len(v) else v
                    r.values[k] = v[0] if isSequence(v) and len(v) else v
                    r.freq[k] = v and float(len(v))/abs(v[0][0]-v[-1][0])
                else:
                    r.values[k] = v
            except Exception as e:
                print(k,v)
                print(fn.except2str())
                
        for k in api.get_archivers():
            r.archs[k] = api.get_archiver_attributes(k)
        for k in api.get_periodic_archivers():
            r.pers[k] = api.get_periodic_archivers_attributes(k)

    # Get all updated attributes
    r.ok = [a for a,v in r.values.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a,fn.check_attribute(a)
                    ) for a in r.on if a not in r.ok)
    #r.novals = [a for a,v in r.values.items() if not v]
    r.nok, r.stall, r.noevs, r.lost, r.novals, r.evs, r.rem = [],[],[],[],[],{},[]
    # Classify each not-updated attribute into one of the states above
    
    for a,v in r.check.items():
        state = check_archived_attribute(a, v, default=CheckState.LOST, 
            cache=r, tref=r.tref, 
            check_events = subscribe and not api.is_periodic_archived(a))
        {
            #CheckState.ON : r.on,
            #CheckState.OFF : r.off,
            CheckState.OK : r.ok, #Shouldn't be any ok in check list               
            CheckState.NO_READ : r.nok,
            CheckState.STALL : r.stall,
            CheckState.NO_EVENTS : r.noevs,
            CheckState.LOST : r.lost,
            CheckState.UNK : r.novals,
         }[state].append(a)
                
    # SUMMARY
    r.summary = schema +'\n'
    r.summary += ','.join(
        """on: archived
        off: not archived
        ok: updated   
        nok: not readable
        noevs: no events
        novals: no values
        stall: not changing
        lost: not updated
        """.split('\n'))+'\n'
    
    getline = lambda k,v,l: '\t%s:\t:%d\t(%s)' % (k,len(v),l)
    
    r.summary += '\n\t%s:\t:%d\tok+stall: %2.1f %%' % (
        'attrs',len(r.attrs),
        (100.*(len(r.ok)+len(r.stall))/(len(r.on) or 1e12)))
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'on','off',len(r.on),len(r.off))
    #if r.off > 20: r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'ok','nok',len(r.ok),len(r.nok))
    if len(r.nok) > 10: 
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'noevs','novals',len(r.noevs),len(r.novals))
    if len(r.novals) > 1: 
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'lost','stall',len(r.lost),len(r.stall))
    if len(r.lost) > 1: 
        r.summary+=' !!!'
    r.summary += '\n'
        
    r.archivers = dict.fromkeys(api.get_archivers())
    for d in sorted(r.archivers):
        r.archivers[d] = api.get_archiver_attributes(d)
        novals = [a for a in r.archivers[d] if a in r.novals]   
        lost = [a for a in r.archivers[d] if a in r.lost]
        if (len(novals)+len(lost)) > 2:
            r.summary += ('\n%s (all/novals/lost): %s/%s/%s' 
                % (d,len(r.archivers[d]),len(novals),len(lost)))
            
    if hasattr(api,'get_periodic_archivers'):
        r.periodics = dict.fromkeys(api.get_periodic_archivers())
        for d in sorted(r.periodics):
            r.periodics[d] = api.get_periodic_archiver_attributes(d)
            novals = [a for a in r.periodics[d] if a in r.novals]
            lost = [a for a in r.periodics[d] if a in r.lost]
            if len(novals)+len(lost) > 2:
                r.summary += ('\n%s (all/novals/lost): %s/%s/%s' % 
                    (d,len(r.periodics[d]),len(novals),len(lost)))
        
        r.perattrs = [a for a in r.on if a in api.get_periodic_attributes()]
        r.notper = [a for a in r.on if a not in r.perattrs]
        
        
    r.summary += '\nfinished in %d seconds\n\n'%(fn.now()-t0)
    print(r.summary)
    
    if restart:
        try:
            retries = r.lost+r.novals+r.nok
            print('restarting %d attributes' % len(retries))
            api.restart_attributes(retries)
        except Exception:
            traceback.print_exc()
    
    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json','pck','pickle','txt'):
                x = '/tmp/%s.%s' % (schema,x)
            print('Saving %s file with keys:\n%s' % (x,r.keys()))
            if 'json' in x:
                fn.dict2json(r.dict(),x)
            else:
                with open(x, 'w') as f:
                    if 'pck' in x or 'pickle' in x:
                        pickle.dump(r.dict(), f)
                    else:
                        f.write(fn.dict2str(r.dict()))
                
    for k,v in r.items():
        if fn.isSequence(v):
            r[k] = sorted(v)
                
    return r
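# Hypothetical usage of check_db_schema() above: check the last 12 hours of
# the 'hdb' schema without exporting, then list the attributes diagnosed as
# lost (archived, readable, sending events, but not updated).
r = check_db_schema('hdb', tref=-12*3600, export=None)
print('%d lost attributes' % len(r.lost))
for a in r.lost:
    print('\t%s' % a)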
Exemplo n.º 32
0
 shell_command('du -sh /var/lib/mysql/%s/* > /tmp/table_sizes.txt' % schema)
 lines = open('/tmp/table_sizes.txt').readlines()
 sizes = sorted(
     (float(l.split()[0].replace('G','e3').replace('M','')),
      l.split()[1].split('/')[-1].split('.')[0]) 
      for l in lines if clmatch('[.0-9]+(G|M)',l)
      )
 biggest = sizes[-1]
 
"""

import sys
import fandango as fn
import PyTangoArchiving as pta

tstart = fn.now()

if not sys.argv[4:]:
    print(__doc__)
    sys.exit(0)

opts = [a for a in sys.argv[1:] if a.startswith('-')]
flags = set(c for a in opts for c in a if not a.startswith('--'))
args = [a for a in sys.argv[1:] if not a.startswith('-')]
schema, table, tbegin, tend, mlimit = args[:5]
if '.' in table:
    table, aid = table.split('.')
else:
    aid = None
if table.count('_') > 1 and aid is None:
    raise Exception('HDB++ tables require an ID!')
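# A minimal sketch of how the option parsing above behaves on a sample
# command line (values are illustrative only). Note that single-character
# flags keep their leading dash in the resulting set:
sample = ['-f', '--force', 'hdb', 'att_scalar_devdouble_ro.104',
          '2020-01-01', '2020-02-01', '512']
print(set(c for a in sample if a.startswith('-')
          for c in a if not a.startswith('--')))   # {'-', 'f'}
print([a for a in sample if not a.startswith('-')])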
Exemplo n.º 33
0
import fandango as fn, fandango.callbacks as fc

attrs = list(map(str.lower, fn.get_matching_attributes('bl00*eps*plc*01/*')))

a = 'bl00/ct/eps-plc-01/State'

t0 = fn.now()
sources = dict(
    (d, fc.EventSource(d, asynchronous=True)) for d in sorted(attrs))

#sources.values()[10].read(synch=True)

cache = fn.CaselessDict()


def hook(src, t, value):
    cache[src.normal_name] = value


el = fc.EventListener('A Queue for All')
el.set_value_hook(hook)
print('Subscribing %d attributes' % len(attrs))
[s.addListener(el) for k, s in sorted(sources.items())]
print('Subscription took %f seconds' % (fn.now() - t0))

t0 = fn.now()
print('Waiting ...')
while len(cache) < len(attrs):
    if fn.now() > t0 + 18.:
        break
    fn.wait(1.)
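# A short, assumed follow-up to the loop above: report how many of the
# subscribed attributes actually delivered a value within the timeout.
print('%d/%d attributes received after %f seconds'
      % (len(cache), len(attrs), fn.now() - t0))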
Exemplo n.º 34
0
    def UpdateValues(self):
        # PROTECTED REGION ID(SchemaManager.UpdateValues) ENABLED START #
        try:

            t0 = t1 = fn.now()
            self.info_stream('UpdateValues()')
            
            if (self.ValuesFile or '').strip():
                self.info_stream('Load values from: %s ...' % self.ValuesFile)
                if self.ValuesFile.endswith('json'):
                    self.values = fn.json2dict(self.ValuesFile)
                else:
                    with open(self.ValuesFile) as f:
                        self.values = pickle.load(f)
                        
                self.values = dict((a,self.get_last_value(a,v)) 
                                   for a,v in self.values.items())
                t1 = max(v[0] for v in self.values.values() if v)
                t1 = min((t1,fn.now()))
                self.info_stream('reference time is %s' % fn.time2str(t1))

            elif self.Threaded:
                self.info_stream('Loading values from thread cache ...')
                self.values = dict((a,v) for a,v in self.threadDict.items()
                    if self.threadDict._updates.get(a,0))
            else:
                self.info_stream('Loading values from db ...')
                self.values = self.api.load_last_values(self.attr_on)

            self.info_stream('Updating %d values: %s' % (
                len(self.values), str(len(self.values)
                                      and list(self.values.items())[0])))
            self.attr_ok = []
            self.attr_nok = []
            self.attr_lost = []
            self.attr_err = []
            for a,v in sorted(self.values.items()):
                try:
                    a = fn.tango.get_full_name(a)
                    if self.Threaded:
                        t1 = self.threadDict._updates.get(a,0)
                    self.check_attribute_ok(a,v,t=t1)
                except Exception as e:
                    self.attr_err.append(a)
                    traceback.print_exc()
                    m = str("%s: %s: %s" % (a, str(v), str(e)))
                    #self.error_stream(m)
                    print('*'*80)
                    print(fn.time2str()+' '+self.get_name()+'.ERROR!:'+m)
                    fn.wait(1e-6)
                    
            for a in ['AttributeValues','AttributeOkList','AttributeNokList',
                    'AttributeWrongList','AttributeLostList',
                    'AttributeNoevList','AttributeStalledList']:
                self.push_change_event(a,getattr(self,'read_%s'%a)())
                
            self.update_time = fn.now()
            self.state_machine()
            self.info_stream(self.get_status())
            self.info_stream('UpdateValues() took %f seconds' % (fn.now()-t0))
            
        except Exception as e:
            traceback.print_exc()
            self.error_stream(fn.except2str())
            raise
Exemplo n.º 35
0
    def get_attributes_errors(self,
                              regexp='*',
                              timeout=3 * 3600,
                              from_db=False,
                              extend=False):
        """
        Returns a dictionary {attribute: error or last value}
        
        If from_db=True and extend=True, it performs a full attribute check
        """
        if regexp == '*':
            self.status = fn.defaultdict(list)
        if from_db or extend:
            timeout = fn.now() - timeout
            attrs = self.get_attributes(True)
            attrs = fn.filtersmart(attrs, regexp)
            print('get_attributes_errors([%d/%d])' %
                  (len(attrs), len(self.attributes)))
            vals = self.load_last_values(attrs)
            for a, v in list(vals.items()):
                if v and v[0] > timeout:
                    self.status['Updated'].append(a)
                    if v[1] is not None:
                        self.status['Readable'].append(a)
                    else:
                        rv = fn.read_attribute(a)
                        if rv is not None:
                            self.status['WrongNone'].append(a)
                        else:
                            self.status['None'].append(a)
                    vals.pop(a)

            if not extend:
                self.status['NotUpdated'] = list(vals.keys())
            else:
                for a, v in vals.items():
                    c = fn.check_attribute(a)
                    if c is None:
                        vals[a] = 'Unreadable'
                        self.status['Unreadable'].append(a)
                    elif isinstance(c, Exception):
                        vals[a] = str(c)
                        self.status['Exception'].append(a)
                    else:
                        ev = fn.tango.check_attribute_events(a)
                        if not ev:
                            vals[a] = 'NoEvents'
                            self.status['NoEvents'].append(a)
                        else:
                            d = self.get_attribute_archiver(a)
                            e = self.get_archiver_errors(d)
                            if a in e:
                                vals[a] = e[a]
                                self.status['ArchiverError'].append(a)
                            else:
                                rv = fn.read_attribute(a)
                                if v and str(rv) == str(v[1]):
                                    vals[a] = 'NotChanged'
                                    self.status['NotChanged'].append(a)
                                else:
                                    self.status['NotUpdated'].append(a)

            if regexp == '*':
                for k, v in self.status.items():
                    print('%s: %s' % (k, len(v)))

            return vals
        else:
            # Should inspect the Subscribers Error Lists
            vals = dict()
            for d in self.get_archivers():
                err = self.get_archiver_errors(d)
                for a, e in err.items():
                    if fn.clmatch(regexp, a):
                        vals[a] = e
            return vals
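# Hypothetical usage sketch (api is assumed to be the archiving API instance
# that exposes the method above; the attribute filter is arbitrary):
errs = api.get_attributes_errors('*/vc/*', from_db=True, extend=True)
for attr, err in sorted(errs.items()):
    print('%s: %s' % (attr, err))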
Exemplo n.º 37
0
def check_db_schema(schema, tref=None):

    r = fn.Struct()
    r.api = api = pta.api(schema)
    r.tref = fn.notNone(tref, fn.now() - 3600)

    r.attrs = api.keys()
    r.on = api.get_archived_attributes()
    r.off = [a for a in r.attrs if a not in r.on]
    if schema in ('tdb', 'hdb'):
        ups = api.db.get_table_updates()
        r.vals = dict((k, (ups[api[k].table], None)) for k in r.on)
    else:
        r.vals = dict(fn.kmap(api.load_last_values, r.on))
        r.vals = dict((k, v and list(v.values())[0]) for k, v in r.vals.items())

    dups = fn.defaultdict(list)
    if getattr(api, 'dedicated', None):
        [
            dups[a].append(k) for a in r.on for k, v in api.dedicated.items()
            if a in v
        ]
        nups = [a for a, v in dups.items() if len(v) <= 1]
        [dups.pop(a) for a in nups]
    r.dups = dict(dups)

    # Get all updated attributes
    r.ok = [a for a, v in r.vals.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a, fn.check_attribute(a)) for a in r.on if a not in r.ok)
    r.nok, r.stall, r.noev, r.lost, r.evs = [], [], [], [], {}
    # Method to compare numpy values
    fbool = lambda x: all(x) if fn.isSequence(x) else bool(x)

    for a, v in r.check.items():
        # Get current value/timestamp
        vv, t = getattr(v, 'value', v), getattr(v, 'time', 0)
        t = t and fn.ctime2time(t)

        if isinstance(vv, (type(None), Exception)):
            # attribute is not readable
            r.nok.append(a)
        elif r.vals[a] and 0 < t <= r.vals[a][0]:
            # attribute timestamp doesn't change
            r.stall.append(a)
        elif r.vals[a] and fbool(vv == r.vals[a][1]):
            # attribute value doesn't change
            r.stall.append(a)
        else:
            r.evs[a] = fn.tango.check_attribute_events(a)
            if not r.evs[a]:
                # attribute doesn't send events
                r.noev.append(a)
            else:
                # archiving failure (events or polling)
                r.lost.append(a)

    # SUMMARY
    print(schema)
    for k in 'attrs on off dups ok nok noev stall lost'.split():
        print('\t%s:\t:%d' % (k, len(r.get(k))))

    return r
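# Minimal usage sketch of this lighter check_db_schema() variant, assuming a
# configured 'hdb' schema (fn as imported in the snippets above); the per-state
# counts are printed by the function itself.
r = check_db_schema('hdb', tref=fn.now() - 12*3600)
print('%d archived attributes, %d lost' % (len(r.on), len(r.lost)))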