Example #1
    def addXYModels(self, attrs, t0=None, t1=None):
        """
        Convert model and dates to
        'tgarch://alba03.cells.es:10000/sr/id/scw01/pressure?db=*;t0=2019-11-11T11:41:59;t1=2020-01-03T09:03:03;ts'
        """
        c = self.cursor()
        self.setCursor(Qt.Qt.WaitCursor)
        attrs = fn.toList(attrs)

        if not t0 and not t1 and not self.t0 and not self.t1:
            t0, t1 = self.tc.getTimes()

        if t0 and t1:
            t0 = t0 if fn.isNumber(t0) else fn.str2time(t0, relative=True)
            t1 = t1 if fn.isNumber(t1) else fn.str2time(t1, relative=True)
            self.t0, self.t1 = fn.time2str(t0, iso=1), fn.time2str(t1, iso=1)
            self.t0 = self.t0.replace(' ', 'T')
            self.t1 = self.t1.replace(' ', 'T')

        ms = []
        for attr in attrs:
            attr = fn.tango.get_full_name(attr, fqdn=True)
            attr = attr.replace('tango://', '')
            q = 'tgarch://%s?db=*;t0=%s;t1=%s' % (attr, self.t0, self.t1)
            m = (q + ';ts', q)
            ms.append(m)

        self.plot.onAddXYModel(ms)
        self.setCursor(c)
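The date handling above can be tried on its own. A minimal sketch, assuming fandango is installed and reusing the attribute from the docstring, of how the tgarch model pair is assembled:

import fandango as fn

t1 = fn.now()                      # current epoch seconds
t0 = t1 - 86400                    # 24 hours earlier
iso0 = fn.time2str(t0, iso=1).replace(' ', 'T')
iso1 = fn.time2str(t1, iso=1).replace(' ', 'T')
attr = 'alba03.cells.es:10000/sr/id/scw01/pressure'   # attribute taken from the docstring
q = 'tgarch://%s?db=*;t0=%s;t1=%s' % (attr, iso0, iso1)
print((q + ';ts', q))              # the (X, Y) model pair passed to plot.onAddXYModel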
Example #2
    def log(self, severity, msg, remote=None):
        """
        The remote flag should allow operations done outside of QMainThread to be logged
        """
        remote = remote if remote is not None else self.remote
        if remote:
            self.emit(Qt.SIGNAL('logging'), severity, msg, False)
            return
        if msg == self.last_msg:
            msg = '+1'
        else:
            self.last_msg = msg
            if self.logger:
                try:
                    if severity not in self.log_objs:
                        self.log_objs[severity] = getattr(
                            self.logger, severity,
                            lambda m, s=severity: '%s:%s: %s' %
                            (s.upper(), F.time2str(), m))
                    self.log_objs[severity](msg)
                except:
                    pass
        if self.dialog():
            if msg != '+1':
                msg = '%s:%s: %s' % (severity.upper(), F.time2str(), msg)
            if self.filters:
                msg = (F.filtersmart(msg, self.filters) or [''])[0]
            if msg:
                self.dialog().append(msg)
Example #3
def do_repair(user,passwd,condition="engine is null",database="information_schema",force=False,days=0,db_host='localhost') :
    sql = "select CONCAT(table_schema, '.', table_name) from tables where %s" % condition
    db_con = MySQLdb.connect(db_host, port=3306, user=user,passwd=passwd,db=database)
    cursor = db_con.cursor()
    cursor.execute(sql)
    rset = cursor.fetchall()
    print '%d tables match condition'%len(rset)
    now = time.time()
    days = days or 60
    tlimit = fandango.time2str(now-days*24*3600);
    now = fandango.time2str(now);
    for item in rset :
        try:
            if fandango.isSequence(item): 
                item = item[0]
            if force: 
                raise Exception,'force=True, all tables to be checked'
            elif 'att_' in item: 
                q = "select count(*) from %s where time between '%s' and '%s' order by time"%(item,tlimit,now)
                cursor.execute(q)
                count = cursor.fetchone()[0]
                q = "select * from %s where time between '%s' and '%s' order by time"%(item,tlimit,now)
                print q
                cursor.execute(q)  # desc limit 1'%item);
                l = len(cursor.fetchall())
                if abs(count-l)>5: 
                  raise Exception('%d!=%d'%(count,l))
            else: 
              raise Exception,'%s is a config table'%item
        except Exception,e:
            print e
            print 'Repairing %s ...' % item
            cursor.execute('repair table %s' % item)
            print '[OK]\n'
        time.sleep(.001)
Example #4
    def __init__(self,
                 parent=None,
                 domains=None,
                 regexp='*pnv-*',
                 USE_SCROLL=True,
                 USE_TREND=False):
        print('%s: ArchivingBrowser()' % fn.time2str())
        Qt.QWidget.__init__(self, parent)
        self.setupUi(USE_SCROLL=USE_SCROLL,
                     USE_TREND=USE_TREND,
                     SHOW_OPTIONS=False)
        self.load_all_devices()
        try:
            import PyTangoArchiving
            self.reader = PyTangoArchiving.Reader('*')
            self.archattrs = sorted(set(self.reader.get_attributes()))
            self.archdevs = list(
                set(a.rsplit('/', 1)[0] for a in self.archattrs))
        except:
            traceback.print_exc()

        self.extras = []
        #self.domains = domains if domains else ['MAX','ANY','LI/LT','BO/BT']+['SR%02d'%i for i in range(1,17)]+['FE%02d'%i for i in (1,2,4,9,11,13,22,24,29,34)]
        #self.combo.addItems((['Choose...']+self.domains) if len(self.domains)>1 else self.domains)
        self.connectSignals()
        print('%s: ArchivingBrowser(): done' % fn.time2str())
Example #5
def check_table_data(db, att_id, table, start, stop, gap, period):
    """
    db must be a fandango.FriendlyDB object
    
    this method will check different intervals within the table to 
    see whether there is available data or not for the attribute
    
    start/stop must be epoch times
    """
    cols = db.getTableCols(table)
    if 'att_conf_id' in cols:
        query = ("select count(*) from %s where att_conf_id = %s and "
                 "data_time between " % (table, att_id))
    else:
        query = ("select count(*) from %s where " "time between " % (table))

    tend = start + period
    while tend < stop:
        tq = '"%s" and "%s"' % (fn.time2str(start), fn.time2str(tend))
        try:
            r = db.Query(query + ' ' + tq)
            print('%s:%s' % (tq, r[0][0]))
        except:
            traceback.print_exc()
            print('%s: failed' % tq)
            break
        start, tend = start + gap, tend + gap

    return
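To make the scanning pattern explicit, here is the same interval walk without any database access (a sketch; the gap and period values are arbitrary):

import fandango as fn

start, stop = fn.str2time('2020-01-01'), fn.str2time('2020-01-02')
gap, period = 3600, 600          # hypothetical values: slide by 1 hour, check 10 minute windows
tend = start + period
while tend < stop:
    print('%s -> %s' % (fn.time2str(start), fn.time2str(tend)))
    start, tend = start + gap, tend + gap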
Example #6
def get_attributes_row_counts(db,attrs='*',start=-86400,stop=-1,limit=0):
    """
    DUPLICATED BY HDBPP.get_attribute_rows !!!
    
    It will return matching $attrs that recorded more than $limit values in 
    the $start-$stop period::
    
      countsrf = get_attributes_row_counts('hdbrf',start=-3*86400,limit=20000)
      
    """
    db = pta.api(db) if fn.isString(db) else db
    start = start if fn.isString(start) else fn.time2str(start) 
    stop = stop if fn.isString(stop) else fn.time2str(stop)
    
    if fn.isString(attrs):
        attrs = [a for a in db.get_attributes() if fn.clmatch(attrs,a)]
        
    r = {}
    for a in attrs:
        i,t,b = db.get_attr_id_type_table(a)
        l = db.Query("select count(*) from %s where att_conf_id = %d"
            " and data_time between '%s' and '%s'"  % (b,i,start,stop))
        c = l[0][0] if len(l) else 0
        if c >= limit:
            r[a] = c
    return r
Example #7
def get_attributes_row_counts(db, attrs='*', start=-86400, stop=-1, limit=0):
    """
    DUPLICATED BY HDBPP.get_attribute_rows !!!
    
    It will return matching $attrs that recorded more than $limit values in 
    the $start-$stop period::
    
      countsrf = get_attributes_row_counts('hdbrf',start=-3*86400,limit=20000)
      
    """
    db = pta.api(db) if fn.isString(db) else db
    start = start if fn.isString(start) else fn.time2str(start)
    stop = stop if fn.isString(stop) else fn.time2str(stop)

    if fn.isString(attrs):
        attrs = [a for a in db.get_attributes() if fn.clmatch(attrs, a)]

    r = {}
    for a in attrs:
        i, t, b = db.get_attr_id_type_table(a)
        l = db.Query("select count(*) from %s where att_conf_id = %d"
                     " and data_time between '%s' and '%s'" %
                     (b, i, start, stop))
        c = l[0][0] if len(l) else 0
        if c >= limit:
            r[a] = c
    return r
Example #8
def decimate_db_table_by_time(db,table,att_id,tstart,tend,period=1,
        id_column="att_conf_id",time_column='data_time',min_to_delete=3,
        optimize = False):
    """
    This simplified method will remove all values in a table that are nearer than a given period
    It doesnt analyze values, it just gets the last value within the interval
    
    It is the most suitable for hdb++ and arrays
    
    Partition optimization and repair should be called afterwards
    
    https://dev.mysql.com/doc/refman/5.6/en/partitioning-maintenance.html
    
    ALTER TABLE t1 REBUILD PARTITION p0, p1;
    ALTER TABLE t1 OPTIMIZE PARTITION p0, p1;
    ALTER TABLE t1 REPAIR PARTITION p0,p1;
    """
    t0 = fn.now()
    s0 = db.getTableSize(table)
    if fn.isNumber(tstart):
        tstart,tend = fn.time2str(tstart),fn.time2str(tend)
    q = "select distinct CAST(UNIX_TIMESTAMP(%s) AS DOUBLE) from %s where %s = %s and %s between '%s' and '%s'" % (
        time_column, table, id_column, att_id, time_column, tstart, tend)
    partitions = get_partitions_from_query(db,q)
    print('Query: '+q)
    print('table size is %s, partitions affected: %s' % (s0, partitions))
    vals = db.Query(q)
    t1 = fn.now()
    print('query took %d seconds, %d rows returned' % ((t1-t0), len(vals)))
    if not vals: 
        return
    goods,p = [vals[0][0]],vals[0][0]
    for i,v in enumerate(vals):
        v = v[0]
        if v > period+goods[-1] and p!=goods[-1]:
            goods.append(p)
        p = v
        
    print(fn.now()-t1)
    print('%d rows to delete, %d to preserve' % (len(vals)-len(goods), len(goods))) 
    for i in range(len(goods)-1):
        s,e = goods[i],goods[i+1]
        s,e = fn.time2str(s,us=True),fn.time2str(e,us=True)
        dq = "delete from %s where %s = %s and %s > '%s' and %s < '%s'" % (
            table, id_column, att_id, time_column, s, time_column, e)
        if not i%1000: print(dq)
        db.Query(dq)
        
    t2 = fn.now()
    s1 = db.getTableSize(table)
    print('deleting %d rows took %d seconds' % (s0-s1, t2-t1))
    if optimize:# or (goods[-1] - goods[0]) > 86400*5:
        rq = 'alter table %s optimize partition %s' % (table,partitions)
        print(rq)
        db.Query(rq)
        print('Optimizing took %d seconds' % (fn.now()-t2))
        
    return s1-s0
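A hypothetical call, following the way Example #48 below invokes this function through pta.dbs; the schema and attribute names are placeholders:

import fandango as fn
import PyTangoArchiving as pta

api = pta.api('hdbpp')                        # placeholder schema name
attr = 'sr/id/scw01/pressure'                 # placeholder attribute
aid, atype, table = api.get_attr_id_type_table(attr)
decimate_db_table_by_time(api, table, aid,
                          fn.now() - 30 * 86400, fn.now(),   # last 30 days
                          period=10, optimize=True)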
Example #9
 def update_tasks(self):
   while not self.event.isSet():
     self.last_check = time.time()
     self.info('-'*70)
     self.info( 'In WorkerDS::updateTasks ...')
     self._state = (PyTango.DevState.RUNNING)
     for task,commands in self.tasks.items():
       if task not in self.conditions:
         self.warning('\t%s not scheduled!'%task)
         continue
       if not commands[-1].startswith(task) and ' = ' not in commands[-1]:
         commands[-1] = commands[-1].replace('return ','')
         commands[-1] = task+'_result = '+commands[-1]
       try:
         self.worker.get(commands[-1])
         self.dones[task] = time.time()
       except:
         pass
       try:
         self.update_locals(task=task)
         self.info( 'In WorkerDS::updateTasks(%s): %s'%(task,commands))
         if functional.evalX(self.conditions[task],_locals=self.locals()):
           if not self.dones[task]>=self.sends[task]:
             self.warning('In WorkerDS::updateTasks(%s): still running since %s!!!'%(task,fandango.time2str(self.sends[task])))
           else:
             self.info( 'In WorkerDS::updateTasks(%s)'%task)
             map(self.worker.put,commands)
             self.sends[task] = time.time()
       except:
         self.error(traceback.format_exc())
       wait(.1,self.event)
   
     self.info(' ---- Waiting %s seconds ---- '%self.PollingSeconds)
     self._state = PyTango.DevState.ON
     while not self.waiter.isSet() and time.time()<(self.last_check+int(self.PollingSeconds)):
       status = ['Worker DS waiting %s s for next cycle ..'%self.PollingSeconds]
       status.append('Last check was at %s'%fandango.time2str(self.last_check))
       status.append('')
       for task,commands in sorted(self.tasks.items()):
         if not commands[-1].startswith(task) and ' = ' not in commands[-1]:
           commands[-1] = commands[-1].replace('return ','')
           commands[-1] = task+'_result = '+commands[-1]            
         try:
           self.worker.get(commands[-1])
           self.dones[task] = time.time()
         except:
           pass
         if self.dones[task]>self.sends[task]:
           status.append('%s: Finished at %s'%(task,fandango.time2str(self.dones[task])))
         elif self.sends[task]>self.dones[task]:
           status.append('%s: Launched at %s'%(task,fandango.time2str(self.sends[task])))
       self._status = ('\n'.join(status))
       wait(1.,self.waiter)
     self.waiter.clear()
       
   print '#'*80
   print '#'*80
Example #10
 def get_mysqlsecsdiff(self, date=None):
     """
     Returns the value to be added to dates when querying int_time tables
     """
     if date is None:
         date = fn.time2str()
     if isinstance(date, (int, float)):
         date = fn.time2str(date)
     return self.Query(
         "select (TO_SECONDS('%s')-62167222800) - UNIX_TIMESTAMP('%s')" %
         (date, date))[0][0]
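A usage sketch that just exercises the three argument forms accepted above (None, epoch number, date string); the assumption is that pta.api() returns the HDB++ object exposing this method:

import fandango as fn
import PyTangoArchiving as pta

db = pta.api('hdbpp')                              # placeholder schema name
print(db.get_mysqlsecsdiff())                      # offset for the current time
print(db.get_mysqlsecsdiff(fn.now() - 86400))      # numeric epoch is accepted
print(db.get_mysqlsecsdiff('2020-06-01 00:00:00')) # or an already formatted date string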
Example #11
def mysqldump_by_date(schema, user, passwd, folder, start, stop,
                      compress = True, delete = True):
    """
    This method creates a backup between selected dates for each table 
    of the selected database.
    
    All dump files are exported to the same folder, and a compressed file
    is created at the end.
    
    Deleting of temporary files created (folder/*dmp) must be done manually.
    """
    print('mysqldump_by_date(%s,,,folder=%s,%s,%s,compress=%s,delete=%s)'
          % (schema, folder, start, stop, compress, delete))
    db = FriendlyDB(schema,user=user,passwd=passwd)
    t,e = start,stop
    print(t,e)
    start = start if fn.isString(start) else fn.time2str(start)
    stop = stop if fn.isString(stop) else fn.time2str(stop)
    tables = db.getTables()

    print('mysqldump_by_date(%s): %d tables to backup between %s and %s' 
          % (schema,len(tables),start,stop))

    if not os.path.isdir(folder):
        print('mkdir %s' % folder)
        os.mkdir(folder)
        
    for t in tables:
        filename = ('%s/%s-%s-%s-%s.dmp' 
            % (folder,schema,t,start.split()[0],stop.split()[0]))
        cols = db.getTableCols(t)
        col = [c for c in ('time','data_time') if c in cols] 
        if col:
            where = " %s >= '%s' and %s < '%s' " % (col[0],start,col[0],stop)
        else:
            where = ""
        mysqldump(schema,user,passwd,filename,t,where)
        
    ext = ('part.' if fn.str2time(stop) > fn.now() else '') + 'tgz'
    if compress:
        filename = ('%s/%s-%s-%s.%s' 
            % (folder,schema,start.split()[0],stop.split()[0],ext))
        cmd = 'tar zcvf %s %s/*.dmp' % (filename,folder)
        print(cmd)
        fn.linos.shell_command(cmd)
    if compress and delete:
        cmd = 'rm -rf %s/*.dmp' % folder
        print(cmd)
        fn.linos.shell_command(cmd)
    return filename
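A hypothetical invocation (credentials, schema and folder are placeholders); start/stop may be passed either as date strings or as epochs, since the function converts them with fn.time2str:

import fandango as fn

stop = fn.time2str(fn.now()).split()[0] + ' 00:00:00'              # today at midnight
start = fn.time2str(fn.now() - 7 * 86400).split()[0] + ' 00:00:00' # one week earlier
tgz = mysqldump_by_date('hdbpp', 'user', 'password', '/tmp/hdbpp_dumps',
                        start, stop, compress=True, delete=True)
print('backup written to %s' % tgz)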
Example #12
    def always_executed_hook(self):
        self.debug_stream("In always_excuted_hook()")
        #----- PROTECTED REGION ID(PanicViewDS.always_executed_hook) ENABLED START -----#
        now = fd.now()
        self.Update(force=False)
        n = len(self.attr_AlarmList_read)

        if not self.view.last_event_time:
            self.set_state(PyTango.DevState.INIT)
        elif now - self.view.last_event_time > 60.:
            self.set_state(PyTango.DevState.UNKNOWN)
        elif len(self.attr_DisabledAlarms_read) == n:
            self.set_state(PyTango.DevState.DISABLED)
        elif len(self.attr_FailedAlarms_read) == n:
            self.set_state(PyTango.DevState.FAULT)
        elif any((self.attr_ActiveAlarms_read, self.attr_FailedAlarms_read)):
            self.set_state(PyTango.DevState.ALARM)
        else:
            self.set_state(PyTango.DevState.ON)

        status = 'AlarmView(%s): %s alarms' % (self.Scope, n)
        status += '\nupdated at %s' % fd.time2str(self.view.last_event_time)
        status += '\nDescription: %s' % '\n'.join(self.Description)
        status += '\n\nActive Alarms:\n%s' % ('\n'.join(
            self.attr_ActiveAlarms_read))

        self.set_status(status)
Example #13
 def state_machine(self):
     try:
         s = self.get_state()
         if len(self.attributes):
             if len(self.arch_off):
                 ns = PyTango.DevState.FAULT
             elif len(self.attr_ok)<=0.75*len(self.attr_on):
                 ns = PyTango.DevState.ALARM
             elif not self.threadDict.cycle_count:
                 ns = PyTango.DevState.MOVING
             else:
                 ns = PyTango.DevState.ON
         else:
             ns = PyTango.DevState.INIT
         if ns != s:
             self.set_state(ns)
             self.push_change_event('State')
             self.info_stream('%s => %s'%(s,ns))
             
         s = '%s status is %s, updated at %s' % (
             self.schema, ns, fn.time2str(self.update_time))
         for a in ('ArchiverOnList','ArchiverOffList','AttributeList',
                 'AttributeOnList','AttributeOffList','AttributeOkList',
                 'AttributeNokList','AttributeLostList','AttributeWrongList',
                 'AttributeNoevList','AttributeStalledList'):
             try:
                 v = str(len(getattr(self,'read_%s'%a)()))
             except Exception,e:
                 v = str(e)
             s+='\n%s = %s' % (a,v)
         self.set_status(s)        
         self.push_change_event('Status')
Example #14
    def trigger_callbacks(self, regs=None):
        """
        regs = list of addresses that changed
        """
        if not self.callbacks:
            return
        for key, cb in self.callbacks.items():
            try:
                rs, push, org = regs, False, cb
                if fn.isSequence(cb):
                    rs = regs and [r for r in regs if r in cb]
                    if not regs or rs or (len(cb) == 1
                                          and fn.isCallable(cb[0])):
                        cb = cb[-1]
                    else:
                        continue
                msg = ('%s: %s.trigger_callbacks(%s,%s): %s:%s' %
                       (fn.time2str(), self.name, fn.shortstr(
                           regs, 40), rs, key, org))
                if self.plc_obj is not None:
                    self.plc_obj.debug(msg)
                else:
                    print(msg)
                if fn.isCallable(cb):
                    cb(key)  #,push=push)
                else:
                    cb = getattr(cb, 'push_event',
                                 getattr(cb, 'event_received', None))
                    cb and cb(key)

                fn.wait(1.e-4)

            except Exception as e:
                print(fn.except2str())
                print('%s.callback(%s,%s): %s' % (self, key, cb, e))
Example #15
def main():
  print(__doc__)
  
  args = map(str.lower,sys.argv[1:])
  schemas = pta.Schemas.load()
  schemas = [a for a in args if a in schemas]
  
  options = dict((k.strip('-'),v) for k,v in 
                 ((a.split('=',1) if '=' in a else (a,""))
                    for a in args if a not in schemas))
                    #for a in args if a.startswith('--')))
   
  import platform
  host = platform.node()

  folder = options.get('folder',LOG_FOLDER)
  configs = options.get('config',None)
  restart = str(options.get('restart',False)).lower() not in ('false','no')
  email = options.get('email',False)
  if configs is not None: 
      configs = configs or get_csv_folder()
  
  for schema in schemas:
    date = fun.time2str(time.time(),'%Y%m%d_%H%M%S')
    if 'bl' not in host and configs:
      try:
        done = check_config_files(schema,restart=False,
                save='%s/missing_%s_%s_%s.pck'%(folder,host,schema,''),
                    email=email,csvfolder=configs)
      except:
        traceback.print_exc()
        
    done = check_schema_information(schema,restart=restart,email=email,
      save='%s/lost_%s_%s_%s.pck'%(folder,host,schema,''))
Example #16
def import_into_db(db,table,data,delete=False,offset=0):
    """
    db = a FriendlyDB instance
    table = table name
    data = [(time,value)] array
    offset = offset to apply to time values
    delete = boolean, if True the data between t0 and t-1 will be deleted from db before inserting.
    """
    #raise '@TODO:TEST THIS IN ARCHIVING02 BEFORE COMMIT'
    from fandango import time2str,date2str,date2time
    print 'import_into_db(%s,%s,[%s],%s,%s)'%(db,table,len(data),delete,offset)
    if delete: 
        limits = data[0][0],data[-1][0]
        t = db.Query("select count(*) from %s where time between '%s' and '%s'"%(table,time2str(limits[0]),time2str(limits[1])))[0]
        print('deleting %s values from %s'%(t,table))
        db.Query("delete from %s where time between '%s' and '%s'"%(table,time2str(limits[0]),time2str(limits[1])))
    if not db.Query('SHOW INDEX from %s'%table):
        try: db.Query('create index time on  %s (time)'%table)
        except: pass
    print('inserting %d values into %s ...'%(len(data),table))
    #for i,d in enumerate(data):
        #t = (fandango.time2str(d[0]+offset),d[1])
        #q = "INSERT INTO %s VALUES('%s',%s)"%(table,t[0],t[1])
        #db.Query(q)
    l,total = [],0
    for i,d in enumerate(data):
        l.append(d)
        if not (len(data)-(i+1))%100:
            q = "INSERT INTO `%s` VALUES %s ;"%(table,', '.join("('%s',%s)"%(fun.time2str(d[0]+offset),d[1] if 'none' not in str(d[1]).lower() else 'NULL') for d in l))
            #print q[:160]
            db.Query(q)
            total += len(l)
            print i,len(l),total
            l = []
    return total,len(data)
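A hypothetical usage sketch, assuming a fandango FriendlyDB connection (as required elsewhere in these examples) and an existing TDB-style table; credentials, table name and values are placeholders:

import fandango as fn
from fandango.db import FriendlyDB

db = FriendlyDB('tdb', user='user', passwd='password')   # placeholder credentials
t0 = fn.now() - 86400
data = [(t0 + i * 60, 23.5) for i in range(1440)]        # one reading per minute for a day
done, total = import_into_db(db, 'att_00001', data, delete=False, offset=0)
print('%d/%d rows inserted' % (done, total))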
Example #17
 def log(self,severity,msg,remote=None):
   """
   The remote flag should allow operations done outside of QMainThread to be logged
   """
   remote = remote if remote is not None else self.remote
   if remote:
     self.emit(Qt.SIGNAL('logging'),severity,msg,False)
     return
   if msg == self.last_msg: 
       msg = '+1'
   else: 
       self.last_msg = msg
       if self.logger:
           try:
               if severity not in self.log_objs: self.log_objs[severity] = \
                   getattr(self.logger,severity,(lambda m,s=severity:'%s:%s: %s'%(s.upper(),F.time2str(),m)))
               self.log_objs[severity](msg)
           except: pass
   if self.dialog():
       if msg!='+1': 
           msg = '%s:%s: %s'%(severity.upper(),F.time2str(),msg)
       if self.filters:
           msg = (F.filtersmart(msg,self.filters) or [''])[0]
       if msg:
           self.dialog().append(msg)
Example #18
def main():
    print 'Usage: archiver_health_check.py hdb tdb restart [email protected]'
    import platform
    host = platform.node()
    folder = '/intranet01mounts/controls/intranet/archiving'
    args = map(str.lower, sys.argv[1:])  # or ('hdb','tdb'))
    restart = any('restart' in s and 'false' not in s.lower() for s in args)
    email = ([s.split('=')[-1] for s in args if 'email=' in s] or [''])[0]
    schemas = [s for s in args if 'restart' not in s and '=' not in s]
    for schema in schemas:
        date = fun.time2str(time.time(), '%Y%m%d_%H%M%S')
        if 'bl' not in host:
            try:
                done = check_config_files(schema,
                                          restart=False,
                                          save='%s/missing_%s_%s_%s.pck' %
                                          (folder, host, schema, ''),
                                          email=email)
            except:
                traceback.print_exc()
        done = check_schema_information(schema,
                                        restart=restart,
                                        email=email,
                                        save='%s/lost_%s_%s_%s.pck' %
                                        (folder, host, schema, ''))
Example #19
    def state_machine(self):
        try:
            s = self.get_state()
            if len(self.attributes):
                if len(self.arch_off):
                    ns = PyTango.DevState.FAULT
                elif len(self.attr_ok) <= 0.75 * len(self.attr_on):
                    ns = PyTango.DevState.ALARM
                elif not self.threadDict.cycle_count:
                    ns = PyTango.DevState.MOVING
                else:
                    ns = PyTango.DevState.ON
            else:
                ns = PyTango.DevState.INIT
            if ns != s:
                self.set_state(ns)
                self.push_change_event('State')
                self.info_stream('%s => %s' % (s, ns))

            s = '%s status is %s, updated at %s' % (
                self.schema, ns, fn.time2str(self.update_time))
            for a in ('ArchiverOnList', 'ArchiverOffList', 'AttributeList',
                      'AttributeOnList', 'AttributeOffList', 'AttributeOkList',
                      'AttributeNokList', 'AttributeLostList',
                      'AttributeWrongList', 'AttributeNoevList',
                      'AttributeStalledList'):
                try:
                    v = str(len(getattr(self, 'read_%s' % a)()))
                except Exception, e:
                    v = str(e)
                s += '\n%s = %s' % (a, v)
            self.set_status(s)
            self.push_change_event('Status')
Example #20
    def get_table_timestamp(self,
                            table,
                            method='max',
                            epoch=None,
                            ignore_errors=False):  #, tref = -180*86400):
        """
        method should be min() for first value and max() for last
        this query goes directly to table indexes
        this doesn't access values (but it is much faster)
        
        if table is an attribute name, only these attribute is checked
        
        ignore_errors=True, it will ignore dates out of 1970-NOW interval
        
        epoch=timestamp, gets last timestamp before epoch
        
        Returns a tuple containing:
            (the first/last value stored, in epoch and date format, 
                size of table, time needed)
        """
        t0, last, size = fn.now(), 0, 0
        #print('get_last_value_in_table(%s, %s)' % (self.self_name, table))

        if table in self.get_data_tables():
            ids = self.get_attributes_by_table(table, as_id=True)
        else:
            aid, atype, table = self.get_attr_id_type_table(table)
            ids = [aid]

        int_time = any('int_time' in v
                       for v in self.getTableIndex(table).values())
        # If using UNIX_TIMESTAMP THE INDEXING FAILS!!
        field = 'int_time' if int_time else 'data_time'
        q = 'select %s(%s) from %s ' % (method, field, table)
        size = self.getTableSize(table)
        r = []
        part = None  #self.get_last_partition(table)
        if part is not None and method == 'max':
            q += 'partition (%s)' % part

        for i in ids:
            qi = q + ' where att_conf_id=%d' % i
            #if tref and int_time: where += ('int_time <= %d'% (tref))
            r.extend(self.Query(qi))

        method = {'max': max, 'min': min}[method]
        r = [
            self.mysqlsecs2time(l[0]) if int_time else fn.date2time(l[0])
            for l in r if l[0] not in (None, 0)
        ]
        r = [l for l in r if l if (ignore_errors or 1e9 < l < fn.now())]

        if len(r):
            last = method(r) if len(r) else 0
            date = fn.time2str(last)
        else:
            self.debug('No values in %s' % table)
            last, date = None, ''

        return (last, date, size, fn.now() - t0)
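Usage sketch, assuming the method belongs to the HDB++ API object returned by pta.api (the schema and table names are placeholders); it reads back the documented return tuple:

import PyTangoArchiving as pta

db = pta.api('hdbpp')                  # placeholder schema name
last, date, size, took = db.get_table_timestamp('att_scalar_devdouble_ro', method='max')
first, fdate, _, _ = db.get_table_timestamp('att_scalar_devdouble_ro', method='min')
print('table spans %s .. %s (%d rows, checked in %.1f s)' % (fdate, date, size, took))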
Example #21
 def get_alarm_date(self, alarm=None, attr_value=None, null=' NaN '):
     try:
         return ('%' + str(self.DATE_SIZE) + 's') % fandango.time2str(
             self.get_alarm_time(alarm, attr_value))
     except:
         print traceback.format_exc()
         return str(null)
Example #22
def do_repair(user,
              passwd,
              condition="engine is null",
              database="information_schema",
              force=False,
              days=0,
              db_host='localhost'):
    sql = "select CONCAT(table_schema, '.', table_name) from tables where %s" % condition
    db_con = MySQLdb.connect(db_host,
                             port=3306,
                             user=user,
                             passwd=passwd,
                             db=database)
    cursor = db_con.cursor()
    cursor.execute(sql)
    rset = cursor.fetchall()
    print '%d tables match condition' % len(rset)
    now = time.time()
    days = days or 60
    tlimit = fandango.time2str(now - days * 24 * 3600)
    now = fandango.time2str(now)
    for item in rset:
        try:
            if fandango.isSequence(item):
                item = item[0]
            if force:
                raise Exception, 'force=True, all tables to be checked'
            elif 'att_' in item:
                q = "select count(*) from %s where time between '%s' and '%s' order by time" % (
                    item, tlimit, now)
                cursor.execute(q)
                count = cursor.fetchone()[0]
                q = "select * from %s where time between '%s' and '%s' order by time" % (
                    item, tlimit, now)
                print q
                cursor.execute(q)  # desc limit 1'%item);
                l = len(cursor.fetchall())
                if abs(count - l) > 5:
                    raise Exception('%d!=%d' % (count, l))
            else:
                raise Exception, '%s is a config table' % item
        except Exception, e:
            print e
            print 'Repairing %s ...' % item
            cursor.execute('repair table %s' % item)
            print '[OK]\n'
        time.sleep(.001)
Example #23
 def setModel(self,trend=None):
     trend = trend or self.trend
     if not trend: 
         self.panel.setText('')
         return
     models = []
     for n,ts in trend.trendSets.iteritems():
         model = ts.getModel()
         modes = self.reader.is_attribute_archived(model)
         buff = getattr(ts,'_xBuffer',[])
         if buff is None or not len(buff): buff = [0]
         models.append((model,modes,len(buff),buff[0],buff[-1]))
         
     self.panel.setText('\n'.join(sorted(
         '%s\n\t%s\n\t%d values\n\t%s - %s\n'
         %(m,n,l,time2str(b),time2str(e)) for m,n,l,b,e
         in models)))
Example #24
    def setModel(self, trend=None):
        trend = trend or self.trend
        if not trend:
            self.panel.setText('')
            return
        models = []
        for n, ts in trend.trendSets.iteritems():
            model = ts.getModel()
            modes = self.reader.is_attribute_archived(model)
            buff = getattr(ts, '_xBuffer', [])
            if buff is None or not len(buff): buff = [0]
            models.append((model, modes, len(buff), buff[0], buff[-1]))

        self.panel.setText('\n'.join(
            sorted('%s\n\t%s\n\t%d values\n\t%s - %s\n' %
                   (m, n, l, time2str(b), time2str(e))
                   for m, n, l, b, e in models)))
Example #25
def check_table(db_name,table,tstart,tend):
    print('check_table(%s,%s,%s,%s)' % (db_name,table,tstart,tend))
    api = pta.api(db_name)
    tstart,tend = fn.time2str(tstart),fn.time2str(tend-1)
    rows = dict()
    for a in api:
        api.get_attr_id_type_table(a)
        if api[a].table == table:
            r = api.Query("select count(*) from %s where att_conf_id = %s and "
                "data_time between '%s' and '%s'" % (table,api[a].id,tstart,tend))
            #if r[0][0] > 1e3:
            #print(a,r)
            rows[a] = r[0][0]
            
    print('%d attributes found'  % len(rows))
    for n,k in sorted((s,a) for a,s in rows.items()):
        print('%s id=%s rows=%s' % (k, api[k].id, n))
Example #26
def check_table(db_name, table, tstart, tend):
    print('check_table(%s,%s,%s,%s)' % (db_name, table, tstart, tend))
    api = pta.api(db_name)
    tstart, tend = fn.time2str(tstart), fn.time2str(tend - 1)
    rows = dict()
    for a in api:
        api.get_attr_id_type_table(a)
        if api[a].table == table:
            r = api.Query("select count(*) from %s where att_conf_id = %s and "
                          "data_time between '%s' and '%s'" %
                          (table, api[a].id, tstart, tend))
            #if r[0][0] > 1e3:
            #print(a,r)
            rows[a] = r[0][0]

    print('%d attributes found' % len(rows))
    for n, k in sorted((s, a) for a, s in rows.items()):
        print('%s id=%s rows=%s' % (k, api[k].id, n))
Example #27
 def __init__(self,parent=None,domains=None,regexp='*pnv-*',USE_SCROLL=True,USE_TREND=False):
     print('%s: ArchivingBrowser()' % fn.time2str())
     Qt.QWidget.__init__(self,parent)
     self.setupUi(USE_SCROLL=USE_SCROLL, USE_TREND=USE_TREND, SHOW_OPTIONS=False)
     self.load_all_devices()
     try:
         import PyTangoArchiving
         self.reader = PyTangoArchiving.Reader('*') 
         self.archattrs = sorted(set(self.reader.get_attributes()))
         self.archdevs = list(set(a.rsplit('/',1)[0] for a in self.archattrs))
     except:
         traceback.print_exc()
         
     self.extras = []
     #self.domains = domains if domains else ['MAX','ANY','LI/LT','BO/BT']+['SR%02d'%i for i in range(1,17)]+['FE%02d'%i for i in (1,2,4,9,11,13,22,24,29,34)]
     #self.combo.addItems((['Choose...']+self.domains) if len(self.domains)>1 else self.domains)
     self.connectSignals()
     print('%s: ArchivingBrowser(): done' % fn.time2str())
Example #28
 def updateAlarms(self):
     # Sorting will be kept at every update
     c = 0
     for i in range(self.rows):
         for j in range(self.cols):
             if c >= len(self.tags): break
             self.updateCell(i, j, self.alarms[self.tags[c]])
             c += 1
     self.setWindowTitle(self._title + ': ' + fd.time2str())
     self.refreshTimer.setInterval(self.REFRESH_TIME)
Example #29
 def onRefreshButton(self):
     self._trend.applyNewDates()
     try:
         date = str2time(str(self.xEditStart.text()))
     except:
         try:
             date = getTrendBounds(self._trend)[0]
             self.xEditStart.setText(time2str(date))
         except:
             traceback.print_exc()
Example #30
 def generate_partition_name_for_date(self, table, date):
     """
     generates the matching partition name for the date given
     """
     if not fn.isString(date):
         date = fn.time2str(date)
     p = partition_prefixes.get(table, None)
     if p:
         p += ''.join(date.split('-')[0:2]) + '01'
     return p
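The naming convention implemented above (prefix + YYYYMM + '01') can be reproduced directly; partition_prefixes is a module-level dict in the original code, so the mapping below is only illustrative:

import fandango as fn

partition_prefixes = {'att_scalar_devdouble_ro': 'sdr'}      # assumed mapping
date = fn.time2str(fn.now())                                 # e.g. '2020-01-03 09:03:03'
p = partition_prefixes['att_scalar_devdouble_ro'] + ''.join(date.split('-')[0:2]) + '01'
print(p)                                                     # e.g. 'sdr20200101'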
Example #31
 def refreshAction(self):
     self._trend.applyNewDates()
     try:
         date = str2time(str(self.xEditStart.text()))
     except:
         try:
             date = getTrendBounds(self._trend)[0]
             self.xEditStart.setText(time2str(date))
         except:
             traceback.print_exc()
Example #32
 def decimate_table(att_id,table):
     """
     @TODO
     """
     hours = [t0+i*3600 for i in range(24*30)]
     days = [t0+i*86400 for i in range(30)]
     dvalues = {}
     q = ("select count(*) from %s where att_conf_id = %d "
         "and data_time between '%s' and '%s'")
     for d in days:
         s = fn.time2str(d)
         q = hdbpp.Query(q%(table,att_id,s,fn.time2str(d+86400))
                         +" and (data_time %% 5) < 2;")
     sorted(values.items())
     3600/5
     for h in hours:
         s = fn.time2str(h)
         q = hdbpp.Query("select count(*) from att_scalar_devdouble_ro "
             "where att_conf_id = 1 and data_time between '%s' and '%s' "
             "and (data_time %% 5) < 2;"%(s,fn.time2str(h+3600)))
Example #33
def decimate_table(att_id,table):
    """
    @TODO
    """
    hours = [t0+i*3600 for i in range(24*30)]
    days = [t0+i*86400 for i in range(30)]
    dvalues = {}
    q = ("select count(*) from %s where att_conf_id = %d "
        "and data_time between '%s' and '%s'")
    for d in days:
        s = fn.time2str(d)
        q = hdbpp.Query(q%(table,att_id,s,fn.time2str(d+86400))
                        +" and (data_time %% 5) < 2;")
    sorted(values.items())
    3600/5
    for h in hours:
        s = fn.time2str(h)
        q = hdbpp.Query("select count(*) from att_scalar_devdouble_ro "
            "where att_conf_id = 1 and data_time between '%s' and '%s' "
            "and (data_time %% 5) < 2;"%(s,fn.time2str(h+3600)))
Example #34
def check_schema_with_queries(schema):
    api = pta.api(schema)
    pending = []
    done = []

    trace('check_schema(%s)' % schema)
    #Check IDLE devices
    for s, t in api.servers.items():
        if 'archiver' not in s: continue
        for d in t.get_device_list():
            if not fun.check_device(d):
                pending.append(s)
                break
    trace('\t%d servers have idle devices' % len(pending))

    for s, t in sorted(api.servers.items()):
        if s in pending or s in done: continue
        #Check current server attributes
        now = time.time()
        devs = map(str.lower, t.get_device_list())
        attrs = [a for a in api if api[a].archiver.lower() in devs]
        for a in attrs:
            if 'errorcode' in a: continue
            api.load_last_values(a)
            if api[a].last_date < now - (MAX_ERROR +
                                         api[a].modes['MODE_P'][0] / 1000.):
                pending.append(s)
                trace('\t%s marked to restart (%s not updated since %s)' %
                      (s, a, fun.time2str(api[a].last_date)))
                break

        #Then check servers pending to restart
        now = time.time()
        if pending and now > last_restart + WAIT_TIME:
            done.append(restart_server(api, pending.pop(0)))

    trace('\tAttribute check finished, %d/%d servers pending to restart' %
          (len(pending), len(pending) + len(done)))

    #Emptying the queue
    while len(pending):
        if pending and now > last_restart + WAIT_TIME:
            done.append(restart_server(api, pending.pop(0)))
        else:
            time.sleep(1.)
    trace('%s check finished, %d/%d servers have idle devices' %
          (schema, len(pending), len(servers)))

    #Now checking for attributes in .csvs that are not archived!
    #or attributes with dedicated != '' but not archived
    ##@todo...

    return done
Example #35
 def log(self,severity,msg):
     if msg == self.last_msg: 
         msg = '+1'
     else: 
         self.last_msg = msg
         if self.logger:
             try:
                 if severity not in self.log_objs: self.log_objs[severity] = \
                     getattr(self.logger,severity.lower(),
                             lambda m,s=severity:'%s:%s: %s'%
                             (s.upper(),fn.time2str(),m))
                 self.log_objs[severity](msg)
             except: pass
     if self.dialog():
         if msg!='+1': 
             msg = '%s:%s: %s'%(severity.upper(),fn.time2str(),msg)
         if self.filters:
             msg = (fn.filtersmart(msg,self.filters) or [''])[0]
         if msg:
             if len(self.instances())>1: msg = self.tango_host+':'+msg
             self.dialog().append(msg)
Example #36
    def log(self, severity, msg):
        if msg == self.last_msg:
            msg = '+1'
        else:
            self.last_msg = msg
            if self.logger:
                try:
                    if severity not in self.log_objs:
                        self.log_objs[severity] = getattr(
                            self.logger, severity.lower(),
                            lambda m, s=severity: '%s:%s: %s' %
                            (s.upper(), fn.time2str(), m))
                    self.log_objs[severity](msg)
                except:
                    pass
        if self.dialog():
            if msg != '+1':
                msg = '%s:%s: %s' % (severity.upper(), fn.time2str(), msg)
            if self.filters:
                msg = (fn.filtersmart(msg, self.filters) or [''])[0]
            if msg:
                if len(self.instances()) > 1: msg = self.tango_host + ':' + msg
                self.dialog().append(msg)
Example #37
def check_schema_with_queries(schema):
  api = pta.api(schema)
  pending = []
  done = []

  trace('check_schema(%s)'%schema)
  #Check IDLE devices  
  for s,t in api.servers.items():
    if 'archiver' not in s: continue
    for d in t.get_device_list():
      if not fun.check_device(d):
        pending.append(s)
        break
  trace('\t%d servers have idle devices'%len(pending))

  for s,t in sorted(api.servers.items()):
    if s in pending or s in done: continue
    #Check current server attributes
    now = time.time()
    devs = map(str.lower,t.get_device_list())
    attrs = [a for a in api if api[a].archiver.lower() in devs]
    for a in attrs:
      if 'errorcode' in a: continue
      api.load_last_values(a)
      if api[a].last_date<now-(MAX_ERROR+api[a].modes['MODE_P'][0]/1000.):
        pending.append(s)
        trace('\t%s marked to restart (%s not updated since %s)'%(s,a,fun.time2str(api[a].last_date)))
        break

    #Then check servers pending to restart
    now = time.time()
    if pending and now > last_restart+WAIT_TIME:
      done.append(restart_server(api,pending.pop(0)))

  trace('\tAttribute check finished, %d/%d servers pending to restart'%(
        len(pending),len(pending)+len(done)))
      
  #Emptying the queue
  while len(pending):
    if pending and now > last_restart+WAIT_TIME:
      done.append(restart_server(api,pending.pop(0)))
    else:
      time.sleep(1.)
  trace('%s check finished, %d/%d servers have idle devices'%(schema,len(pending),len(servers)))
  
  #Now checking for attributes in .csvs that are not archived!
  #or attributes with dedicated != '' but not archived
  ##@todo...
  
  return done    
Example #38
def trace(msg,head='',level=0,clean=False,use_taurus=False):
    if level > TRACE_LEVEL: return
    if type(head)==int: head,level = '',head
    msg = fandango.time2str()+':'+str(head)+('\t'*level or ' ')+str(msg)
    if use_taurus:
        if not dummies: 
            dummies.append(Logger())
            dummies[0].setLogLevel('INFO')
        print dummies[0]
        dummies[0].info(msg)
        dummies[0].error(msg)
    else:
        (print_clean if clean else fandango.printf)(msg)
    return
Example #39
 def get_last_attribute_values(self,table,n=1,
                               check_table=False,epoch=None):
      start = None
      if epoch is None:
         start,epoch = None,fn.now()+600
     elif epoch < 0:
         start,epoch = fn.now()+epoch,fn.now()+600
     if start is None:
         #Rounding to the last month partition
         start = fn.str2time(
             fn.time2str().split()[0].rsplit('-',1)[0]+'-01')
     vals = self.get_attribute_values(table, N=n, human=True, desc=True,
                                         start_date=start, stop_date=epoch)
     if len(vals):
         return vals[0] if abs(n)==1 else vals
     else: 
         return vals
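The "round down to the current month partition" trick used for the default start date also works on its own (assuming fandango is installed):

import fandango as fn

start = fn.str2time(fn.time2str().split()[0].rsplit('-', 1)[0] + '-01')
print(fn.time2str(start))        # first day of the current month at 00:00:00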
Example #40
 def get_last_attribute_values(self,table,n,check_table=False,epoch=fn.END_OF_TIME):
     """
     Check table set to False as sometimes order of insertion is not the same as expected, BE CAREFUL WITH THIS ARGUMENT!
     """
     query,where = table,''
     if check_table:
         table_size = self.getTableSize(table)
         if table_size>1e3:
             x = max((2*n,20))
             query = '(select * from %s limit %d,%d)'%(table,table_size-x,x)
     epoch = fn.str2time(epoch) if fn.isString(epoch) else epoch
     if epoch not in (None, fn.END_OF_TIME):
         where = " where T.time < '%s' " % (fn.time2str(epoch))
     what = 'SELECT time'
     what += (',value',',read_value')['read_value' in self.getTableCols(table)]
     return self.Query('%s from %s T %s order by T.time desc limit %d' % (
         what, query, where, n))
Example #41
def main():
    print(__doc__)

    args = map(str.lower, sys.argv[1:])
    schemas = pta.Schemas.load()
    schemas = [a for a in args if a in schemas]

    options = dict((k.strip('-'), v)
                   for k, v in ((a.split('=', 1) if '=' in a else (a, ""))
                                for a in args if a not in schemas))
    #for a in args if a.startswith('--')))

    import platform
    host = platform.node()

    folder = options.get('folder', LOG_FOLDER)
    configs = options.get('config', None)
    restart = str(options.get('restart', False)).lower() not in ('false', 'no')
    email = options.get('email', False)
    if configs is not None:
        configs = configs or get_csv_folder()

    for schema in schemas:
        date = fun.time2str(time.time(), '%Y%m%d_%H%M%S')
        if 'bl' not in host and configs:
            try:
                done = check_config_files(schema,
                                          restart=False,
                                          save='%s/missing_%s_%s_%s.pck' %
                                          (folder, host, schema, ''),
                                          email=email,
                                          csvfolder=configs)
            except:
                traceback.print_exc()

        done = check_schema_information(schema,
                                        restart=restart,
                                        email=email,
                                        save='%s/lost_%s_%s_%s.pck' %
                                        (folder, host, schema, ''))
Example #42
    def save(self, device, filename, data, add_timestamp=False, asynch=True):
        """
        FolderDS device, filename WITHOUT PATH!, data to be saved
        add_timestamp: whether to add or not timestamp at the end of filename
        asynch: if True, the save() call will not wait for the command to complete
        """
        # Remove device path from filename
        filename = (filename or device).split('/')[-1]
        if add_timestamp:
            t = fn.time2str().replace(' ', '_').replace(':',
                                                        '').replace('-', '')
            p, s = (filename.rsplit('.', 1)) if '.' in filename else (filename,
                                                                      '')
            filename = '.'.join(filter(bool, (p, t, s)))
        d = self.get_device(device)  #
        assert tango.check_device(d)
        if fn.isSequence(data): data = '\n'.join(data)

        if self.char_mode:
            cmd = 'SaveCharBuffer'
            argin = filename + '\n' + data
            #print(argin)
            argin = map(ord, filename + '\n' + data)
            #print(argin)
        else:
            cmd = 'SaveFile'
            argin = [filename, data]
        if asynch:
            #d.command_inout_asynch('SaveFile',argin,True)
            d.command_inout_asynch(cmd, argin, True)
            r = len(data)
        elif self.char_mode:
            r = d.SaveCharBuffer(argin).split()[-1]
        else:
            r = d.SaveFile(argin)

        print('%s.%s(): %s' % (device, cmd, r))
        print('mem: %s' % get_memory())
        return r
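A hypothetical call, assuming `folders` is an instance of the class defining save() above (a FolderDS client helper); the device name and payload are placeholders:

import fandango as fn

data = 'report generated at %s\n' % fn.time2str()
folders.save('test/folder/01', 'report.txt', data, add_timestamp=True, asynch=True)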
Example #43
def save_schema_values(schema, filename='', folder=''):
    """
    This method saves all last values from a given schema into a file
    it can be called from crontab to generate daily reports
    """
    t0 = fn.now()
    print('Saving %s attribute values' % schema)
    date = fn.time2str().split()[0].replace('-','')
    filename = filename or '%s_%s_values.pck' % (schema,date)
    if folder: 
        filename = '/'.join((folder,filename))

    api = pta.api(schema)
    attrs = api.keys() if hasattr(api,'keys') else api.get_attributes()
    print('%d attributes in %s' % (len(attrs),schema))
    values = dict.fromkeys(filter(api.is_attribute_archived,attrs))
    print('%d attributes archived' % (len(values)))
    values.update((a,api.load_last_values(a)) for a in values.keys())
    pickle.dump(values,open(filename,'w'))

    print('%s written, %d seconds elapsed' % (filename, fn.now()-t0))
    print(os.system('ls -lah %s' % filename))
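Reading back a report produced by save_schema_values is straightforward; the pickle holds the {attribute: last_values} dict built above (the filename below is hypothetical):

import pickle

values = pickle.load(open('hdbpp_20200103_values.pck', 'rb'))   # hypothetical filename
for attr, last in sorted(values.items())[:10]:
    print('%s: %s' % (attr, last))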
Example #44
 def save(self, device, filename, data, add_timestamp=False, asynch=True):
     """
     FolderDS device, filename WITHOUT PATH!, data to be saved
     add_timestamp: whether to add or not timestamp at the end of filename
     asynch: if True, the save() call will not wait for the command to complete
     """
     # Remove device path from filename
     filename = (filename or device).split('/')[-1]
     if add_timestamp:
         t = fn.time2str().replace(' ', '_').replace(':',
                                                     '').replace('-', '')
         p, s = (filename.rsplit('.', 1)) if '.' in filename else (filename,
                                                                   '')
         filename = '.'.join(filter(bool, (p, t, s)))
     d = self.get_device(device)  #
     if asynch:
         d.command_inout_asynch('SaveFile', [filename, data], True)
         r = len(data)
     else:
         r = d.SaveFile([filename, data])
      print('%s.SaveFile(): %s' % (device, r))
     return r
Example #45
    def refreshCurves(self,check_buffers=False):
        names =  self.trend.getCurveNames()
        self.warning('%s: In refreshCurves(%s,%s) ...'%
                     (fn.time2str(),names,check_buffers))

        if check_buffers:
            self.checkBuffers()
            
        try:
            self.forceReadings(emit=False)
        except:
            self.warning(traceback.format_exc())
            
        for n in names:
            c = self.trend.getCurve(n)
            v = c.isVisible()
            if v:
                c.setVisible(False)
                c.setYAxis(c.yAxis())
                c.setVisible(True)
            else:
                self.warning('%s curve is hidden' % n)
        return
Example #46
 def get_alarm_date(self, alarm=None, attr_value=None, null = ' NaN '):
     try:
         return ('%'+str(self.DATE_SIZE)+'s')%fandango.time2str(self.get_alarm_time(alarm,attr_value))
     except:
         print traceback.format_exc()
         return str(null)
Example #47
def transfer_table(db, db2, table, bunch = 16*16*1024, is_str = False,
                   per_value = 60, min_tdelta = 0.2, ids = []):
    
    t0 = fn.now()       
    tq = 0
    cols = db.getTableCols(table)
    
    has_int = 'int_time' in cols
    
    cols = sorted(c for c in cols 
                  if c not in ('recv_time','insert_time','int_time'))
    it, iv, ii = (cols.index('data_time'), cols.index('value_r'), 
        cols.index('att_conf_id'))
    ix = cols.index('idx') if 'idx' in cols else None
    
    is_float = 'double' in table or 'float' in table
    
    #if is_array:
        #print("%s: THIS METHOD IS NO SUITABLE YET FOR ARRAYS!" % table)
        ## dim_x/dim_y dim_x_r/dim_y_r columns should be taken into account
        ## when array should be stored?  only when value changes, or on time/fixed basis?
        #return
    
    lasts = dict()
    
    qcols = (','.join(cols)).replace('data_time',
        'CAST(UNIX_TIMESTAMP(data_time) AS DOUBLE)')
    query = 'select %s from %s' % (qcols, table)
    if has_int:
        where = " where int_time >= %d and int_time < %d "
    else:
        where = " where data_time >= '%s'"
        where += " and data_time < '%s'"
    
    order = ' order by data_time'
    if has_int:
        #order = ' order by int_time' #It may put NULL/error values FIRST!!
        if min_tdelta > 1:
            order = ' group by int_time DIV %d'%int(min_tdelta) + order
    else:
        if min_tdelta > 1:
            order = ' group by data_time DIV %d'%int(min_tdelta) + order
        
    limit = ' limit %s' % bunch
    
    print('inserting data ...')
    
    count,done,changed,periodic = 0,0,0,0
    attr_ids = get_table_attr_ids(db, table)
    for aii,ai in enumerate(attr_ids):

        if ids and ai not in ids:
            continue
        
        print('attr: %s (%s/%s)' % (ai,aii,len(attr_ids)))
        
        print('getting limits ...')
        last = db2.Query('select UNIX_TIMESTAMP(data_time) from %s '
            ' where att_conf_id = %d order by '
            'att_conf_id, data_time desc limit 1' % (table,ai))
        last = last and last[0][0] or 0
        if not last:
            last = db.Query('select CAST(UNIX_TIMESTAMP(data_time) AS DOUBLE) from %s '
                ' where att_conf_id = %d '
                'order by att_conf_id,data_time limit 1' % (table,ai))
            last = last and last[0][0] or 0
        last = fn.time2str(last)
        
        print(last)
        end = db.Query('select CAST(UNIX_TIMESTAMP(data_time) AS DOUBLE) from %s '
            ' where att_conf_id = %d '
            'order by att_conf_id,data_time desc limit 1' % (table,ai))
        end = end and end[0][0] or fn.now()
        if end > fn.now(): end = fn.now()
        end = fn.time2str(end, us = True)         
        print(end)
        
        #return
        while True:        
            print('attr: %s (%s/%s)' % (ai,aii,len(attr_ids)))
            values = ''
            #.split('.')[0]
            prev = last
            print('last: %s' % last)
            nxt = fn.time2str(fn.str2time(last)+4*86400)
            
            if fn.str2time(last) >= fn.now() or fn.str2time(nxt) >= fn.now():
                break            
            if fn.str2time(last)+60 >= fn.str2time(end):
                break            
            if has_int:
                qr = query+(where%(int(fn.str2time(last)),int(fn.str2time(nxt))))
            else:
                qr = query+(where%(last,nxt))
                
            qr += ' and att_conf_id = %s' % ai
            qr += order+limit
            print(qr)
            tq = fn.now()
            cursor = db.Query(qr, export=False)
            print(fn.now()-tq)
            v = cursor.fetchone()
            if v is None:
                last = nxt
            else:
                last = fn.time2str(v[it],us=True)
                
            if fn.str2time(last)+60 >= fn.str2time(end):
                break #It must be checked before and after querying
            if v is None:
                continue
            
            curr = 0
            for _i in range(bunch):
                #print(_i,bunch)
                curr += 1
                count += 1
                i,t,w = v[ii], v[it], v[iv]
                x = v[ix] if ix is not None else None

                last = fn.time2str(t,us=True)
                if i not in lasts:
                    diff = True
                elif t < lasts[i][0]+min_tdelta:
                    diff = False
                else:
                    diff = (w != lasts[i][1])
                    if is_float:
                        if w and None not in (w,lasts[i][1]):
                            diff = diff and abs((w-lasts[i][1])/w)>1e-12
                            
                if ix is None and diff:
                    # changed scalar value
                    lasts[i] = (t,w)
                    v = map(str,v)
                    v[2] = repr(last)
                    if values:
                        values += ','
                    values += '(%s)' % ','.join(v)
                    changed += 1
                    done += 1
                    v = cursor.fetchone()
                    if v is None:
                        break
                    
                elif ix is None and (t-lasts[i][0]) >= per_value:
                    # periodic scalar value
                    lasts[i] = (t,w)
                    v = map(str,v)
                    v[2] = repr(last)
                    if values:
                        values += ','
                    values += '(%s)' % ','.join(v)
                    periodic += 1
                    done += 1                    
                    v = cursor.fetchone()
                    if v is None:
                        break
                    
                elif ix is not None and ((i,x) not in lasts 
                        or (t-lasts[(i,x)][0]) >= per_value):
                    # periodic array value
                    lasts[(i,x)] = (t,w)
                    v = map(str,v)
                    v[2] = repr(last)
                    if values:
                        values += ','
                    values += '(%s)' % ','.join(v)
                    done += 1
                    v = cursor.fetchone()
                    if v is None:
                        break
                    
                else:
                    v = cursor.fetchone()
                    if v is None:
                        break

            if values:        
                values = values.replace('None','NULL')
                insert = "insert into %s (%s) VALUES %s" % (
                    table, ','.join(cols), values)
                print(insert[:80],insert[-80:])
                db2.Query(insert)
            #else:
                #print('NO VALUES TO INSERT')
                #break
                
            print(curr,changed,periodic,done,count)
            #print(last,nxt,end)
            if last == prev:
                last = nxt
            if fn.str2time(last) >= fn.now():
                break
    
    print('%d/%d values inserted in %d seconds'  % (done,count,fn.now()-t0))
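
The loop above decides, row by row, whether a sample is worth re-inserting: a value is kept when it differs from the last stored one (with a relative tolerance so float rounding noise is ignored) or when per_value seconds have elapsed since the last kept sample. A minimal standalone sketch of that decision, using hypothetical names (keep_sample, lasts) and the same 1e-12 tolerance:

def keep_sample(attr_id, t, value, lasts, per_value, min_tdelta=0.2, rel_tol=1e-12):
    """Return True if (t, value) should be archived for attr_id.
    lasts maps attr_id -> (last_kept_time, last_kept_value) and is updated
    in place whenever the sample is kept. Illustrative helper only, not part
    of PyTangoArchiving."""
    prev = lasts.get(attr_id)
    if prev is None:
        lasts[attr_id] = (t, value)
        return True
    t0, v0 = prev
    if t < t0 + min_tdelta:
        return False  # too close in time to the previous kept sample
    changed = (value != v0)
    if changed and isinstance(value, float) and value and v0 is not None:
        changed = abs((value - v0) / value) > rel_tol  # ignore float noise
    if changed or (t - t0) >= per_value:
        lasts[attr_id] = (t, value)  # changed value, or periodic keep-alive
        return True
    return False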
Ejemplo n.º 48
0
def decimate(db_name,keys,tstart,tend,period=10,dry=False):
    """
    time arguments are strings.
    NOTE: this method no longer seems to free space in TDB;
    perhaps a mysqld restart is needed, but so far the space is not released.
    """
    api = pta.api(db_name)
    tt0 = time.time()  # reference for the final elapsed-time report
    
    if keys and '/' in keys[0]:
        print('Decimating by attribute names')
        tables = fn.defaultdict(list)
        for a in keys:
            api.get_attr_id_type_table(a)
            tables[api[a].table].append(a)
            
        print('tables: %s' % (tables.keys()))
        for table,attrs in tables.items():
            for a in attrs:
                pta.dbs.decimate_db_table_by_time(api,
                    table,api[a].id,tstart,tend,period,
                    optimize=(a==attrs[-1]))
    
    if not keys or '/' not in keys[0]:
        print('Decimating by data_type')

        data_types = keys
        if not data_types:
            data_types = [r[0] for r in api.Query('select data_type from att_conf_data_type')]
        else:
            data_types = [d.replace('att_','') for d in data_types]
            
        print('Decimating %s types between %s and %s: %s'%(db_name,tstart,tend,data_types))

        for data_type in data_types:

            attrs = api.Query('select att_conf_id from att_conf,att_conf_data_type '
                    'where att_conf.att_conf_data_type_id = att_conf_data_type.att_conf_data_type_id '
                    'and data_type = "%s"'%data_type)
            attrs = [r[0]  for r in attrs]

            q = ("select partition_name,table_name"
                    " from information_schema.partitions where"
                    " partition_name is not NULL"
                    " and table_schema = '%s'"%db_name +
                    " and table_name like '%"+data_type+"'" )
            print(q)
            partitions = api.Query(q)
            if partitions:
                table = partitions[0][1]
            else:
                table = 'att_'+data_type
            print('%s has %d attributes in %d partitions'%(table,len(attrs),len(partitions)))
            c0 = api.Query('select count(*) from %s '%table)

            import re
            intervals = []

            for p in partitions:
                p = p[0]
                r = '(?P<year>[0-9][0-9][0-9][0-9])(?P<month>[0-9][0-9])'
                md = re.search(r,p).groupdict()
                t0 = '%s-%s-01 00:00:00'%(md['year'],md['month'])
                m,y = int(md['month']),int(md['year'])
                
                if m == 12:
                    m,y = 1, y+1
                else:
                    m+=1
                    
                t1 = '%04d-%02d-01 00:00:00'%(y,m)
                if fn.str2time(t0) < fn.str2time(tend) and \
                        fn.str2time(t1) > fn.str2time(tstart):
                    intervals.append((t0,t1,p))

            if not partitions:
                ts,te = fn.str2time(tstart),fn.str2time(tend)
                tinc = (te-ts)/10.
                for i in range(1,11):
                    intervals.append((fn.time2str(ts+(i-1)*tinc),
                                    fn.time2str(ts+i*tinc),None))
                
            print('%d intervals in %s'%(len(intervals),table))
                
            for t0,t1,p in intervals:
                
                print((t0,t1))
                if dry: continue
                for a in attrs:
                    c0 = api.getTableSize(table)
                    pta.dbs.decimate_db_table(db=api,table=table,
                        start=fn.str2time(t0),end=fn.str2time(t1),
                        period=600 if 'string' in table else 300,
                        condition=' att_conf_id = %s '%a,
                        iteration=2000,cols=['data_time','value_r'],
                        us=True, repeated=True)
                    
                if p: api.Query('alter table %s optimize partition %s'%(table,p))

            if not dry:
                q = 'repair table %s;'%table
                print('\n'+q)
                api.Query(q)
                c1 = api.getTableSize(table)
                print('\n\n%s size reduced from %s to %s'%(table,c0,c1))
            
        print('elapsed %d seconds'%(time.time()-tt0))
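
The partition handling above derives a monthly [t0, t1) interval from the YYYYMM suffix embedded in each partition name. A self-contained sketch of that parsing step (hypothetical helper, same regex as above):

import re

def partition_month_interval(partition_name):
    """Return the (t0, t1) month boundaries encoded in a partition name
    ending in YYYYMM, or None if no date is found. Illustrative helper."""
    md = re.search(r'(?P<year>[0-9]{4})(?P<month>[0-9]{2})', partition_name)
    if md is None:
        return None
    y, m = int(md.group('year')), int(md.group('month'))
    t0 = '%04d-%02d-01 00:00:00' % (y, m)
    if m == 12:
        y, m = y + 1, 1
    else:
        m += 1
    t1 = '%04d-%02d-01 00:00:00' % (y, m)
    return t0, t1

# partition_month_interval('sdr201912') -> ('2019-12-01 00:00:00', '2020-01-01 00:00:00')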
Ejemplo n.º 49
0
    if not args or args[0] not in ("help", "check", "decimate"):
        print(__doc__)
        sys.exit(-1)
        
    action,args = args[0],args[1:]
    
    if action == 'help':
        print(__doc__)

    elif action == 'check':
        db_name = args[0]
        if len(args) == 1:
            check_db(db_name)
        else:
            tstart = fn.str2time(args[1])
            tend = fn.str2time(args[2])
            table = args[3]
            check_table(db_name,table,tstart,tend)
        sys.exit(0)
      
    elif action == 'decimate':
        db_name = args[0] #'hdbmiras'
        tstart = fn.time2str(fn.str2time(args[1]))
        tend = fn.time2str(fn.str2time(args[2]))
        period = int(args[3])
        keys = args[4:] #['scalar_devdouble_ro']
        decimate(db_name,keys,tstart,tend, period)
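
Assuming this dispatch lives in a maintenance script (the script name below is hypothetical), typical invocations follow the argument order parsed above:

    python hdbpp_maintenance.py check hdbmiras
    python hdbpp_maintenance.py check hdbmiras 2020-01-01 2020-02-01 att_scalar_devdouble_ro
    python hdbpp_maintenance.py decimate hdbmiras 2020-01-01 2020-02-01 10 scalar_devdouble_ro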

    
    
Ejemplo n.º 50
0
diffs = [vals[0]]
for i,r in enumerate(vals[1:]):
    if r[1]!=vals[i][1]:     
        if vals[i] != diffs[-1]:
            diffs.append(vals[i])
        diffs.append(r)       

print('At least, %d rows will be kept' % len(diffs))

if float(len(diffs))/len(vals) < 0.7 :
    
    if 'd' in flags: sys.exit(0)
    
    for i,d in enumerate(diffs[1:]):                                                      
        t0 = fn.time2str(diffs[i][0]+1)                                                   
        t1 = fn.time2str(d[0]-1)                                                          
        if fn.str2time(t1)-fn.str2time(t0) >= abs((int(tdiff) or int(mlimit))-2):
            q = ("delete from %s where time between '%s' and '%s'" 
                % (table, t0, t1))     
            query(q,aid)    
else:
    print("standard decimation doesn't pay off")
    
    if tdiff:
        print('decimating t < %s' % tdiff)
        tfirst = vals[0][0] #query(q)[0][0]
        
        trange = 3600*12
        for tt in range(int(tfirst),int(fn.str2time(tend)),int(trange)):
            q = ("select count(*) from %s where (UNIX_TIMESTAMP(%s) between %s and %s) "
Ejemplo n.º 51
0
    def get_attribute_values(self,table,start_date=None,stop_date=None,
                             desc=False,N=0,unixtime=True,
                             extra_columns='quality',decimate=0,human=False,
                             as_double=True,aggregate='MAX',int_time=True,
                             **kwargs):
        """
        This method returns values between dates from a given table.
        If stop_date is not given, then anything above start_date is returned.
        desc controls the sort order of the returned values
        
        unixtime=True speeds up querying by about 60%
            (due to the MySQLdb implementation of datetime)
        
        If N is specified:
        
            * the query returns the last N values if there is no stop_date
            * if there is a stop_date, it returns the first N values (windowing)
            * if N is negative, it returns the last N values instead
            
        start_date and stop_date must be in a format valid for SQL
        """
        t0 = time.time()
        self.debug('HDBpp.get_attribute_values(%s,%s,%s,%s,decimate=%s,%s)'
              %(table,start_date,stop_date,N,decimate,kwargs))
        if fn.isSequence(table):
            aid,tid,table = table
        else:
            aid,tid,table = self.get_attr_id_type_table(table)
            
        if not all((aid,tid,table)):
            self.warning('%s is not archived' % table)
            return []
            
        human = kwargs.get('asHistoryBuffer',human)
            
        what = 'UNIX_TIMESTAMP(data_time)' if unixtime else 'data_time'
        if as_double:
            what = 'CAST(%s as DOUBLE)' % what
            
        if 'array' in table: what+=",idx"
        value = 'value_r' if 'value_r' in self.getTableCols(table) \
                                else 'value'
                            
        if decimate and aggregate in ('AVG','MAX','MIN'):
            value = '%s(%s)' % (aggregate,value)
            
        what += ', ' + value
        if extra_columns: 
            what+=','+extra_columns

        interval = 'where att_conf_id = %s'%aid if aid is not None \
                                                else 'where att_conf_id >= 0 '
                                            
        int_time = int_time and 'int_time' in self.getTableCols(table)
        if int_time:
            self.info('Using int_time indexing for %s' % table)
        if start_date or stop_date:
            start_date,start_time,stop_date,stop_time = \
                Reader.get_time_interval(start_date,stop_date)
            
            if int_time:
                
                def str2mysqlsecs(date):
                    rt = fn.str2time(date)
                    return int(rt+self.get_mysqlsecsdiff(date))
                
                if start_date and stop_date:
                    interval += (" and int_time between %d and %d"
                            %(str2mysqlsecs(start_date),
                              str2mysqlsecs(stop_date)))
                
                elif start_date and fandango.str2epoch(start_date):
                    interval += (" and int_time > %d" 
                                 % str2mysqlsecs)
                
            else:
                if start_date and stop_date:
                    interval += (" and data_time between '%s' and '%s'"
                            %(start_date,stop_date))
                
                elif start_date and fandango.str2epoch(start_date):
                    interval += " and data_time > '%s'"%start_date
            
        query = 'select %s from %s %s' % (what,table,interval)
        if decimate:
            if isinstance(decimate,(int,float)):
                d = int(decimate) or 1
            else:
                d = int((stop_time-start_time)/10800) or 1
            # decimation on server side
            query += ' group by FLOOR(%s/%d)' % (
                'int_time' if int_time else 'UNIX_TIMESTAMP(data_time)',d)
        query += ' order by %s' % ('int_time' if int_time else 'data_time')
                    
        if N == 1:
            human = 1
        if N < 0 or desc: 
            query+=" desc" # or (not stop_date and N>0):
        if N: 
            query+=' limit %s'%abs(N if 'array' not in table else N*1024)
        
        ######################################################################
        # QUERY
        t0 = time.time()
        self.warning(query.replace('where', '\nwhere').replace(
            ' group by', '\ngroup by'))
        try:
            result = self.Query(query)
            self.warning('read [%d] in %f s'%(len(result),time.time()-t0))
        except MySQLdb.ProgrammingError as e:
            result = []
            if 'DOUBLE' in str(e) and "as DOUBLE" in query:
                return self.get_attribute_values((aid,tid,table),start_date,
                    stop_date,desc,N,unixtime,extra_columns,decimate,human,
                    as_double=False,**kwargs)
            else:
                traceback.print_exc()
            
        if not result or not result[0]: 
            return []
        ######################################################################
        
        t0 = time.time()
        if 'array' in table:
            data = fandango.dicts.defaultdict(list)
            for t in result:
                data[float(t[0])].append(t[1:])
            result = []
            for k,v in sorted(data.items()):
                l = [0]*(1+max(t[0] for t in v))
                for i,t in enumerate(v):
                    if None in t: 
                        l = None
                        break
                    l[t[0]] = t[1] #Ignoring extra columns (e.g. quality)
                result.append((k,l))
            if N > 0: 
                #for k,l in result:
                    #print((k,l and len(l)))
                result = result[-N:]
            if N < 0 or desc:
                result = list(reversed(result))
            self.debug('array arranged [%d] in %f s'
                         % (len(result),time.time()-t0))
            t0 = time.time()
          
        # Converting the timestamp from Decimal to float
        # Weird results may appear in filter_array comparison if not done
        # Although it is INCREDIBLY SLOW!!!
        #result = []
        #nr = []
        #if len(result[0]) == 2: 
            #for i,t in enumerate(result):
                #result[i] = (float(t[0]),t[1])
        #elif len(result[0]) == 3: 
            #for i,t in enumerate(result):
                #result[i] = (float(t[0]),t[1],t[2])
        #elif len(result[0]) == 4: 
           #for i,t in enumerate(result):
                #result[i] = ((float(t[0]),t[1],t[2],t[3]))
        #else:
            #for i,t in enumerate(result):
                #result[i] = ([float(t[0])]+t[1:])
        
        self.debug('timestamp arranged [%d] in %f s'
                     % (len(result),time.time()-t0))
        t0 = time.time()
            
        # Decimation to be done in Reader object
        #if decimate:
            ## When called from trends, decimate may be the decimation method
            ## or the maximum sample number
            #try:
                #N = int(decimate)
                ##decimate = data_has_changed
                #decimate = 
                #result = PyTangoArchiving.reader.decimation(
                                        #result,decimate,window=0,N=N)                
            #except:
                ##N = 1080
                #result = PyTangoArchiving.reader.decimation(result,decimate) 
        
        if human: 
            result = [list(t)+[fn.time2str(t[0])] for t in result]

        if not desc and ((not stop_date and N>0) or (N<0)):
            #THIS WILL BE APPLIED ONLY WHEN LAST N VALUES ARE ASKED
            self.warning('reversing ...' )
            result = list(reversed(result))
        #else:
            ## why
            #self.getCursor(klass=MySQLdb.cursors.SSCursor)

        self.debug('result arranged [%d]'%len(result))            
        return result
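
The server-side decimation above reduces the result set to at most one aggregated value per fixed time bucket by grouping on FLOOR(time/bucket). A simplified, scalar-only sketch of how such a query could be assembled (hypothetical helper, not the actual PyTangoArchiving API):

def build_decimated_query(table, att_conf_id, start, stop,
                          bucket_seconds=60, aggregate='MAX', int_time=True):
    """Build a query returning one aggregated value per time bucket.
    Assumes int_time is an integer epoch column and value_r the read value."""
    tcol = 'int_time' if int_time else 'UNIX_TIMESTAMP(data_time)'
    what = 'CAST(%s AS DOUBLE), %s(value_r)' % (tcol, aggregate)
    where = ('att_conf_id = %d and %s between %d and %d'
             % (att_conf_id, tcol, int(start), int(stop)))
    group = 'FLOOR(%s/%d)' % (tcol, bucket_seconds)
    return ('select %s from %s where %s group by %s order by %s'
            % (what, table, where, group, tcol))

# build_decimated_query('att_scalar_devdouble_ro', 17, 1577836800, 1577923200)
# -> one MAX(value_r) per 60 s bucket, ordered by time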
Ejemplo n.º 52
0
def decimate_db_table(db, table, host='', user='', passwd='', start=0, end=0,
                      period=300, iteration=1000, condition='', cols=None,
                      us=True, test=False, repeated=False):
    """ 
    This method will remove all values from a MySQL table that seem duplicated 
    in time or value.
    All values with a difference in time lower than period will be kept.
    
    To use it with hdb++:
    
    decimate_db_table('hdbpp',user='******',passwd='...',
      table = 'att_scalar_devdouble_ro',
      start = 0,
      end = now()-600*86400,
      period = 60, #Keep a value every 60s
      condition = 'att_conf_id = XX',
      iteration = 1000,
      cols = ['data_time','value_r'],
      us=True,
      )
    """
    print('Decimating all repeated values in %s(%s) with less '
      'than %d seconds in between.'%(table,condition,period))
    
    db = FriendlyDB(db,host,user,passwd) if not isinstance(db,FriendlyDB) else db
    #rw = 'write_value' in ','.join([l[0] for l in db.Query("describe %s"%table)]).lower()
    #date,column = 'read_value,write_value' if rw else 'value'
    columns = cols or ['time','value']
    date,column = columns[0],columns[1:]
    start = time2date(start) if isNumber(start) else time2date(str2time(start))
    t0,vw0,now = start,None,time2date(time.time())
    end = time2date(end) if isNumber(end) else time2date(str2time(end))
    removed,pool,reps = 0,[],[]
    count = 0
    
    ## WHY T0 AND END ARE DATES!?!? : to be easy to compare against read values

    while t0<(end or now):

        query = "select %s,%s from %s where" %(date,','.join(column),table)
        query += " '%s' < %s"%(date2str(t0,us=True),date)#,date2str(end))
        if condition: query+=' and %s'%condition
        query += ' order by %s'%date
        query += ' limit %d'%iteration
        values = db.Query(query)
        #print(query+': %d'%len(values))
        #print('inspecting %d values between %s and %s'%(len(values),date2str(t0),date2str(end)))
        
        if not values: 
            break
          
        for i,v in enumerate(values):
            count += 1
            t1,vw1 = v[0],v[1:1+len(column)] #v[1],(rw and v[2] or None)
            #print((i,count,t1,vw0,vw1))
            e0,e1 = 1e-3*int(1e3*date2time(t0)),1e-3*int(1e3*date2time(t1)) #millisecs
            tdelta = e1-e0
            is_last = i >= (len(values)-1) or t1 >= end
            buff = len(pool)

            if is_last or tdelta>=period or vw0!=vw1:
                #if tdelta>=period: print('%s >= %s'%(tdelta,period))
                #elif vw0!=vw1: print('%s != %s'%(vw0,vw1))
                #else: print('i = %s/%s'%(i,len(values)))
                # End of repeated values, apply decimation ...
                if buff:
                    # Dont apply remove on windows < 1 second
                    e1 = date2time(values[i-1][0]) #previous value
                    if True: #(int(e1)-int(e0))>1:
                        #print('remove %d values in pool'%len(pool))
                        if not test:
                            #Don't use the between syntax!!
                            q = "delete from %s where "%table
                            if condition:
                                q+= condition+' and '
                            #e0,e1 = e0+1,e1-1 #t0 should not be removed!
                            q+= "%s > '%s' and "%(date,time2str(e0,us=us)) 
                            q+= "%s < '%s'"%(date,time2str(e1,us=us))
                            #print(q)
                            #removed += buff
                            db.Query(q)

                        #print('t0: %s; removed %d values' % (date2str(t0),buff-1))
                        #print('pool:%s'%str(pool))
                        
                if reps:
                    if not test:
                        #print('repeated timestamp: %s,%s == %s,%s'%(t0,vw0,t1,vw1))
                        q = "delete from %s where "%(table)
                        if condition:
                            q+= condition+' and '
                        q+= "%s = '%s' limit %d" % (
                          date,date2str(reps[-1],us=us),len(reps))
                        #print(q)
                        db.Query(q)                
 
                pool,reps = [],[]
                #print('%s => %s'%(t0,t1))
                t0,vw0 = t1,vw1

            else:
                # repeated values with tdiff<period will be removed in a single query
                    
                # This should apply only if values are different and timestamp equal?
                # if timestamp is repeated the condition t < d < t is useless
                # repeated timestamps are removed directly
                #print(tdelta)
                if repeated and not tdelta:
                    reps.append(t1)
                    #print(('reps',t1))
                        
                elif vw0 == vw1:
                    #if buff and not buff%100:
                    #    print('%s repeated values in %s seconds'%(buff,tdelta))
                    pool.append(t1)

                    #removed +=1  
                
                else: pass
                #print((vw0,vw1))                  
                    
            if is_last: break
    
    query = "select count(*) from %s where" %(table)
    query += " '%s' < %s and %s < '%s'"%(date2str(start,us=us),date,date,date2str(end,us=us))
    if condition: query+=' and %s'%condition   
    cur =  db.Query(query)[0][0]
    removed = count-cur

    print('decimate_db_table(%s,%s) took %d seconds to remove %d = %d - %d values'%(
      table,condition,time.time()-date2time(now),removed,count,cur))

    return removed
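
The deletions above always keep the first and last rows of a run of repeated values and remove only the rows strictly in between. A minimal sketch of that windowed delete, assuming fandango's time2str and a FriendlyDB-like object exposing Query (hypothetical helper):

def delete_strictly_between(db, table, date_col, e0, e1, condition='', us=True):
    """Delete rows whose date_col lies strictly between epochs e0 and e1,
    preserving both endpoint rows (as in the pool flush above)."""
    q = "delete from %s where " % table
    if condition:
        q += condition + ' and '
    q += "%s > '%s' and %s < '%s'" % (
        date_col, time2str(e0, us=us), date_col, time2str(e1, us=us))
    return db.Query(q)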
Ejemplo n.º 53
0
    def UpdateValues(self):
        # PROTECTED REGION ID(SchemaManager.UpdateValues) ENABLED START #
        try:

            t0 = t1 = fn.now()
            self.info_stream('UpdateValues()')
            
            if (self.ValuesFile or '').strip():
                self.info_stream('Load values from: %s ...' % self.ValuesFile)
                if self.ValuesFile.endswith('json'):
                    self.values = fn.json2dict(self.ValuesFile)
                else:
                    with open(self.ValuesFile) as f:
                        self.values = pickle.load(f)
                        
                self.values = dict((a,self.get_last_value(a,v)) 
                                   for a,v in self.values.items())
                t1 = max(v[0] for v in self.values.values() if v)
                t1 = min((t1,fn.now()))
                self.info_stream('reference time is %s' % fn.time2str(t1))

            elif self.Threaded:
                self.info_stream('Loading values from thread cache ...')
                self.values = dict((a,v) for a,v in self.threadDict.items()
                    if self.threadDict._updates.get(a,0))
            else:
                self.info_stream('Loading values from db ...')
                self.values = self.api.load_last_values(self.attr_on)

            self.info_stream('Updating %d values: %s' % (
                len(self.values),str(len(self.values) 
                                     and list(self.values.items())[0])))
            self.attr_ok = []
            self.attr_nok = []
            self.attr_lost = []
            self.attr_err = []
            for a,v in sorted(self.values.items()):
                try:
                    a = fn.tango.get_full_name(a)
                    if self.Threaded:
                        t1 = self.threadDict._updates.get(a,0)
                    self.check_attribute_ok(a,v,t=t1)
                except Exception as e:
                    self.attr_err.append(a)
                    traceback.print_exc()
                    m = str("%s: %s: %s" % (a, str(v), str(e)))
                    #self.error_stream(m)
                    print('*'*80)
                    print(fn.time2str()+' '+self.get_name()+'.ERROR!:'+m)
                    fn.wait(1e-6)
                    
            for a in ['AttributeValues','AttributeOkList','AttributeNokList',
                    'AttributeWrongList','AttributeLostList',
                    'AttributeNoevList','AttributeStalledList']:
                self.push_change_event(a,getattr(self,'read_%s'%a)())
                
            self.update_time = fn.now()
            self.state_machine()
            self.info_stream(self.get_status())
            self.info_stream('UpdateValues() took %f seconds' % (fn.now()-t0))
            
        except Exception as e:
            traceback.print_exc()
            self.error_stream(fn.except2str())
            raise e            
Ejemplo n.º 54
0
 def eventReceived(self,evt_src,evt_type,evt_value):
     try:
         debug = 'debug' in str(evt_src).lower()
         now = fandango.time2str()
         evtype = str(TaurusEventType.reverseLookup[evt_type])
         evvalue = getattr(evt_value,'value',None)
         if debug: 
             print '\n'
             #trace('%s: In AlarmRow(%s).eventReceived(%s,%s,%s)'%(fandango.time2str(),self.alarm.tag,evt_src,evtype,evvalue),clean=True)
         disabled,acknowledged,quality,value = self.alarmDisabled,self.alarmAcknowledged,self.quality,bool(self.alarm.active)
         if self.qtparent and getattr(self.qtparent,'api',None): self.alarm = self.qtparent.api[self.tag] #Using common api object
         
         #Ignoring Config Events
         if evt_type==TaurusEventType.Config:
             if debug: trace('%s: AlarmRow(%s).eventReceived(CONFIG): %s' % (now,self.alarm.tag,str(evt_value)[:20]),clean=True)
             return
         #Filtering Error Events
         elif evt_type==TaurusEventType.Error or not hasattr(evt_value,'value'):
             error = True
             self.errors+=1
             if self.errors>=self.MAX_ERRORS: 
                 self.alarm.active,self.quality = None,PyTango.AttrQuality.ATTR_INVALID
             if not self.errors%self.MAX_ERRORS:
                 if 'EventConsumer' not in str(evt_value): 
                     trace('%s: AlarmRow(%s).eventReceived(ERROR): %s' %(now,self.alarm.tag,'ERRORS=%s!:\n\t%s'%(self.errors,fandango.except2str(evt_value,80))),clean=True)
                 #if self.value is None: taurus.Attribute(self.model).changePollingPeriod(5*REFRESH_TIME)
                 if not self.changed and self.errors==self.MAX_ERRORS or 'Exception' not in self.status: 
                     print '%s : %s.emitValueChanged(ERROR!)'%(now,self.alarm.tag)
                     print 'ERROR: %s(%s)' % (type(evt_value),clean_str(evt_value))
                     self.qtparent.emitValueChanged()
                     self.changed = True #This flag is set here, and set to False after updating row style
                     self.updateStyle(event=True,error=fandango.except2str(evt_value)) #It seems necessary to update the row text, color and icon
             else: 
                 if debug: trace('In AlarmRow(%s).eventReceived(%s,%s,%d/%d)' % (self.alarm.tag,evt_src,evtype,self.errors,self.MAX_ERRORS),clean=True)
                 pass
         #Change Events
         elif evt_type==TaurusEventType.Change or evt_type==TaurusEventType.Periodic:
             self.errors = 0
             
             # Refresh period not changed as these lines slows down a lot!!
             #ta = taurus.Attribute(self.model)
             #if self.value is None: ta.changePollingPeriod(5*REFRESH_TIME)
             #elif ta.getPollingPeriod()!=REFRESH_TIME: ta.changePollingPeriod(REFRESH_TIME)
             
             disabled = self.get_disabled()
             acknowledged = self.get_acknowledged()
             if str(self.model).endswith('/ActiveAlarms'):
                 value,quality = any(s.startswith(self.alarm.tag+':') for s in (evt_value.value or [])),self.alarm.get_quality()
             else:
                 value,quality = evt_value.value,evt_value.quality
             
             if debug: trace('In AlarmRow(%s).eventReceived(%s,%s,%s)' % (self.alarm.tag,evt_src,str(TaurusEventType.reverseLookup[evt_type]),evvalue),clean=True)
             if debug: trace('\t%s (%s), dis:%s, ack:%s'%(value,quality,disabled,acknowledged))
             
             if  value!=bool(self.alarm.active) or quality!=self.quality or disabled!=self.alarmDisabled or acknowledged!=self.alarmAcknowledged:
                 if not self.changed: 
                     #print '%s : %s.emitValueChanged(%s)'%(fandango.time2str(),self.alarm.tag,value)
                     self.qtparent.emitValueChanged()
                 self.changed = True #This flag is set here, and set to False after updating row style
             
             self.alarmDisabled = disabled
             self.alarmAcknowledged = acknowledged
             self.quality = quality
             self.alarm.active = getAlarmTimestamp(self.alarm) if value else 0
             
             self.updateStyle(event=True,error=False)
         else: 
             print '\tUnknown event type?!? %s' % evt_type
     except:
         try: print 'Exception in eventReceived(%s,...): \n%s' %(evt_src,fandango.log.except2str())
         except : print 'eventReceived(...)!'*80+'\n'+traceback.format_exc()
Ejemplo n.º 55
0
 def setStartDate(self, start):
     if isinstance(start,(int,float)):
         start = time2str(start)
     self.xEditStart.setText(start)
Ejemplo n.º 56
0
def trace(msg,head='',level=0,clean=False):
    if level > TRACE_LEVEL: return
    if type(head)==int: head,level = '',head
    (print_clean if clean else fandango.printf)(
        fandango.time2str()+':'+str(head)+('\t'*level or ' ')+str(msg))
    return
Ejemplo n.º 57
0
def jqplot(title,vals,y2vals=None,xvals=None):
    #USING jqPlot instead of Qt
    ats = sorted(vals.keys())
    print 'JQPlot(%s,%s)'%(len(ats),','.join(ats))
    js = JS_PATH
    includes = JS_INCLUDES
    jqplot = """
        <div id="chartdiv" style="height:100%;width:100%; "></div>
        <script class="code" type="text/javascript">
        //var line1=[['2008-08-12 4:00',4], ['2008-09-12 4:00',6.5], ['2008-10-12 4:00',5.7], ['2008-11-12 4:00',9], ['2008-12-12 4:00',8.2]];
        //var line1 = [['2012-09-17 16:44', -0.24086535644531001], ['2012-09-17 16:44', -0.166169769287108], ['2012-09-17 16:45', -0.097435409545898494]];
        //var line1 = [['2012-09-17 16:41:25', -0.0238617248535157], ['2012-09-17 16:45:34', 0.058192413330078102], ['2012-09-17 16:49:34', 0.19318386840820501], ['2012-09-17 16:49:45', 0.61706387329101398], ['2012-09-17 16:49:55', 1.0387241058349601], ['2012-09-17 16:50:15', 1.54242512512208], ['2012-09-17 16:50:35', 2.4866759948730399], ['2012-09-17 16:51:34', 4.2881499938964902], ['2012-09-17 17:39:05', 2.0870143585204999], ['2012-09-17 17:39:15', -0.115877944946289], ['2012-09-17 17:43:55', -0.216508895874022], ['2012-09-17 17:50:45', -0.12760966491699099], ['2012-09-17 17:51:05', 0.00132557678222655], ['2012-09-17 17:51:14', 0.093648117065429706], ['2012-09-17 17:51:25', 0.17557904052734499], ['2012-09-17 17:51:35', 0.27481381225586199], ['2012-09-17 17:51:45', 0.45713497924804802], ['2012-09-17 17:52:05', 0.70768925476073896], ['2012-09-17 17:52:24', 1.0928863220214899], ['2012-09-17 17:52:55', 1.6552524261474699], ['2012-09-17 17:54:24', 2.6534446411132699], ['2012-09-17 17:57:35', 4.5955463104248198], ['2012-09-17 18:00:45', 7.7440131835937498], ['2012-09-17 19:02:15', 12.3284885101318], ['2012-09-17 19:15:35', 7.1876571350097702], ['2012-09-17 19:15:45', 0.51328236389160098], ['2012-09-17 19:16:35', 0.82798764038085604], ['2012-09-17 19:18:05', 1.2241496734619199], ['2012-09-17 19:18:25', 1.97309834289551], ['2012-09-17 19:18:45', 3.0986022644042799], ['2012-09-17 19:19:25', 4.5590980224609501], ['2012-09-17 19:19:55', 6.3049014739990499], ['2012-09-17 19:21:15', 10.757810562133701], ['2012-09-17 19:22:15', -0.19701274108886499], ['2012-09-17 19:22:25', 0.40055233764648701], ['2012-09-17 19:22:35', 1.10074002075196], ['2012-09-17 19:22:45', 1.6407546691894599], ['2012-09-17 19:23:15', 3.0096213989257699], ['2012-09-17 19:23:45', 4.4580032043457098], ['2012-09-17 19:24:25', 6.4163531951904602], ['2012-09-17 19:25:55', 10.671424835205], ['2012-09-17 19:27:35', 16.0758376770019], ['2012-09-17 19:36:35', -0.088338500976562498], ['2012-09-17 19:37:25', -0.0030716247558593901], ['2012-09-17 19:38:05', 0.081846588134765599], ['2012-09-17 19:38:45', 0.18200032043457201], ['2012-09-17 19:39:25', 0.24985005187988499]];
        line1 = $DATA;
        //var ticks = [[1,'Dec 10'], [2,'Jan 11'], [3,'Feb 11'], [4,'Mar 11'], [5,'Apr 11'], [6,'May 11'], [7,'Jun 11'], [8,'Jul 11'], [9,'Aug 11'], [10,'Sep 11'], [11,'Oct 11'], [12,'Nov 11'], [13,'Dec 11']]; 
        $(document).ready(function(){
            var plot1 = $.jqplot('chartdiv',  line1,
            { title:'$TITLE',
                //axes:{yaxis:{min:-10, max:240}},
                axes:{
                    xaxis:{
                        //ticks: ticks,
                        renderer:$.jqplot.DateAxisRenderer,
                        //min: "09-01-2008 16:00",
                        //max: "06-22-2009 16:00",
                        //rendererOptions:{
                        //        tickInset: 0,
                        //        tickRenderer:$.jqplot.CanvasAxisTickRenderer
                        //    },                        
                        tickOptions:{
                            formatString:'%b %e',
                            angle: -40
                            },
                                // For date axes, we can specify ticks options as human
                                // readable dates.  You should be as specific as possible,
                                // however, and include a date and time since some
                                // browser treat dates without a time as UTC and some
                                // treat dates without time as local time.
                                // Generally, if  a time is specified without a time zone,
                                // the browser assumes the time zone of the client.
                        //tickInterval: "2 weeks",
                        //tickRenderer: $.jqplot.CanvasAxisTickRenderer,
    
                        label:'Time(s)',
                        labelRenderer: $.jqplot.CanvasAxisLabelRenderer
                        },
                    yaxis:{
                        label:'Am',
                        labelRenderer: $.jqplot.CanvasAxisLabelRenderer
                        }
                    },
                $SERIES,
                legend:{
                    show:true,
                    placement: 'outsideGrid',
                    //location: 'e',
                    }
            });
        });
        </script>
        """
    serie = """
            {
                label:'$ATTR',
                lineWidth: 3,
                //color:'#5FAB78',
                color: "$COLOR",
                showMarker:false,
                //fill:true,
                //fillAndStroke:true,
            }
        """#.replace('$ATTR',CURRENT).replace('$COLOR','rgba(255, 0, 0, 0.5)')
    series = 'series:[\n%s\n]'%',\n'.join([
        serie.replace('$ATTR',a).replace('$COLOR','rgba(%d,%d,%d,1)'%DEFCOLORS[i].getRgb()[:3])
        for i,a in enumerate(ats)
        ])
    #data = """[[[1, 2],[3,5.12],[5,13.1],[7,33.6],[9,85.9],[11,219.9]]]"""
    max_size = int(float(MAX_DATA_SIZE)/len(vals))
    for k,v in vals.items():
        if len(v)>max_size:
            raise Exception('a warning must be added to notify that values are filtered or decimated')
            # the filter_array window should depend on the times requested and max_size
            vals[k] = decimate_array(v,fixed_size=max_size,fixed_rate=100)
    data = str([
        list([fandango.time2str(t[0]),t[1]] for t in vals[k]) for k in ats]
        )
    return jqplot.replace('$DATA',data).replace('$SERIES',series).replace('$TITLE',title)
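
A hedged usage sketch: vals is expected to map attribute names to lists of (epoch, value) pairs that already fit within MAX_DATA_SIZE, and the returned fragment still needs the jqPlot includes (JS_INCLUDES) prepended before it can be rendered; the attribute name and data below are made up:

vals = {'sr/id/scw01/pressure': [(1577836800 + 60*i, 1e-9*i) for i in range(100)]}
html = jqplot('SCW01 pressure trend', vals)
with open('/tmp/pressure_trend.html', 'w') as f:
    f.write(JS_INCLUDES + html)  # JS_INCLUDES provides the jqPlot <script> tags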