Code example #1
    def start_devices(self,
                      regexp='*',
                      force=False,
                      do_init=False,
                      do_restart=False):
        #devs = fn.tango.get_class_devices('HdbEventSubscriber')
        devs = self.get_archivers()
        if regexp:
            devs = fn.filtersmart(devs, regexp)
        off = sorted(set(d for d in devs if not fn.check_device(d)))

        if off and do_restart:
            print('Restarting %s Archiving Servers ...' % self.db_name)
            astor = fn.Astor()
            astor.load_from_devs_list(list(off))
            astor.stop_servers()
            fn.wait(3.)
            astor.start_servers()
            fn.wait(3.)

        for d in devs:
            try:
                dp = fn.get_device(d)
                if do_init:
                    dp.init()
                if force or dp.attributenumber != dp.attributestartednumber:
                    off.append(d)
                    print('%s.Start()' % d)
                    dp.start()
            except Exception, e:
                self.warning('start_archivers(%s) failed: %s' % (d, e))
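
A minimal usage sketch of the method above, assuming a PyTangoArchiving schema API object that exposes start_devices() (the schema name and filter are hypothetical):

import PyTangoArchiving as pta

# Assumption: pta.api() returns the configuration object defining start_devices()
api = pta.api('hdbpp')

# Start every matching archiver; servers whose devices fail fn.check_device()
# are stopped and restarted through fn.Astor before Start() is called
api.start_devices(regexp='*', do_restart=True)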
Code example #2
File: config.py  Project: sergirubio/PyTangoArchiving
    def start_devices(self,regexp = '*', force = False, 
                      do_init = False, do_restart = False):
        #devs = fn.tango.get_class_devices('HdbEventSubscriber')
        devs = self.get_archivers()
        if regexp:
            devs = fn.filtersmart(devs,regexp)
        off = sorted(set(d for d in devs if not fn.check_device(d)))

        if off and do_restart:
            print('Restarting %s Archiving Servers ...'%self.db_name)
            astor = fn.Astor()
            astor.load_from_devs_list(list(off))
            astor.stop_servers()
            fn.wait(3.)
            astor.start_servers()
            fn.wait(3.)

        for d in devs:
            try:
                dp = fn.get_device(d, keep=True)
                if do_init:
                    dp.init()
                if force or dp.attributenumber != dp.attributestartednumber:
                    off.append(d)
                    print('%s.Start()' % d)
                    dp.start()
            except Exception,e:
                self.warning('start_archivers(%s) failed: %s' % (d,e))
Code example #3
def export_attributes_to_pck(filein='ui_exported_devices.txt',
                             fileout='ui_attribute_values.pck'):
    print('export_attributes:' + str((filein, fileout)))
    if fandango.isSequence(filein):
        devs = filein
    else:
        devs = map(str.strip, open(filein).readlines())
    proxies = dict((d, PyTango.DeviceProxy(d)) for d in devs)
    devs = defaultdict(Struct)

    for d, dp in sorted(proxies.items()):
        print('%s (%d/%d)' % (d, 1 + len(devs), len(proxies)))
        obj = devs[d]
        obj.dev_class, obj.attrs, obj.comms = '', defaultdict(Struct), {}
        obj.props = dict(
            (k, v if not 'vector' in str(type(v)).lower() else list(v))
            for k, v in fandango.tango.get_matching_device_properties(
                d, '*').items() if 'dynamicattributes' not in k.lower())
        if fandango.check_device(d):
            devs[d].name = d
            devs[d].dev_class = dp.info().dev_class
            for c in dp.command_list_query():
                if c.cmd_name.lower() not in ('state', 'status', 'init'):
                    obj.comms[c.cmd_name] = (str(c.in_type), str(c.out_type))
            for a in dp.get_attribute_list():
                if a.lower() == 'status':
                    continue
                obj.attrs[a] = fandango.tango.export_attribute_to_dict(
                    d, a, as_struct=True)

    pickle.dump(devs, open(fileout, 'w'))
    return (fileout)
Code example #4
File: plot.py  Project: MikeFalowski/VACCA
    def setModel(self, model):
        print '*' * 80
        self.info('VaccaProfilePlot.setModel(%s)' % model)
        print '*' * 80
        try:
            #if self._profile_loaded: return
            if fandango.isSequence(
                    model
            ) or 'attributename' in fandango.tango.parse_tango_model(model):
                self.info('setting an attribute model')
                TaurusPlot.setModel(self, model)  # model = model[0]# str(
                # model).rsplit('/',1)[0]
            else:
                self.info('setting a composer model')
                assert fandango.check_device(model)
                dev = taurus.Device(model)
                if all(a in map(str.lower, dev.get_attribute_list())
                       for a in ('ccgaxxis', 'ccgpressures', 'ipaxxis',
                                 'ippressures', 'thermoaxxis', 'thermocouples',
                                 'axxispositions', 'axxislabels')):
                    TaurusPlot.setModel(self, [])
                    setup_profile_plot(self, model)
                else:
                    self.warning('%s has not all required attributes' % model)
            if len(self._positions) and len(self._labels):
                self.info('Setting CustomXLabels ...')
                self.setAxisCustomLabels(Qwt5.QwtPlot.xBottom,
                                         zip(self._positions, self._labels),
                                         60)
        except Exception, e:
            self.warning('VaccaProfilePlot.setModel(%s) failed!: %s' %
                         (model, e))
Code example #5
File: multi.py  Project: dvjdjvu/PyTangoArchiving
def get_hdbpp_databases(active=True):  #archivers=[],dbs={}):
    """
    Method to obtain list of dbs/archivers; it allows to match any 
    archiver list against existing dbs.
    
    This method can be used in cached mode executed like:
    
        dbs = get_hdbpp_databases()
        for a in archivers:
            db = get_hdbpp_databases(a,dbs).keys()[0]
      
    """
    schemas = pta.Schemas.load()
    hdbpp = sorted(k for k in schemas if fn.clsearch('hdbpp', str(schemas[k])))
    if active:
        r = []
        for h in hdbpp:
            try:
                if fn.check_device(pta.api(h).manager):
                    r.append(h)
            except:
                pass
        return r
    else:
        return hdbpp
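
A short usage sketch of the function above; it only assumes get_hdbpp_databases() is already imported or defined in the session:

# Schemas whose HDB++ manager device answers to fn.check_device()
print(get_hdbpp_databases(active=True))

# Every HDB++ schema declared in PyTangoArchiving.Schemas, running or not
print(get_hdbpp_databases(active=False))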
Code example #6
File: config.py  Project: dvjdjvu/PyTangoArchiving
    def restart_attributes(self, attributes=None, timewait=0.5):
        if attributes is None:
            attributes = self.get_attributes_not_updated()

        todo = []
        for a in attributes:
            arch = self.is_attribute_archived(a)
            if arch:
                todo.append(arch)
            else:
                self.warning('%s is not archived!' % a)

        devs = dict(fn.kmap(self.get_attribute_archiver, todo))

        for a, d in fn.randomize(sorted(devs.items())):
            if not fn.check_device(d):
                self.start_devices('(.*/)?' + d, do_restart=True)
            else:
                dp = fn.get_device(d, keep=True)
                dp.AttributeStop(a)
            fn.wait(timewait)

        fn.wait(10. * timewait)

        for a, d in devs.items():
            dp = fn.get_device(d, keep=True)
            dp.AttributeStart(a)
            fn.wait(timewait)

        print('%d attributes restarted' % len(attributes))
Code example #7
def get_idle_servers(api='hdb'):
    idle = dict()
    if fun.isString(api): api = pta.api(api)
    for s, t in api.servers.items():
        if 'archiver' not in s: continue
        for d in t.get_device_list():
            if not fun.check_device(d):
                idle[s] = [d]
                break
    trace('\t%d servers have idle devices' % len(idle))
    return idle
Code example #9
def attribute_name_check(attribute):
    dev, attr = attribute.rsplit('/', 1)
    all_devs = fandango.get_all_devices()
    if dev not in all_devs:
        return False  # Device does not exist
    elif not fandango.check_device(dev):
        return True  # If we can't check the attribute we assume that it exists
    elif attr.lower() in map(str.lower,
                             PyTango.DeviceProxy(dev).get_attribute_list()):
        return True
    else:
        return False
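
A usage sketch for the check above; the attribute names are illustrative only and the function is assumed to be in scope:

for name in ('sys/tg_test/1/double_scalar', 'sys/tg_test/1/no_such_attr'):
    # True if the device is unreachable or the attribute exists, False otherwise
    print('%s -> %s' % (name, attribute_name_check(name)))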
Code example #10
def check_schema_with_queries(schema):
    api = pta.api(schema)
    pending = []
    done = []

    trace('check_schema(%s)' % schema)
    #Check IDLE devices
    for s, t in api.servers.items():
        if 'archiver' not in s: continue
        for d in t.get_device_list():
            if not fun.check_device(d):
                pending.append(s)
                break
    trace('\t%d servers have idle devices' % len(pending))

    for s, t in sorted(api.servers.items()):
        if s in pending or s in done: continue
        #Check current server attributes
        now = time.time()
        devs = map(str.lower, t.get_device_list())
        attrs = [a for a in api if api[a].archiver.lower() in devs]
        for a in attrs:
            if 'errorcode' in a: continue
            api.load_last_values(a)
            if api[a].last_date < now - (MAX_ERROR +
                                         api[a].modes['MODE_P'][0] / 1000.):
                pending.append(s)
                trace('\t%s marked to restart (%s not updated since %s)' %
                      (s, a, fun.time2str(api[a].last_date)))
                break

        #Then check servers pending to restart
        now = time.time()
        if pending and now > last_restart + WAIT_TIME:
            done.append(restart_server(api, pending.pop(0)))

    trace('\tAttribute check finished, %d/%d servers pending to restart' %
          (len(pending), len(pending) + len(done)))

    #Emptying the queue
    while len(pending):
        now = time.time()
        if pending and now > last_restart + WAIT_TIME:
            done.append(restart_server(api, pending.pop(0)))
        else:
            time.sleep(1.)
    trace('%s check finished, %d/%d servers have idle devices' %
          (schema, len(pending), len(api.servers)))

    #Now checking for attributes in .csvs that are not archived!
    #or attributes with dedicated != '' but not archived
    ##@todo...

    return done
Code example #11
File: config.py  Project: sergirubio/PyTangoArchiving
    def restart_attribute(self, attr, d=''):
        try:
            d = self.get_attribute_archiver(attr)
            print('%s.restart_attribute(%s)' % (d, attr))
            dp = fn.get_device(d, keep=True)

            if not fn.check_device(dp):
                self.start_devices('(.*/)?' + d, do_restart=True)

            dp.AttributeStop(attr)
            fn.wait(.1)
            dp.AttributeStart(attr)
        except:
            print('%s.AttributeStart(%s) failed!' % (d, attr))
Code example #15
    def UpdateArchivers(self):
        # PROTECTED REGION ID(SchemaManager.UpdateArchivers) ENABLED START #
        self.info_stream('UpdateArchivers()')
        self.klass,self.is_hpp = ('HdbArchiver',0) if self.schema == 'hdb' \
            else ('TdbArchiver',0) if self.schema == 'tdb' \
                else ('HdbEventSubscriber',1)

        self.arch_on, self.arch_off = [], []
        self.archivers = map(fn.tango.get_full_name, self.api.get_archivers())
        for d in sorted(self.archivers):
            if fn.check_device(d) not in (None, PyTango.DevState.FAULT):
                self.arch_on.append(d)
            else:
                self.arch_off.append(d)
        self.state_machine()
Code example #16
def getAlarmTimestamp(alarm,attr_value=None,use_taurus=True):
    """
    Returns alarm activation timestamp (or 0) of an alarm object
    """
    trace('panic.gui.getAlarmTimestamp(%s(%s),%s,%s)'%(type(alarm),alarm,attr_value,use_taurus))
    #Not using API method, reusing last Taurus polled attribute instead
    try:
        if attr_value is None and use_taurus:
            attr_value = taurus.Attribute(alarm.device+'/ActiveAlarms').read()
            attr_value = getAttrValue(attr_value)
        return alarm.get_time(attr_value=attr_value)
    except:
        trace('getAlarmTimestamp(%s/%s): Failed!'%(alarm.device,alarm.tag))
        trace(fandango.check_device(alarm.device) and traceback.format_exc())
        return 0 #In case of error it must always return 0!!! (as it is used to set alarm.active)
Code example #17
def get_servers_status(regexp='*', exclude=['bpms', 'test', 'sr_vc_']):
    servers = fandango.Astor()
    servers.load_by_name('PyAlarm/*%s*' % regexp)
    servers.load_by_name('Panic*/*%s*' % regexp)
    print('%d servers loaded' % len(servers))
    states = servers.states()
    [states.pop(k) for k in states.keys() if any(e in k for e in exclude)]
    exported = fandango.get_all_devices(exported=True)
    exported = [s for s in states if 'dserver/' + s in exported]
    zombies = sorted(d for d, s in states.items()
                     if d in exported and s is None)
    off = sorted(d for d, s in states.items()
                 if d not in zombies and s is None)
    on = sorted(s for s in states if states[s] is not None)

    print('\n')
    for s in off:
        print('%s : %s : OFF' % (servers[s].host, s))
    for s in zombies:
        print('%s : %s : ZOMBIE!' % (servers[s].host, s))
    print('\n')

    failed = []
    for s in on:
        for d in sorted(servers[s].get_device_list()):
            if not fandango.matchCl('(sys|dserver)/*', d):
                ss = fandango.check_device(d)
                p = fandango.tango.get_device_property(d, 'pollingperiod')
                if not p: print('%s has no polling defined' % d)
                elif float(p) > 1000:
                    print('%s has a wrong polling! %s' % (d, p))
                if str(ss) not in ('ALARM', 'ON'):
                    failed.append(s)
                    print('%s : %s : %s : %s' %
                          (servers[s].host, s, d, str(ss)))

    print('\n%d servers have failed devices' % len(failed))
    restart = sorted(set(d for l in (off, zombies, failed) for d in l))
    print('%d servers should be restarted' % len(restart))
    print('')

    return {
        'off': off,
        'on': on,
        'zombies': zombies,
        'failed': failed,
        'restart': restart
    }
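
A usage sketch for the report above; the restart block is an assumption that simply reuses the fandango.Astor calls seen in other examples, it is not part of the original function:

import fandango

report = get_servers_status('*')
print('servers to restart: %s' % report['restart'])

# Possible follow-up: reload the flagged servers and restart them
if report['restart']:
    astor = fandango.Astor()
    for s in report['restart']:
        astor.load_by_name(s)
    astor.stop_servers()
    fandango.wait(10.)
    astor.start_servers()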
Code example #18
File: config.py  Project: dvjdjvu/PyTangoArchiving
    def restart_attribute(self, attr, d=''):
        """
        execute AttributeStop/Start on subscriber device
        """
        try:
            a = self.is_attribute_archived(attr)
            if not a:
                raise Exception('%s is not archived!' % attr)
            attr = a
            d = self.get_attribute_archiver(attr)
            print('%s.restart_attribute(%s)' % (d, attr))
            dp = fn.get_device(d, keep=True)

            if not fn.check_device(dp):
                self.start_devices('(.*/)?' + d, do_restart=True)

            dp.AttributeStop(attr)
            fn.wait(10.)
            dp.AttributeStart(attr)
        except:
            print('%s.AttributeStart(%s) failed!' % (d, attr))
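
Given the docstring above, a short usage sketch (the attribute name is hypothetical and api is assumed to be the schema object exposing this method):

import PyTangoArchiving as pta

api = pta.api('hdbpp')  # assumed schema object exposing restart_attribute()
api.restart_attribute('sys/tg_test/1/double_scalar')  # hypothetical attribute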
Code example #19
File: gen_simulation.py  Project: PhilLAL/SimulatorDS
def export_attributes_to_pck(filein='ui_exported_devices.txt',
                             fileout='ui_attribute_values.pck'):

    print('export_attributes from:' + str((filein, fileout)))
    assert fileout.endswith('.pck'), 'output must be a pickle file!'

    all_devs = fd.tango.get_all_devices()
    filein = fd.toList(filein)
    if all(d.lower() in all_devs for d in filein):
        devs = filein
    else:
        devs = export_devices_from_sources(*filein, check=True)

    print('devices to export: %s' % str(devs))

    proxies = dict((d, PyTango.DeviceProxy(d)) for d in devs)
    devs = defaultdict(Struct)

    for d, dp in sorted(proxies.items()):
        print('%s (%d/%d)' % (d, 1 + len(devs), len(proxies)))
        obj = devs[d]
        obj.dev_class, obj.attrs, obj.comms = '', defaultdict(Struct), {}
        obj.props = dict(
            (k, v if not 'vector' in str(type(v)).lower() else list(v))
            for k, v in fd.tango.get_matching_device_properties(
                d, '*').items() if 'dynamicattributes' not in k.lower())
        if fd.check_device(d):
            devs[d].name = d
            devs[d].dev_class = dp.info().dev_class
            for c in dp.command_list_query():
                if c.cmd_name.lower() not in ('state', 'status', 'init'):
                    obj.comms[c.cmd_name] = (str(c.in_type), str(c.out_type))
            for a in dp.get_attribute_list():
                if a.lower() == 'status':
                    continue
                obj.attrs[a] = fd.tango.export_attribute_to_dict(
                    d, a, as_struct=True)

    pickle.dump(devs, open(fileout, 'w'))
    return (fileout)
Code example #20
File: config.py  Project: dvjdjvu/PyTangoArchiving
    def add_event_subscriber(self, srv, dev, libname=''):

        if not fn.check_device(self.manager):
            raise Exception('%s not running!' % self.manager)

        if '/' not in srv:
            srv = 'hdb++es-srv/' + srv
        libname = libname or self.get_hdbpp_libname()

        dev = parse_tango_model(dev, fqdn=True).fullname
        add_new_device(srv, 'HdbEventSubscriber', dev)
        manager, dp = self.manager, self.get_manager()
        props = Struct(get_matching_device_properties(manager, '*'))

        prev = get_device_property(dev, 'AttributeList') or ''
        put_device_property(dev, 'AttributeList', prev)
        put_device_property(dev, 'DbHost', self.host)
        put_device_property(dev, 'DbName', self.db_name)
        #put_device_property(dev,'DbUser',self.user)
        #put_device_property(dev,'DbPassword',self.passwd)
        #put_device_property(dev,'DbPort','3306')
        #put_device_property(dev,'DbStartArchivingAtStartup','true')

        put_device_property(dev, 'LibConfiguration', [
            # NOTE: user/password values appear redacted ('******') in the
            # source listing; self.user / self.passwd are inferred from the
            # commented DbUser/DbPassword lines above
            'user=' + self.user,
            'password=' + self.passwd,
            'port=' + getattr(self, 'port', '3306'),
            'host=' + self.host,
            'dbname=' + self.db_name,
            'libname=' + libname,
            'lightschema=1',
        ])
        if 'ArchiverList' not in props:
            props.ArchiverList = []

        #put_device_property(manager,'ArchiverList',
        #list(set(list(props.ArchiverList)+[dev])))
        print(dev)
        dp.ArchiverAdd(dev)
        return dev
Code example #21
File: gen_simulation.py  Project: PhilLAL/SimulatorDS
def set_push_events(filein, period=3000, diff=1e-5):
    print('set_push_events(%s,%s,%s)' % (filein, period, diff))
    devs = fd.get_matching_devices(filein)
    for d in devs[:]:
        if not check_device(d):
            q = raw_input('Unable to configure events for %s, '
                          'do you wish to continue?' % d).lower()
            if 'y' not in q: return
            devs.remove(d)

    if devs:
        devs = dict(
            (d, fd.Struct({'attrs': fd.get_device(d).get_attribute_list()}))
            for d in devs)
    else:
        devs = pickle.load(open(filein))
    for d, t in sorted(devs.items()):
        print('Setting events (%s,%s) for %s' % (period, diff, d))
        try:
            dp = PyTango.DeviceProxy(d)
            for a in t.attrs:
                dp.poll_attribute(a, int(period))
                if period > 0:
                    ac = dp.get_attribute_config(a)
                    cei = PyTango.ChangeEventInfo()
                    cei.rel_change = str(diff)
                    ac.events.ch_event = cei
                    try:
                        dp.set_attribute_config(ac)
                    except:
                        pass
        except:
            q = raw_input('Unable to configure events for %s, '
                          'do you wish to continue?' % d)
            if 'y' not in q.lower():
                break
    print('done')
Code example #22
def create_simulators(filein,
                      instance='',
                      path='',
                      domains={},
                      tango_host='controls02',
                      filters='',
                      override=True):  #domains = {'wr/rf':'test/rf'}
    path = path or os.path.abspath(os.path.dirname(filein)) + '/'
    print('create_simulators:' + str(
        (filein, instance, path, domains, tango_host)))
    ## THIS CHECK IS MANDATORY, YOU SHOULD EXPORT AND THEN LAUNCH IN DIFFERENT CALLS
    assert tango_host in str(
        fandango.tango.get_tango_host()), 'Use Controls02 for tests!!!'
    devs, org = {}, pickle.load(
        open(filein if '/' in filein else path + filein))
    done = []
    all_devs = fandango.get_all_devices()

    print('>' * 80)

    if not filters:
        print('%d devices in %s: %s' % (len(org), filein, sorted(org.keys())))
        filters = raw_input('Do you want to filter devices? [*/*/*]').lower()

    for d, t in org.items():
        k = ('/'.join(
            d.split('/')[-3:])).lower()  #Removing tango host from the name
        for a, b in domains.items():
            if k.startswith(a): k = k.replace(a, b)
        if not filters or fandango.matchCl(filters, d) or fandango.matchCl(
                filters, org[d].dev_class):
            devs[k] = t

    if override is not False:
        dds = [
            d for d in devs
            if ('/'.join(d.split('/')[-3:])).lower() in all_devs
        ]
        if dds:
            print('%d devices already exist: %s' % (len(dds), sorted(dds)))
            override = raw_input('Do you want to override existing properties?'
                                 ).lower().startswith('y')
        else:
            override = False

    if not instance:
        instance = raw_input(
            'Enter your instance name for the simulated DynamicServer (use "instance-" to use multiple instances):'
        )

    print('>' * 80)

    for d, t in sorted(devs.items()):
        t.dev_class = t.dev_class or d.split('/')[-1]
        klass = 'PyStateComposer' if t.dev_class == 'PyStateComposer' else 'SimulatorDS'
        server = 'DynamicDS'
        instance_temp = '%s%s' % (instance,
                                  t.dev_class) if '-' in instance else instance
        print('%s/%s:%s , "%s" => %s ' %
              (server, instance_temp, d, t.dev_class, klass))
        its_new = ('/'.join(('dserver', server, instance_temp))
                   ).lower() not in all_devs or d.lower() not in all_devs

        if its_new or override:
            print('writing ... %s(%s)' % (type(t), d))
            fandango.tango.add_new_device('%s/%s' % (server, instance_temp),
                                          klass, d)
            for p, v in t.props.items():
                if not p.startswith(
                        '__'
                ):  #p not in ('DynamicCommands','DynamicStates','LoadFromFile','DevicesList') and
                    fandango.tango.put_device_property(d, p, v)
            #Overriding Dynamic* properties
            try:
                fandango.tango.put_device_property(
                    d, 'LoadFromFile',
                    path + '%s_attributes.txt' % t.dev_class)
                fandango.tango.put_device_property(
                    d, 'DynamicAttributes',
                    filter(
                        bool,
                        map(
                            str.strip,
                            open(path + '%s_attributes.txt' %
                                 t.dev_class).readlines())))
                fandango.tango.put_device_property(
                    d, 'DynamicCommands',
                    filter(
                        bool,
                        map(
                            str.strip,
                            open(path + '%s_commands.txt' %
                                 t.dev_class).readlines())))
                fandango.tango.put_device_property(
                    d, 'DynamicStates',
                    filter(
                        bool,
                        map(
                            str.strip,
                            open(path +
                                 '%s_states.txt' % t.dev_class).readlines())))
            except:
                print('Unable to configure %s(%s) properties ' %
                      (d, t.dev_class))
                #traceback.print_exc()

        fandango.tango.put_device_property(d, 'OFFSET',
                                           random.randint(0, len(devs)))
        done.append(d)

    exported = fandango.get_all_devices(exported=True)
    update = [d for d in done if d in exported]
    print('Updating %d Devices ...' % len(update))
    for d in update:
        if fandango.check_device(d):
            print('Updating %s ...' % d)
            try:
                fandango.get_device(d).updateDynamicAttributes()
            except Exception, e:
                print(e)
        else:
            print('%s failed!' % d)
        time.sleep(.2)
Code example #23
File: check.py  Project: dvjdjvu/PyTangoArchiving
def check_db_schema(schema, attributes = None, values = None,
                    tref = -12*3600, n = 1, filters = '*', export = 'json',
                    restart = False, subscribe = False):
    """
    tref is the time that is considered updated (e.g. now()-86400)
    n is used to consider multiple values
    
    attrs: all attributes in db
    on: archived
    off: in db but not currently archived
    ok: updated   
    
    known error causes (attrs not lost but not updated):
    
    nok: attributes are not currently readable
    noevs: attributes not sending events
    novals: attributes never recorded a value
    stall: not updated, but current value matches archiving
    lost: not updated, and values don't match the current ones
    """
    
    t0 = fn.now()
    if hasattr(schema, 'schema'):
        api, schema = schema, schema.schema
    else:
        api = pta.api(schema)

    r = fn.Struct(api=api,schema=schema)    
    if isString(tref): 
        tref = fn.str2time(tref)
    r.tref = fn.now()+tref if tref < 0 else tref
    r.attrs = [a for a in (attributes or api.get_attributes())
                if fn.clmatch(filters,a)]
    print('check_db_schema(%s,attrs[%s],tref="%s",export as %s)' 
          % (schema,len(r.attrs),fn.time2str(r.tref),export))
    
    if restart and schema!='hdbpc':
        archs = [a for a in api.get_archivers() if not fn.check_device(a)]
        if archs:
            try:
                print('Restarting archivers: %s' % str(archs))
                astor = fn.Astor(archs)
                astor.stop_servers()
                astor.start_servers()
            except:
                traceback.print_exc()
        
        stopped = api.get_stopped_attributes()
        print('Restarting %d stopped attributes' % len(stopped))
        api.restart_attributes(stopped)
    
    r.on = [a for a in api.get_archived_attributes() if a in r.attrs]
    r.off = [a for a in r.attrs if a not in r.on]
    
    r.archs = fn.defaultdict(list)
    r.pers = fn.defaultdict(list)
    r.values = load_schema_values(api,r.on,values,n,tref=tref)
    
    if schema in ('tdb','hdb'):
        [r.archs[api[k].archiver].append(k) for k in r.on]
    else:
        r.rvals = r.values
        r.freq, r.values = {}, {}
        for k,v in r.rvals.items():
            try:
                if n > 1:
                    v = v[0] if isSequence(v) and len(v) else v
                    r.values[k] = v[0] if isSequence(v) and len(v) else v
                    r.freq[k] = v and float(len(v))/abs(v[0][0]-v[-1][0])
                else:
                    r.values[k] = v
            except Exception as e:
                print(k,v)
                print(fn.except2str())
                
        for k in api.get_archivers():
            r.archs[k] = api.get_archiver_attributes(k)
        for k in api.get_periodic_archivers():
            r.pers[k] = api.get_periodic_archivers_attributes(k)

    # Get all updated attributes
    r.ok = [a for a,v in r.values.items() if v and v[0] > r.tref]
    # Try to read not-updated attributes
    r.check = dict((a,fn.check_attribute(a)
                    ) for a in r.on if a not in r.ok)
    #r.novals = [a for a,v in r.values.items() if not v]
    r.nok, r.stall, r.noevs, r.lost, r.novals, r.evs, r.rem = [],[],[],[],[],{},[]
    # Method to compare numpy values
    
    for a,v in r.check.items():
        state = check_archived_attribute(a, v, default=CheckState.LOST, 
            cache=r, tref=r.tref, 
            check_events = subscribe and not api.is_periodic_archived(a))
        {
            #CheckState.ON : r.on,
            #CheckState.OFF : r.off,
            CheckState.OK : r.ok, #Shouldn't be any ok in check list               
            CheckState.NO_READ : r.nok,
            CheckState.STALL : r.stall,
            CheckState.NO_EVENTS : r.noevs,
            CheckState.LOST : r.lost,
            CheckState.UNK : r.novals,
         }[state].append(a)
                
    # SUMMARY
    r.summary = schema +'\n'
    r.summary += ','.join(
        """on: archived
        off: not archived
        ok: updated   
        nok: not readable
        noevs: no events
        novals: no values
        stall: not changing
        lost: not updated
        """.split('\n'))+'\n'
    
    getline = lambda k,v,l: '\t%s:\t:%d\t(%s)' % (k,len(v),l)
    
    r.summary += '\n\t%s:\t:%d\tok+stall: %2.1f %%' % (
        'attrs',len(r.attrs),
        (100.*(len(r.ok)+len(r.stall))/(len(r.on) or 1e12)))
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'on','off',len(r.on),len(r.off))
    #if r.off > 20: r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'ok','nok',len(r.ok),len(r.nok))
    if len(r.nok) > 10: 
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'noevs','novals',len(r.noevs),len(r.novals))
    if len(r.novals) > 1: 
        r.summary+=' !!!'
    r.summary += '\n\t%s/%s:\t:%d/%d' % (
        'lost','stall',len(r.lost),len(r.stall))
    if len(r.lost) > 1: 
        r.summary+=' !!!'
    r.summary += '\n'
        
    r.archivers = dict.fromkeys(api.get_archivers())
    for d in sorted(r.archivers):
        r.archivers[d] = api.get_archiver_attributes(d)
        novals = [a for a in r.archivers[d] if a in r.novals]   
        lost = [a for a in r.archivers[d] if a in r.lost]
        if (len(novals)+len(lost)) > 2:
            r.summary += ('\n%s (all/novals/lost): %s/%s/%s' 
                % (d,len(r.archivers[d]),len(novals),len(lost)))
            
    if hasattr(api,'get_periodic_archivers'):
        r.periodics = dict.fromkeys(api.get_periodic_archivers())
        for d in sorted(r.periodics):
            r.periodics[d] = api.get_periodic_archiver_attributes(d)
            novals = [a for a in r.periodics[d] if a in r.novals]
            lost = [a for a in r.periodics[d] if a in r.lost]
            if len(novals)+len(lost) > 2:
                r.summary += ('\n%s (all/novals/lost): %s/%s/%s' % 
                    (d,len(r.periodics[d]),len(novals),len(lost)))
        
        r.perattrs = [a for a in r.on if a in api.get_periodic_attributes()]
        r.notper = [a for a in r.on if a not in r.perattrs]
        
        
    r.summary += '\nfinished in %d seconds\n\n'%(fn.now()-t0)
    print(r.summary)
    
    if restart:
        try:
            retries = r.lost+r.novals+r.nok
            print('restarting %d attributes' % len(retries))
            api.restart_attributes(retries)
        except:
            traceback.print_exc()
    
    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json','pck','pickle','txt'):
                x = '/tmp/%s.%s' % (schema,x)
            print('Saving %s file with keys:\n%s' % (x,r.keys()))
            if 'json' in x:
                fn.dict2json(r.dict(),x)
            else:
                f = open(x,'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(r.dict(),f)
                else:
                    f.write(fn.dict2str(r.dict()))
                f.close()     
                
    for k,v in r.items():
        if fn.isSequence(v):
            r[k] = sorted(v)
                
    return r
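
A minimal sketch of how the returned report could be inspected; the schema name and arguments are assumptions, and check_db_schema() is the function defined above:

r = check_db_schema('hdbpp', tref=-12 * 3600, export=None, restart=False)

print(r.summary)
print('%d attributes look lost' % len(r.lost))
print('%d attributes never recorded a value' % len(r.novals))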
Code example #24
def check_archiving_schema(
        schema='hdb',
        attributes=[],values={},
        ti = None,
        period = 7200,
        old_period=24*3600*90,\
        exclude=['*/waveid','*/wavename','*/elotech-*'],
        use_index = True,
        loads = True,
        action=False,
        trace=True,
        export=None):

    ti = fn.now() if ti is None else str2time(ti) if isString(ti) else ti

    api = pta.api(schema)
    is_hpp = isinstance(api, pta.HDBpp)
    check = dict()
    old_period = 24*3600*old_period if old_period < 1000 \
        else (24*old_period if old_period<3600 else old_period)

    allattrs = api.get_attributes() if hasattr(
        api, 'get_attributes') else api.keys()
    print('%s contains %d attributes' % (schema, len(allattrs)))

    if attributes:
        if fn.isString(attributes) and fn.isRegexp(attributes):
            tattrs = [a for a in allattrs if clsearch(attributes, a)]
        else:
            attributes = map(fn.tango.get_normal_name, fn.toList(attributes))
            tattrs = [
                a for a in allattrs
                if fn.tango.get_normal_name(a) in attributes
            ]

    else:
        tattrs = allattrs

    excluded = [a for a in tattrs if any(fn.clmatch(e, a) for e in exclude)]
    tattrs = [a for a in tattrs if a not in excluded]

    print('%d attributes to check' % len(tattrs))
    if not len(tattrs):
        return

    if excluded:
        print('\t%d attributes excluded' % len(excluded))

    archived = {}
    for a in tattrs:
        if hasattr(api, 'get_attribute_archiver'):
            arch = api.get_attribute_archiver(a)
        else:
            arch = api[a].archiver
        if arch:
            archived[a] = arch

    print('\t%d attributes are archived' % len(archived))

    #Getting Tango devices currently not running
    alldevs = set(t.rsplit('/', 1)[0] for t in tattrs)
    #tdevs = filter(fn.check_device,alldevs)
    #nodevs = [fn.tango.get_normal_name(d) for d in alldevs if d not in tdevs]
    #if nodevs:
    #print('\t%d devices are not running' % len(nodevs))

    archs = sorted(set(archived.values()))
    if loads:
        astor = fn.Astor()
        astor.load_from_devs_list(archs)
        loads = fn.defaultdict(list)
        for k, s in astor.items():
            for d in s.get_device_list():
                d = fn.tango.get_normal_name(d)
                for a in archived:
                    if fn.tango.get_normal_name(archived[a]) == d:
                        loads[k].append(a)
        for k, s in sorted(loads.items()):
            print('\t%s archives %d attributes' % (k, len(s)))

    noarchs = [
        fn.tango.get_normal_name(d) for d in archs if not fn.check_device(d)
    ]
    if noarchs:
        print('\t%d archivers are not running: %s' % (len(noarchs), noarchs))

    ###########################################################################

    if isString(values) and values.endswith('.pck'):
        print('\nLoading last values from %s file\n' % values)
        import pickle
        values = pickle.load(open(values))

    elif isString(values) and values.endswith('.json'):
        print('\nLoading last values from %s file\n' % values)
        values = fn.json2dict(values)

    elif not use_index or is_hpp:
        print('\nGetting last values ...\n')
        for a in tattrs:
            values[a] = api.load_last_values(a)

    else:
        print('\nGetting updated tables from database ...\n')
        tups = pta.utils.get_table_updates(schema)
        # Some tables do not update MySQL index tables
        t0 = [a for a in archived if a in tattrs and not tups[api[a].table]]
        check.update((t, check_attribute(t, readable=True)) for t in t0
                     if not check.get(t))
        t0 = [t for t in t0 if check[t]]
        print('%d/%d archived attributes have indexes not updated ...' %
              (len(t0), len(archived)))
        if t0 and len(t0) < 100:
            vs = api.load_last_values(t0)
            tups.update((api[t].table, api[t].last_date) for t in t0)

        for a in tattrs:
            if a in tups:
                values[a] = [tups[api[a].table], 0]

    for k, v in values.items():
        if (len(v) if isSequence(v) else v):
            if isinstance(v, dict):
                v = v.values()[0]
            if isSequence(v) and len(v) == 1:
                v = v[0]
            if v and not isNumber(v[0]):
                v = [date2time(v[0]), v[1]]
            values[k] = v
        else:
            values[k] = [] if isSequence(v) else None

    print('%d values obtained' % len(values))

    ###########################################################################

    now = fn.now()
    result = fn.Struct()
    times = [t[0] for t in values.values() if t]
    futures = [t for t in times if t > now]
    times = [t for t in times if t < now]
    tmiss = []
    tfutures = [k for k, v in values.items() if v and v[0] in futures]
    tmin, tmax = min(times), max(times)
    print('\toldest update was %s' % time2str(tmin))
    print('\tnewest update was %s' % time2str(tmax))
    if futures:
        print('\t%d attributes have values in the future!' % len(futures))

    tnovals = [a for a in archived if not values.get(a, None)]
    if tnovals:
        print('\t%d archived attributes have no values' % len(tnovals))
    try:
        tmiss = [
            a for a, v in values.items()
            if v and old_period < v[0] < ti - period and a not in archived
        ]
    except:
        print(values.items()[0])
    if tmiss:
        print('\t%d/%d attrs with values are not archived anymore' %
              (len(tmiss), len(tattrs)))

    result.Excluded = excluded
    result.Schema = schema
    result.All = tattrs
    result.Archived = values

    result.NoValues = tnovals
    result.MissingOrRemoved = tmiss

    result.TMin = tmin
    result.TMax = tmax
    result.Futures = tfutures

    tup = sorted(a for a in values if values[a] and values[a][0] > ti - period)
    tok = [a for a in tup if values[a][1] not in (None, [])]
    print('\n%d/%d archived attributes are updated since %s - %s' %
          (len(tup), len(archived), ti, period))
    print('%d archived attributes are fully ok\n' % (len(tok)))

    tnotup = sorted(a for a in values
                    if values[a] and values[a][0] < ti - period)
    print('\t%d archived attrs are not updated' % len(tnotup))
    tupnoread = [
        a for a in tup if not values[a][1] and fn.read_attribute(a) is None
    ]

    reads = dict((a, fn.read_attribute(a)) for a in tnotup)
    tnotupread = [a for a in tnotup if reads[a] is not None]
    print('\t%d not updated attrs are readable (Lost)' % len(tnotupread))
    print('\t%d of them are not floats' %
          len([t for t in tnotupread if not isinstance(reads[t], float)]))
    print('\t%d of them are states' %
          len([t for t in tnotupread if t.lower().endswith('/state')]))
    print('\t%d of them seem motors' %
          len([t for t in tnotupread if t.lower().endswith('/position')]))

    tnotupevs = [a for a in tnotupread if fn.tango.check_attribute_events(a)]
    print('\t%d not updated attrs are readable and have events (LostEvents)' %
          len(tnotupevs))

    tnotupnotread = [a for a in tnotup if a not in tnotupread]
    print('\t%d not updated attrs are not readable' % len(tnotupnotread))

    result.Lost = tnotupread
    result.LostEvents = tnotupevs

    losts = (tnotupevs if is_hpp else tnotupread)

    diffs = dict()
    for a in losts:
        try:
            v, vv = values.get(a, (None, ))[1], reads[a]
            if fn.isSequence(v): v = fn.toList(v)
            if fn.isSequence(vv): vv = fn.toList(vv)
            diffs[a] = v != vv
            if fn.isSequence(diffs[a]):
                diffs[a] = any(diffs[a])
            else:
                diffs[a] = bool(diffs[a])
        except:
            diffs[a] = None

    fams = fn.defaultdict(list)
    for a in tnotupread:
        fams['/'.join(a.split('/')[-4:-2])].append(a)
    for f in sorted(fams):
        print('\t%s: %d attrs not updated' % (f, len(fams[f])))

    print()

    differ = [a for a in losts if diffs[a]]  #is True]
    print('\t%d/%d not updated attrs have also wrong values!!!' %
          (len(differ), len(losts)))

    rd = pta.Reader()
    only = [a for a in tnotupread if len(rd.is_attribute_archived(a)) == 1]
    print('\t%d/%d not updated attrs are archived only in %s' %
          (len(only), len(losts), schema))
    result.LostDiff = differ
    print()

    archs = sorted(set(archived.values()))
    astor = fn.Astor()
    astor.load_from_devs_list(archs)
    badloads = fn.defaultdict(list)
    for k, s in astor.items():
        for d in s.get_device_list():
            d = fn.tango.get_normal_name(d)
            for a in losts:
                if fn.tango.get_normal_name(archived[a]) == d:
                    badloads[k].append(a)
    for k, s in badloads.items():
        if len(s):
            print('\t%s archives %d lost attributes' % (k, len(s)))

    print('\t%d updated attrs are not readable' % len(tupnoread))

    result.ArchivedAndReadable = tok
    result.Updated = tup
    result.NotUpdated = tnotup
    result.Unreadable = tnotupnotread
    #result.DeviceNotRunning = nodevs
    result.ArchiverNotRunning = noarchs

    result.LostFamilies = fams

    # Tnones is for readable attributes not being archived
    tnones = [
        a for a in archived
        if (a not in values or values[a] and values[a][1] in (None, []))
        and a not in tupnoread and a not in tnotupread
    ]
    tupnones = [a for a in tnones if a in tup]

    if tupnones:
        print('\t%d archived readable attrs record empty values' %
              len(tupnones))

    result.Nones = tnones

    if 0:

        get_ratio = lambda a, b: float(len(a)) / float(len(b))

        #result.ArchRatio = get_ratio([t for t in readarch if t not in tnotup],readarch)
        #result.ReadRatio = get_ratio(result.Readable,tattrs)
        #result.LostRatio = get_ratio([a for a in tread if a in tnotup],tread)
        #result.MissRatio = get_ratio([a for a in tread if a not in tarch],tread)
        #result.OkRatio = 1.0-result.LostRatio-result.MissRatio

        #result.Summary = '\n'.join((
        #('Checking archiving of %s attributes'%(len(attributes) if attributes else schema))
        #,('%d attributes in %s, %d are currently active'%(len(api),schema,len(tarch)))
        #,('%d devices with %d archived attributes are not running'%(len(nodevs),len([a for a in api if a.rsplit('/',1) in nodevs])))
        #,('%d archived attributes (%2.1f %%) are unreadable! (check and remove)'%(len(tnoread),1e2*get_ratio(tnoread,tarch)))
        #,('%d readable attributes are not archived'%(len(tmiss)))
        #,('%d attributes (readable or not) are updated (%2.1f %% of all readables)'%(len(tok),1e2*result.OkRatio))
        #,('-'*80)
        #,('%d archived attributes (readable or not) are not updated!'%len(tnotup))
        #,('%d archived and readable attributes are not updated! (check and restart?)'%len(treadnotup))
        #,('-'*80)
        #,('%d readable attributes have been removed in the last %d days!'%(len(removed),old_period/(24*3600)))
        #,('%d readable scalar attributes are not being archived (not needed anymore?)'%len(tmscalar))
        #,('%d readable array attributes are not being archived (Ok)'%len(tmarray))
        #,('%d readable array attributes are archived (Expensive)'%len(tarray))
        #,('')))

        #if trace: print(result.Summary)
        #print('%d readable lost,Ok = %2.1f%%, %2.1f %% over all Readables (%2.1f %% of total)'%\
        #(len(treadnotup),1e2*result.ArchRatio,1e2*result.OkRatio,1e2*result.ReadRatio))

    if action:
        if action == 'start_devices':
            print('Executing action %s' % action)
            api.start_devices()

        if action == 'restart_all':
            print('Executing action %s' % action)
            devs = api.get_archivers()
            astor = fn.Astor()
            print('Restarting %d devs: %s' % (len(devs), devs))
            astor.load_from_devs_list(devs)
            astor.stop_servers()
            fn.wait(10.)
            astor.start_servers()

        #print('NO ACTIONS ARE GONNA BE EXECUTED, AS THESE ARE ONLY RECOMMENDATIONS')
        #print("""
        #api = PyTangoArchiving.HDBpp(schema)
        #api.start_devices()

        #or

        #api = PyTangoArchiving.ArchivingAPI('%s')
        #lostdevs = sorted(set(api[a].archiver for a in result.NotUpdated))
        #print(lostdevs)
        #if lostdevs < a_reasonable_number:
        #astor = fn.Astor()
        #astor.load_from_devs_list(lostdevs)
        #astor.stop_servers()
        #fn.time.sleep(10.)
        #astor.start_servers()
        #"""%schema)

    print('\nfinished in %d seconds\n\n' % (fn.now() - ti))

    if export is not None:
        if export is True:
            export = 'txt'
        for x in (export.split(',') if isString(export) else export):
            if x in ('json', 'pck', 'pickle', 'txt'):
                x = '/tmp/%s.%s' % (schema, x)
            print('Saving %s file with keys:\n%s' % (x, result.keys()))
            if 'json' in x:
                fn.dict2json(result.dict(), x)
            else:
                f = open(x, 'w')
                if 'pck' in x or 'pickle' in x:
                    pickle.dump(result.dict(), f)
                else:
                    f.write(fn.dict2str(result.dict()))
                f.close()

    return result
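
A similar sketch for the checker above; the inspected keys come from the code, while calling it this way with these arguments is an assumption:

result = check_archiving_schema('hdb', period=7200, export=None)

print('%d attributes not updated' % len(result.NotUpdated))
print('%d readable but not updated (lost)' % len(result.Lost))
print('archivers not running: %s' % result.ArchiverNotRunning)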
Code example #25
#!/usr/bin/env python

import fandango as fn

devs = fn.tango.get_class_devices('HdbEventSubscriber')

for d in devs:
    try:
        if not fn.check_device(d):
            fn.Astor(d).stop_servers()
            fn.Astor(d).start_servers()
        else:
            # Wait to next iteration before setting polling
            dp = fn.get_device(d)
            dp.poll_command('start',1200000)
            print(d,'done')
    except:
        print(fn.getLastException())
Code example #27
        tree = PanelDescription('Tree',
            classname = 'vacca.VaccaTree',#'vacca.VaccaTree',#'TaurusDevTree',
            model = CUSTOM_TREE or ','.join(DEVICES),
            sharedDataRead={'LoadItems':'addModels',
                ##DISABLED BECAUSE TRIGGERED RECURSIVE SELECTION, TO BE AVOIDED IN TREE
                #'SelectedInstrument':'findInTree',
                }, #It will load devices from synoptic
            sharedDataWrite={'SelectedInstrument':'deviceSelected(QString)'}
            )

    #: USE_DEVICE_PANEL:  True or False, To Show by default the DevicePanel
    USE_DEVICE_PANEL = USE_DEVICE_PANEL
    if USE_DEVICE_PANEL:
        print('\t>>> Loading Device panel (%s)...' % DEVICE)
        from vacca.panel import VaccaPanel
        m = DEVICE if DEVICE and fn.check_device(DEVICE) else None
        panel = VaccaPanel.getPanelDescription('Device',model=m)
        
    from vacca.panel import VaccaPanel
    if AttributeFilters: VaccaPanel.setAttributeFilters(AttributeFilters)
    if IconMap: VaccaPanel.setIconMap(IconMap)
    if CommandFilters: VaccaPanel.setCommandFilters(CommandFilters)

    #: JDRAW_FILE:  The JDRAW file to create the Synoptic, 
    #: it can be .jdw or .svg but the second option will require the svgsynoptic module from maxlab.
    JDRAW_FILE = JDRAW_FILE

    if JDRAW_FILE:
        print('\t>>> Loading Synoptic panel new ... %s, %s, %s'%(JDRAW_FILE,
                                                         JDRAW_HOOK, JDRAW_TREE))
        from vacca.synoptics import VaccaSynoptic
Code example #28
File: gen_simulation.py  Project: PhilLAL/SimulatorDS
def create_simulators(filein,
                      instance='',
                      path='',
                      domains={},
                      server='',
                      tango_host='',
                      filters='',
                      override=True):
    #domains = {'wr/rf':'test/rf'}

    path = path or os.path.abspath(os.path.dirname(filein)) + '/'
    print('create_simulators:' + str(
        (filein, instance, path, domains, tango_host)))
    files = fd.listdir(path)
    if not any(f.endswith('_attributes.txt') for f in files):
        q = raw_input('Property files do not exist yet,\n'
                      'Do you want to generate them? (y/n)')
        if q.lower().startswith('y'):
            cur = os.path.abspath(os.curdir)
            os.chdir(path)
            generate_class_properties(filein)
            os.chdir(cur)

    ## CHECK IS MANDATORY, YOU SHOULD EXPORT AND SIMULATE IN DIFFERENT HOSTS
    assert tango_host and tango_host in str(fd.tango.get_tango_host()),\
                'Tango Host (%s!=%s) does not match!'%(tango_host,fd.tango.get_tango_host())

    devs, org = {}, pickle.load(
        open(filein if '/' in filein else path + filein))
    done = []

    all_devs = fd.get_all_devices()

    print('>' * 80)

    if not filters:
        print('%d devices in %s: %s' % (len(org), filein, sorted(org.keys())))
        filters = raw_input('Enter a filter for device names: [*/*/*]').lower()

    for d, t in org.items():
        k = ('/'.join(
            d.split('/')[-3:])).lower()  #Removing tango host from the name
        for a, b in domains.items():
            if k.startswith(a): k = k.replace(a, b)
        if not filters or fd.matchCl(filters, d) or fd.matchCl(
                filters, org[d].dev_class):
            devs[k] = t

    if override is not False:
        dds = [
            d for d in devs
            if ('/'.join(d.split('/')[-3:])).lower() in all_devs
        ]
        if dds:
            print('%d devices already exist: %s' % (len(dds), sorted(dds)))
            override = raw_input(
                'Do you want to override existing properties? (y/n)').lower(
                ).startswith('y')
        else:
            override = False

    if not instance:
        instance = raw_input(
            'Enter your instance name for the simulated server (use "instance-" to use multiple instances):'
        )
    elif '/' in instance:
        server, instance = instance.split('/')

    keepclass = 'y' in raw_input('Keep original Class names?').lower()

    if keepclass:
        server = 'SimulatorDS'
    elif not server:
        server = raw_input(
            'Enter your server name (SimulatorDS/DynamicDS): [SimulatorDS]') \
                or 'SimulatorDS'

    print('>' * 80)

    for d, t in sorted(devs.items()):
        t.dev_class = t.dev_class or d.split('/')[-1]
        if t.dev_class == 'PyStateComposer':
            klass = t.dev_class
        elif keepclass:
            klass = t.dev_class + '_sim'
        else:
            klass = 'SimulatorDS'

        instance_temp = '%s%s' % (instance,
                                  t.dev_class) if '-' in instance else instance
        print('%s/%s:%s , "%s" => %s ' %
              (server, instance_temp, d, t.dev_class, klass))
        its_new = ('/'.join(('dserver', server, instance_temp))
                   ).lower() not in all_devs or d.lower() not in all_devs

        if its_new or override:
            print('writing ... %s(%s)' % (type(t), d))
            fd.tango.add_new_device('%s/%s' % (server, instance_temp), klass,
                                    d)
            for p, v in t.props.items():
                if not p.startswith(
                        '__'
                ):  #p not in ('DynamicCommands','DynamicStates','LoadFromFile','DevicesList') and
                    fd.tango.put_device_property(d, p, v)
            #Overriding Dynamic* properties
            try:
                fd.tango.put_device_property(
                    d, 'LoadFromFile',
                    path + '%s_attributes.txt' % t.dev_class)
                fd.tango.put_device_property(
                    d, 'DynamicAttributes',
                    filter(
                        bool,
                        map(
                            str.strip,
                            open(path + '%s_attributes.txt' %
                                 t.dev_class).readlines())))
                fd.tango.put_device_property(
                    d, 'DynamicCommands',
                    filter(
                        bool,
                        map(
                            str.strip,
                            open(path + '%s_commands.txt' %
                                 t.dev_class).readlines())))
                fd.tango.put_device_property(
                    d, 'DynamicStates',
                    filter(
                        bool,
                        map(
                            str.strip,
                            open(path +
                                 '%s_states.txt' % t.dev_class).readlines())))
            except:
                print('Unable to configure %s(%s) properties ' %
                      (d, t.dev_class))
                #traceback.print_exc()

        fd.tango.put_device_property(d, 'OFFSET', random.randint(0, len(devs)))
        done.append(d)

    exported = fd.get_all_devices(exported=True)
    update = [d for d in done if d in exported]
    print('Updating %d Devices ...' % len(update))
    for d in update:
        if fd.check_device(d):
            print('Updating %s ...' % d)
            try:
                fd.get_device(d).updateDynamicAttributes()
            except Exception, e:
                print(e)
        else:
            print('%s failed!' % d)
        time.sleep(.2)