Example #1
 def start(self):
     filename = '%s (%s)' % (self.name, 'triggers')
     self.manager = self.nodespace.as_node(self.manager)
     self._pdo_lock.acquire()
     try:
         if self._triggers is None:
             self._triggers = PersistentDictionary(filename,
                                                   encode=None,
                                                   decode=None)
         if not self._triggers:
             pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
             if os.path.exists(pdodata.filename()):
                 msglog.log('broadway', msglog.types.INFO,
                            "Migrating previous trigger data.")
                 pdodata.triggers = {}
                 pdodata.load()
                 self._triggers.update(pdodata.triggers)
                 pdodata.destroy()
             del (pdodata)
         self._loadtriggers()
         if self.secured:
             self.security_manager = self.as_node(
                 "/services/Security Manager")
         else:
             self.security_manager = None
     finally:
         self._pdo_lock.release()
     return super(TriggersConfigurator, self).start()
Example #2
 def start(self):
     self._pdo = PersistentDataObject(self)
     self._pdo.last_dictionary = None
     self._pdo.load()
     self._started = 1
     self.parent.event_subscribe(self, AlarmTriggerEvent)
     CompositeNode.start(self)
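
Most of the start() and configure() methods in this collection follow the same idiom: construct the PersistentDataObject, assign a default for every attribute that should persist, call load() so previously saved values overlay those defaults, and call save() after changing them. A minimal standalone sketch of that pattern, using a hypothetical 'example_state' store name (the constructor also accepts an owning node, as in the methods shown here):

    from mpx.lib.persistent import PersistentDataObject

    # Hypothetical store name; the examples above pass the owning node instead.
    pdo = PersistentDataObject('example_state')
    pdo.last_value = None    # defaults first...
    pdo.count = 0
    pdo.load()               # ...then load() overlays any previously saved values
    pdo.count += 1
    pdo.save()               # save() writes the current attributes back to disk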
Example #3
 def start(self):
     if self.__running:
         return
     if self.debug:
         msglog.log('EnergywiseManager :', msglog.types.INFO,
                    'Inside start')
     CompositeNode.start(self)
     #        start_node = as_node('/services/EnergywiseManager/')
     #        self.configure_trend_in_switches(start_node, 60)
     self.__running = True
     self._pdo_lock.acquire()
     self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
     self._pdo.trends = {}
     self._pdo.load()
     self._pdo_lock.release()
     if self.has_child('trends'):
         self.trends = self.get_child('trends')
     else:
         self.trends = CompositeNode()
         self.trends.configure({'parent': self, 'name': 'trends'})
         self.trends.start()
     # start trending for saved domains
     for domain, freq in self._pdo.trends.items():
         try:
             start_node = as_node(domain)
             # self.configure_trend_in_switches( start_node,freq )
             start_node.new_trend(freq)
         except:
             self.delete_trend_configuration(domain)
     return
Example #4
    def start(self):
        self._PDO = PersistentDataObject(self,dmtype=GC_NEVER)
        self._PDO.exception_log_last_time = 0.0
        self._PDO.load()
        # Scan subtree of grandparent logger for channel (column) 'fsg_attrs'
        # nodes containing info required for FSG Demo, so that we don't have
        # to do the scan every time format() is called:
        self._channels = {}
        columns_node = self.parent.parent.parent.get_child('columns')
        column_nodes = columns_node.children_nodes()
        for column_node in column_nodes:
            if column_node.name == 'timestamp':
                continue
            assert isinstance(column_node, ChannelAttrsColumn) \
                   or isinstance(column_node, ChannelAttrsDeltaColumn), \
                   'Column %s should be class ChannelAttrsColumn, but is class %s' \
                   % (column_node.name, column_node.__class__.__name__)
            self._channels[column_node.name] = {
                'channel_node': column_node,
                'values': [],
            }

        self._exception_log = None
        try:
            self._exception_log = as_node(self.exception_log_url)
        except ENoSuchName:
            pass
        return
Example #5
 def start(self):
     ServiceNode.start(self)
     # this will correctly add the msglog as a child
     #  to the logger.
     if 'msglog' not in self.children_names():
         columns = mpx.lib.msglog.get_columns()
         log = Log()
         log.configure({'name': 'msglog', 'parent': self})
         for c in columns:
             column = mpx.lib.factory('mpx.service.logger.column')
             config = c.configuration()
             config['parent'] = log
             column.configure(config)
     self._logs = PersistentDataObject(self)
     self._logs.names = []
     self._logs.load()
     for name in self._logs.names:
         if ((not mpx.lib.log.log_exists(name))
                 and (name not in self.children_names())):
             log = mpx.lib.log.log(name)
             log.destroy()
             del (log)
     self._logs.names = []
     for child in self.children_nodes():
         if not isinstance(child, Alias):
             # Don't manage other managers' logs...
             self._logs.names.append(child.name)
     self._logs.save()
Example #6
 def start(self):
     self.managernode = self.as_node(self.manager)
     self.synclock.acquire()
     try:
         alarmsname = '%s (%s)' % (self.name, 'alarms')
         eventsname = '%s (%s)' % (self.name, 'events')
         self.alarms = PersistentDictionary(alarmsname,
                                            encode=self.encode,
                                            decode=self.decode)
         self.events = PersistentDictionary(eventsname,
                                            encode=self.encode,
                                            decode=self.decode)
         # Migrate PDO data from old style persistence.
         pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.INFO,
                        "Migrating previous alarm and event data")
             pdodata.events = {}
             pdodata.alarms = {}
             pdodata.load()
             migrate(pdodata, self.decode)
             self.rebuildstorage()
             pdodata.destroy()
         del(pdodata)
     finally:
         self.synclock.release()
     self.securitymanager = self.as_node('/services/Security Manager')
     
     register = self.managernode.register_for_type
     self.sub = register(self.handle_event, StateEvent)
     self.running.set()
     super(AlarmConfigurator, self).start()
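
Example #1 above, this example, and Examples #11 and #21 below share a migration idiom: build the new PersistentDictionary, and only if it is empty look for an old-style PersistentDataObject file, load it, copy its contents across, and destroy the old file. A distilled sketch of that flow follows; the helper name is hypothetical, and the assumption that PersistentDictionary is importable from mpx.lib.persistent alongside PersistentDataObject is mine, not confirmed by these examples.

    import os
    from mpx.lib.persistent import PersistentDataObject, PersistentDictionary

    def migrate_pdo(owner, name, attr):
        # Hypothetical helper: 'owner' is the node that owned the old PDO,
        # 'name' the PersistentDictionary name, and 'attr' the PDO attribute
        # that held the persisted mapping.
        store = PersistentDictionary(name, encode=None, decode=None)
        if not store:                        # nothing migrated yet
            old = PersistentDataObject(owner)
            if os.path.exists(old.filename()):
                setattr(old, attr, {})       # default before load(), as above
                old.load()
                store.update(getattr(old, attr))
                old.destroy()                # remove the old-style file
            del old
        return store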
Example #7
    def start(self):
        self.security_manager = self.nodespace.as_node(self.security_manager)
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.valuedrivers = {}
            self._pdo.load()
            valuedriverdumps = self._pdo.valuedrivers.items()
        finally:
            self._pdo_lock.release()
        super(DriverConfigurator, self).start()

        tstart = time.time()
        for drivername, driverdump in valuedriverdumps:
            try:
                IPickles(cPickle.loads(driverdump))()
            except:
                message = self.LOADFAILURE % (self.name, 'Value Driver',
                                              drivername)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Value Driver Configurator loaded '
            '%s nodes in %s seconds.' % (len(valuedriverdumps), tlapse))
        return
Example #8
    def start(self):
        self.nodes = []
        from mpx.service.time.time_zone import TimeZone
        if not IScheduleHolderParent.implementedBy(TimeZone):
            class_implements(TimeZone, IScheduleHolderParent)
        self.security_manager = self.nodespace.as_node(self.security_manager)
        self._pdo_lock.acquire()
        try:
            if not self._pdo:
                self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
                self._pdo.holders = {}
                self._pdo.schedules = {}
                self._pdo.load()
            holderdicts = self._pdo.holders.values()
            holderdumps = []
            for holderdict in holderdicts:
                holderdumps.extend(holderdict.items())
            # schedules = {'local': {'Holder1': {'Sched1': Sched1Dump}, 'Holder2': ...}, 'UTC': ...}
            scheduledicts = []
            for holderdict in self._pdo.schedules.values():
                scheduledicts.extend(holderdict.values())
            scheduledumps = []
            for scheduledict in scheduledicts:
                scheduledumps.extend(scheduledict.items())
        finally:
            self._pdo_lock.release()
        super(ScheduleConfigurator, self).start()
        tstart = time.time()
        for holdername, holderdump in holderdumps:
            try:
                self.nodes.append(IPickles(cPickle.loads(holderdump))())
            except:
                message = self.LOADFAILURE % (self.name, 'Schedule Holder',
                                              holdername)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Schedule Configurator loaded '
            '%s nodes in %s seconds.' % (len(holderdumps), tlapse))

        tstart = time.time()
        for schedulename, scheduledump in scheduledumps:
            try:
                self.nodes.append(IPickles(cPickle.loads(scheduledump))())
            except:
                message = self.LOADFAILURE % (self.name, 'Schedule',
                                              schedulename)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Schedule Configurator loaded '
            '%s nodes in %s seconds.' % (len(scheduledumps), tlapse))
        self.template = self.parent.read_resource(self.page_template)
Example #9
 def configure(self, config):
     self._pdo = PersistentDataObject(self)
     # Used to detect changes to xml files based on timestamp:
     # entries are tuples of (modify time, pickle string).
     self._pdo.stats_dict = {}
     self._pdo.load()
     # write_priority can be set for the entire control service, though
     # it may be specialized at the individual application node level.
     set_attribute(self, 'write_priority', 9, config, int)
     CompositeNode.configure(self, config)
Example #10
 def configure(self, config):
     set_attribute(self, 'directory', '/tmp', config)
     set_attribute(self, 'file_prefix', REQUIRED, config)
     set_attribute(self, 'file_suffix', REQUIRED, config)
     set_attribute(self, 'name_scheme', None, config)
     set_attribute(self, 'timestamp_format', '%s', config)
     Transporter.configure(self, config)
     self._last = PersistentDataObject(self)
     self._last.filename = None
     self._last.count = 1
     self._last.load()
Example #11
 def start(self):
     try:
         self._pdo_lock.acquire()
         try:
             if self.__running:
                 return
             self.__running = True
             self._trendconfig = PersistentDictionary(filename(self),
                                                      encode=None,
                                                      decode=None)
             if not self._trendconfig:
                 pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                 if os.path.exists(pdodata.filename()):
                     msglog.log('broadway', msglog.types.INFO,
                                "Migrating previous trend data")
                     pdodata.trends = {}
                     pdodata.load()
                     self._trendconfig.update(pdodata.trends)
                 del (pdodata)
         finally:
             self._pdo_lock.release()
         super(TrendManager, self).start()
         self.logger = node.as_internal_node(self.logger_url)
         if self.has_child('trends'):
             self.trends = self.get_child('trends')
         else:
             self.trends = CompositeNode()
             self.trends.configure({'parent': self, 'name': 'trends'})
             self.trends.start()
         corrupt_trends = []
         for trendname, trenddump in self._trendconfig.items():
             msg = "Loading trend: %s" % trendname
             msglog.log('trendmanager', msglog.types.INFO, msg)
             try:
                 trend = unmarshal(trenddump)
             except:
                 corrupt_trends.append(trendname)
                 msg = "Failed to load trend: %s" % trendname
                 msglog.log('trendmanager', msglog.types.ERR, msg)
                 msglog.exception(prefix='Handled')
         for trendname in corrupt_trends:
             try:
                 msg = "Deleting trend information: %s" % trendname
                 msglog.log('trendmanager', msglog.types.INFO, msg)
                 self._delete_trend_configuration(trendname)
                 if self.trends.has_child(trendname):
                     trend = self.trends.get_child(trendname)
                     trend.prune(force=True)
             except:
                 msglog.exception(prefix='Handled')
     except:
         self.__running = False
         raise
     return
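
This example, like Example #3 above and Example #21 below, guards its persistence setup with a lock and a running flag so that a second start() call is a no-op and the lock is always released. A bare sketch of that guard, with hypothetical node and attribute names:

    def start(self):
        self._lock.acquire()                # assumed to be a threading.Lock
        try:
            if self._running:
                return                      # already started; nothing to do
            self._running = True
            self._pdo = PersistentDataObject(self)
            self._pdo.state = {}            # defaults, then load()
            self._pdo.load()
        finally:
            self._lock.release()            # released even if setup raises
        return super(MyNode, self).start()  # MyNode is a placeholder base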
Example #12
 def start(self):
     Exporter.start(self)
     if not self.running:
         self.running = 1
         self.connection = as_node(self.connection_node)
         self._time_keeper = PersistentDataObject(self)
         self._time_keeper.start_time = 0
         self._time_keeper.load()
         self._period = self.parent.parent.period
         self._setup_trigger()
     else:
         raise EAlreadyRunning
Example #13
 def _certificate_maintenance(self):
     previous = PersistentDataObject(self)
     previous.cert_config = None
     previous.key_file = None
     previous.server_cert = None
     previous.cert_fingerprint = None
     previous.load()
     c = certificate.CertificateConfiguration(self)
     config = {'C':self.country}
     config['ST'] = self.state
     config['L'] = self.city
     config['O'] = self.organization
     config['OU'] = self.organizational_unit
     config['CN'] = self.common_name
     config['emailAddress'] = self.email
     c.configure(config)
     cert_fingerprint = makecert.get_fingerprint(self.server_cert)
     if previous.cert_fingerprint == cert_fingerprint:
         msglog.log('broadway', msglog.types.INFO,
                    'Certificate Fingerprint Match!!!!')
     else:
         msglog.log('broadway', msglog.types.INFO,
                    'Certificate Fingerprint Mismatch!!!!')
     if c == previous.cert_config and \
        previous.key_file == self.key_file and \
        previous.cert_fingerprint == cert_fingerprint and \
        not certificate.is_outdated(self.server_cert):
         msglog.log('broadway', msglog.types.INFO,
                    'Using existing certificate')
         return
     msglog.log('broadway', msglog.types.INFO, 'Generating new certificate')
     filename = os.path.join(properties.TEMP_DIR, 'cert_config.tmp')
     file = open(filename, 'w')
     c.formatted_output_to_file(file)
     try:
         failed = 1
         makecert.create_from_file(filename, self.key_file,
                                   self.server_cert)
         failed = 0
         msglog.log('broadway', msglog.types.INFO,
                    'Certificate generated')
     except:
         msglog.exception()
         msglog.log('broadway', msglog.types.WARN,
                    'Certificate generation failed')
     file.close()
     os.remove(filename)
     if not failed:
         previous.cert_config = c.configuration()
         previous.key_file = self.key_file
         previous.server_cert = self.server_cert
         previous.cert_fingerprint = makecert.get_fingerprint(self.server_cert)
         previous.save()
     return
Example #14
 def configure(self, config):
     SimpleValue.configure(self, config)
     self._pdo = PersistentDataObject(self)
     self._pdo.value = None
     self._pdo.conversion = None
     self._pdo.load()
     conversion = _get_name(self.conversion)
     if self._pdo.value is None or self._pdo.conversion != conversion:
         self._pdo.value = self.value
         self._pdo.conversion = conversion
         self._pdo.save()
     else:
         self.value = self._pdo.value
Example #15
    def start(self):
        from mpx.lib.persistent import PersistentDataObject

        ServiceNode.start(self)
        if self.debug:
            print 'Garbage Collector Starting!'
        self._data = PersistentDataObject(self, dmtype=GC_NEVER)
        self._data.registered = []
        self._data.load()
        if self.debug:
            print 'GC Data is %s.' % self._data
            print 'GC Data._reg is %s.' % self._data.registered
Example #16
 def start(self):
     Exporter.start(self)
     if not self.running:
         self.running = 1
         self.connection = as_node(self.connection_node)
         self._event_count = self.log_multiple - 1
         self._time_keeper = PersistentDataObject(self)
         self._time_keeper.start_time = 0
         self._time_keeper.load()
         self._period = self.parent.parent.period
         self.parent.parent.event_subscribe(self, LogAddEntryEvent)
     else:
         raise EAlreadyRunning
Example #17
 def test_upgrade(self):
     from mpx.upgrade.persistent import persistent_0
     old = persistent_0.PersistentDataObject('upgrade_test')
     old.purpose = 'testing'
     old.save()
     old_filename = old._persistent.filename
     del (old.__dict__['_persistent'])
     del (old)
     new = PersistentDataObject('upgrade_test')
     self.failIf(os.path.exists(old_filename),
                 'Upgrade failed to remove old version')
     new.purpose = None
     new.load()
     self.failUnless(new.purpose == 'testing',
                     'Upgrade failed to get old value')
     new.destroy()
     del (new)
Example #18
 def start(self):
     self.manager = self.nodespace.as_node(self.manager)
     self.security_manager = as_node(self.security_manager)
     self._pdo = PersistentDataObject(self)
     msg = ('The CloudConfigurator Persistent Object is in the file :%s'
            % str(self._pdo.filename()))
     msglog.log('CloudConfigurator', msglog.types.INFO, msg)
     if os.path.exists(self._pdo.filename()):
         # Migration
         msglog.log('CloudConfigurator', msglog.types.INFO,
                    'PDO Migration in Progress')
         self._pdo.formation = cPickle.dumps(IPickles(self.manager.formation))
         self._pdo.load()
         formation = IPickles(cPickle.loads(self._pdo.formation))()
         msglog.log('CloudConfigurator', msglog.types.INFO,
                    'PDO Migration for the Formation:%s' % str(formation))
         self.manager.update_formation(formation, None)
         self._pdo.destroy()
         del self._pdo
         msglog.log('CloudConfigurator', msglog.types.INFO,
                    'PDO Migration is Complete')
     return super(CloudConfigurator, self).start()
Example #19
 def start(self):
     self.container = self.nodespace.as_node(self.container)
     self._pdo_lock.acquire()
     try:
         self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
         self._pdo.exporters = {}
         self._pdo.load()
         exporterdumps = self._pdo.exporters.values()
     finally:
         self._pdo_lock.release()
     super(ExportersConfigurator, self).start()
     tstart = time.time()
     for exporterdump in exporterdumps:
         IPickles(cPickle.loads(exporterdump))()
     tend = time.time()
     tlapse = tend - tstart
     msglog.log(
         'broadway', msglog.types.INFO, 'Exporter Configurator loaded '
         '%s exporters in %s seconds.' % (len(exporterdumps), tlapse))
     self.manager = self.container
Example #20
 def configure(self, config):
     set_attribute(self, 'host', REQUIRED, config)
     set_attribute(self, 'port', 21, config, int)
     set_attribute(self, 'directory', '', config)
     set_attribute(self, 'username', REQUIRED, config)
     set_attribute(self, 'password', REQUIRED, config)
     #CSCtn64870
     if (config.has_key('timeout') and config['timeout'] == ''):
         config['timeout'] = 'None'
     set_attribute(self, 'timeout', None, config, float)
     set_attribute(self, 'file_prefix', 'cisco', config)
     set_attribute(self, 'file_suffix', '.dat', config)
     set_attribute(self, 'name_scheme', 'timestamp', config)
     set_attribute(self, 'timestamp_format', '%s', config)
     set_attribute(self, 'passive_mode', 1, config, as_boolean)
     set_attribute(self, 'file_append', 0, config, as_boolean)
     Transporter.configure(self, config)
     if self._last is None:
         self._last = PersistentDataObject(self)
         self._last.filename = None
         self._last.count = 1
         self._last.load()
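
Unlike Example #10, this configure() only builds the PersistentDataObject when self._last is None, so repeating configure() does not discard a checkpoint that has already been loaded. A sketch of just that guard, reusing the attribute names from these two examples (the surrounding Transporter class and the initialization of self._last to None in __init__ are assumed):

    def configure(self, config):
        Transporter.configure(self, config)
        if self._last is None:          # build the checkpoint PDO only once
            self._last = PersistentDataObject(self)
            self._last.filename = None  # defaults...
            self._last.count = 1
            self._last.load()           # ...then overlay persisted values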
Example #21
 def start(self):
     if self.is_running():
         raise TypeError("Equipment Monitor already running.")
     if TESTING and not self.test_machines:
         self.test_machines = setup_machines()
         machinecount = len(self.test_machines)
         self.debugout("Setup %d test machines" % machinecount)
     self.synclock.acquire()
     try:
         self.running.set()
         if self.subscriptions and not self.subscriptions.closed():
             self.subscriptions.close()
         self.formatter = None
         self.transporter = None
         children = self.children_nodes()
         for childnode in children:
             if IFormatter.providedBy(childnode):
                 if self.formatter is not None:
                     raise TypeError("Already has formatter child.")
                 self.formatter = childnode
             if ITransporter.providedBy(childnode):
                 if self.transporter is not None:
                     raise TypeError("Already has transporter child.")
                 self.transporter = childnode
         if not self.formatter:
             raise TypeError("Must have one formatter child node.")
         if not self.transporter:
             raise TypeError("Must have one transporter child node.")
         self.smservice = as_node(self.smnodeurl)
         self.subscriptions = PersistentDictionary(
             self.name,
             encode=self.serialize_subscription,
             decode=self.unserialize_subscription)
         pdodata = PersistentDataObject(self)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.WARN,
                        "Equipment Monitor upgrading persistence.")
             migrate = frompdo(pdodata)
             self.subscriptions.update(migrate)
             message = "Equipment Monitor merged %d subscriptions."
             message = message % len(migrate)
             msglog.log('broadway', msglog.types.INFO, message)
             pdodata.destroy()
             msglog.log('broadway', msglog.types.WARN,
                        "Equipment Monitor destroyed old persistence.")
             msglog.log('broadway', msglog.types.INFO,
                        "Equipment Monitor persistence upgrade complete.")
         del (pdodata)
         message = 'Equipment Monitor startup: %s %s'
         for subscription in self.subscriptions.values():
             try:
                 subscription.setup_subscription()
             except:
                 msglog.exception(prefix="handled")
             else:
                 self.debugout(message % ('setup', subscription))
         skipcounts = []
         for i in range(0, 1 + len(self.subscriptions) / 30):
             skipcounts.extend([i + 1] * 30)
         self.setup_work_threads()
         for subscription in self.subscriptions.values():
             try:
                 subscription.start(skipcounts.pop())
             except:
                 msglog.exception(prefix="Handled")
             else:
                 self.debugout(message % ('started', subscription))
     except:
         self.cleanup_resources()
         self.running.clear()
         raise
     finally:
         self.synclock.release()
     super(EquipmentMonitor, self).start()