Example #1
class Logger(ServiceNode):

    ##
    # @author Craig Warren
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    ##
    # @author Craig Warren
    #   starts the logger service
    # @return None
    def start(self):
        ServiceNode.start(self)
        # this will correctly add the msglog as a child
        #  to the logger.
        if 'msglog' not in self.children_names():
            columns = mpx.lib.msglog.get_columns()
            log = Log()
            log.configure({'name':'msglog', 'parent':self})
            for c in columns:
                column = mpx.lib.factory('mpx.service.logger.column')
                config = c.configuration()
                config['parent'] = log
                column.configure(config)
        self._logs = PersistentDataObject(self)
        self._logs.names = []
        self._logs.load()
        for name in self._logs.names:
            if ((not mpx.lib.log.log_exists(name)) and 
                (name not in self.children_names())):
                log = mpx.lib.log.log(name)
                log.destroy()
                del(log)
        self._logs.names = []
        for child in self.children_nodes():
            if not isinstance(child, Alias):
                # Don't manage other managers' logs...
                self._logs.names.append(child.name)
        self._logs.save()

    ##
    # @author Craig Warren
    #   stops the logger service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # @author Craig Warren
    # @param log_name
    #   the name of the log to return
    # @return Log
    #   returns the log; if the log can't be found,
    #   returns None
    def get_log(self,log_name):
        for child in self.children_nodes():
            if child.name == log_name:
                return child
        return None
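Example #1 shows the idiom that recurs throughout these examples: construct the PersistentDataObject against the node that owns it, give every persistent attribute a default, call load() to overlay any previously saved state, mutate, then save(). A minimal sketch of that lifecycle (the node class and attribute name are illustrative, not part of the framework):

    from mpx.lib.persistent import PersistentDataObject

    class ExampleService(ServiceNode):  # hypothetical node, for illustration only
        def start(self):
            ServiceNode.start(self)
            self._pdo = PersistentDataObject(self)  # persistence keyed to this node
            self._pdo.names = []                    # defaults must exist before load()
            self._pdo.load()                        # overlays saved state, if any
            self._pdo.names.append(self.name)       # mutate in memory...
            self._pdo.save()                        # ...then persist explicitly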
Example #2
 def start(self):
     self._pdo = PersistentDataObject(self)
     self._pdo.last_dictionary = None
     self._pdo.load()
     self._started = 1
     self.parent.event_subscribe(self, AlarmTriggerEvent)
     CompositeNode.start(self)
 def start(self):
     if self.__running:
         return
     if self.debug:
         msglog.log('EnergywiseManager :', msglog.types.INFO,
                    'Inside start')
     CompositeNode.start(self)
     #        start_node = as_node('/services/EnergywiseManager/')
     #        self.configure_trend_in_switches(start_node, 60)
     self.__running = True
     self._pdo_lock.acquire()
     self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
     self._pdo.trends = {}
     self._pdo.load()
     self._pdo_lock.release()
     if self.has_child('trends'):
         self.trends = self.get_child('trends')
     else:
         self.trends = CompositeNode()
         self.trends.configure({'parent': self, 'name': 'trends'})
         self.trends.start()
     # start trending for saved domains
     for domain, freq in self._pdo.trends.items():
         try:
             start_node = as_node(domain)
             # self.configure_trend_in_switches( start_node,freq )
             start_node.new_trend(freq)
         except:
             self.delete_trend_configuration(domain)
     return
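Note that this EnergywiseManager.start() acquires self._pdo_lock and releases it without a try/finally, so an exception from load() would leave the lock held; Examples #4 and #8 below show the safer try/finally form.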
Example #4
    def start(self):
        self.security_manager = self.nodespace.as_node(self.security_manager)
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.valuedrivers = {}
            self._pdo.load()
            valuedriverdumps = self._pdo.valuedrivers.items()
        finally:
            self._pdo_lock.release()
        super(DriverConfigurator, self).start()

        tstart = time.time()
        for drivername, driverdump in valuedriverdumps:
            try:
                IPickles(cPickle.loads(driverdump))()
            except:
                message = self.LOADFAILURE % (self.name, 'Value Driver',
                                              drivername)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Value Driver Configurator loaded '
            '%s nodes in %s seconds.' % (len(valuedriverdumps), tlapse))
        return
Example #5
class LastAlarm(CompositeNode,EventConsumerMixin):
    def __init__(self):
        self._last_alarm = None
        self._started = 0
        CompositeNode.__init__(self)
        EventConsumerMixin.__init__(self,self._alarm_triggered)
    def configure(self, config):
        CompositeNode.configure(self, config)
    def configuration(self):
        config = CompositeNode.configuration(self)
        return config
    def start(self):
        self._pdo = PersistentDataObject(self)
        self._pdo.last_dictionary = None
        self._pdo.load()
        self._started = 1
        self.parent.event_subscribe(self,AlarmTriggerEvent)
        CompositeNode.start(self)
    def stop(self):
        self._started = 0
        self.parent.cancel(self,AlarmTriggerEvent)
        CompositeNode.stop(self)
    def _alarm_triggered(self, alarm):
        self._last_alarm = alarm
        self._pdo.last_dictionary = alarm.dictionary()
        self._pdo.save()
    def get(self, skipCache=0):
        return self._last_alarm
    def get_dictionary(self):
        return self._pdo.last_dictionary
Example #6
    def start(self):
        self._PDO = PersistentDataObject(self,dmtype=GC_NEVER)
        self._PDO.exception_log_last_time = 0.0
        self._PDO.load()
        # Scan subtree of grandparent logger for channel (column) 'fsg_attrs'
        # nodes containing info required for FSG Demo, so that we don't have
        # to do the scan every time format() is called:
        self._channels = {}
        columns_node = self.parent.parent.parent.get_child('columns')
        column_nodes = columns_node.children_nodes()
        for column_node in column_nodes:
            if column_node.name == 'timestamp':
                continue
            assert isinstance(column_node, ChannelAttrsColumn) \
                   or isinstance(column_node, ChannelAttrsDeltaColumn), \
                   'Column %s should be class ChannelAttrsColumn, but is class %s' \
                   % (column_node.name, column_node.__class__.__name__)
            self._channels[column_node.name] = {
                'channel_node': column_node,
                'values': [],
                }
        self._exception_log = None
        try:
            self._exception_log = as_node(self.exception_log_url)
        except ENoSuchName:
            pass
        return
Example #7
 def start(self):
     ServiceNode.start(self)
     # this will correctly add the msglog as a child
     #  to the logger.
     if 'msglog' not in self.children_names():
         columns = mpx.lib.msglog.get_columns()
         log = Log()
         log.configure({'name': 'msglog', 'parent': self})
         for c in columns:
             column = mpx.lib.factory('mpx.service.logger.column')
             config = c.configuration()
             config['parent'] = log
             column.configure(config)
     self._logs = PersistentDataObject(self)
     self._logs.names = []
     self._logs.load()
     for name in self._logs.names:
         if ((not mpx.lib.log.log_exists(name))
                 and (name not in self.children_names())):
             log = mpx.lib.log.log(name)
             log.destroy()
             del (log)
     self._logs.names = []
     for child in self.children_nodes():
         if not isinstance(child, Alias):
             # Don't manage other managers' logs...
             self._logs.names.append(child.name)
     self._logs.save()
Example #8
    def start(self):
        self.nodes = []
        from mpx.service.time.time_zone import TimeZone
        if not IScheduleHolderParent.implementedBy(TimeZone):
            class_implements(TimeZone, IScheduleHolderParent)
        self.security_manager = self.nodespace.as_node(self.security_manager)
        self._pdo_lock.acquire()
        try:
            if not self._pdo:
                self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
                self._pdo.holders = {}
                self._pdo.schedules = {}
                self._pdo.load()
            holderdicts = self._pdo.holders.values()
            holderdumps = []
            for holderdict in holderdicts:
                holderdumps.extend(holderdict.items())
            # schedules = {'local': {'Holder1': {'Sched1': Sched1Dump}, 'Holder2': ...}, 'UTC': ...}
            scheduledicts = []
            for holderdict in self._pdo.schedules.values():
                scheduledicts.extend(holderdict.values())
            scheduledumps = []
            for scheduledict in scheduledicts:
                scheduledumps.extend(scheduledict.items())
        finally:
            self._pdo_lock.release()
        super(ScheduleConfigurator, self).start()
        tstart = time.time()
        for holdername, holderdump in holderdumps:
            try:
                self.nodes.append(IPickles(cPickle.loads(holderdump))())
            except:
                message = self.LOADFAILURE % (self.name, 'Schedule Holder',
                                              holdername)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Schedule Configurator loaded '
            '%s nodes in %s seconds.' % (len(holderdumps), tlapse))

        tstart = time.time()
        for schedulename, scheduledump in scheduledumps:
            try:
                self.nodes.append(IPickles(cPickle.loads(scheduledump))())
            except:
                message = self.LOADFAILURE % (self.name, 'Schedule Holder',
                                              schedulename)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Schedule Configurator loaded '
            '%s nodes in %s seconds.' % (len(scheduledumps), tlapse))
        self.template = self.parent.read_resource(self.page_template)
Example #9
 def __init__(self, *args, **kw):
     self.integer = None
     self.integers = None
     self.letters = None
     self.letterset = None
     self.letterlist = None
     self.letterdict = None
     PDO.__init__(self, *args, **kw)
Example #10
 def __init__(self, node):
     self.__lock = Lock()
     self.__last_save = {}
     self.max_seq = -1
     self.pending_seqs = []
     self.inprocess_seqs = []
     PersistentDataObject.__init__(self, node, auto_load=True)
     return
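The auto_load=True argument suggests the load() step is performed inside PersistentDataObject.__init__ itself, which is why no explicit load() call follows; the defaults are still assigned first so that loading can overlay them.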
Example #13
 def configure(self, config):
     self._pdo = PersistentDataObject(self)
     # stats_dict is used to detect changes to xml files based on
     # timestamp; each entry is a tuple (modify time, pickle string).
     self._pdo.stats_dict = {}
     self._pdo.load()
     # write_priority can be set for the entire control service, though
     # it may be specialized at the individual application node level.
     set_attribute(self, 'write_priority', 9, config, int)
     CompositeNode.configure(self, config)
class TriggeredExporter(SynchronizedExporter):
    def __init__(self):
        SynchronizedExporter.__init__(self)
        self._sid = None
        self.evt = None  #dil - debug

    def handle_log(self, event):
        self.debug_information('Log export triggered.')
        self.evt = event  #dil - debug
        value = event.results()[1]['value']
        if isinstance(value, Exception):
            raise value
        if value:  # only export when value is true
            self.debug_information('Going to start export thread.')
            if self._lock.acquire(0):
                try:
                    thread = Thread(name=self.name,
                                    target=self.go,
                                    args=(time.time(), ))
                    thread.start()
                finally:
                    self._lock.release()
            else:
                msglog.log('broadway', msglog.types.WARN,
                           ('Last export still active, ' +
                            'skipping current request.'))

    def configure(self, config):
        set_attribute(self, 'trigger', REQUIRED, config)
        SynchronizedExporter.configure(self, config)

    def configuration(self):
        config = SynchronizedExporter.configuration(self)
        get_attribute(self, 'trigger', config, str)
        return config

    def start(self):
        Exporter.start(self)
        if not self.running:
            self.running = 1
            self.connection = as_node(self.connection_node)
            self._time_keeper = PersistentDataObject(self)
            self._time_keeper.start_time = 0
            self._time_keeper.load()
            self._period = self.parent.parent.period
            self._setup_trigger()
        else:
            raise EAlreadyRunning

    def _setup_trigger(self):
        try:
            self._sid = SM.create_delivered(self, {1: as_node(self.trigger)})
        except (ENotStarted, ENoSuchNode):
            msg = 'TriggeredExporter trigger: %s does not exist - could be nascent' % self.trigger
            msglog.log('broadway', msglog.types.WARN, msg)
            scheduler.seconds_from_now_do(60, self._setup_trigger)
class WritingTransporter(Transporter):
    def configure(self, config):
        set_attribute(self, 'directory', '/tmp', config)
        set_attribute(self, 'file_prefix', REQUIRED, config)
        set_attribute(self, 'file_suffix', REQUIRED, config)
        set_attribute(self, 'name_scheme', None, config)
        set_attribute(self, 'timestamp_format', '%s', config)
        Transporter.configure(self, config)
        self._last = PersistentDataObject(self)
        self._last.filename = None
        self._last.count = 1
        self._last.load()

    def configuration(self):
        config = Transporter.configuration(self)
        get_attribute(self, 'directory', config)
        get_attribute(self, 'file_prefix', config)
        get_attribute(self, 'file_suffix', config)
        get_attribute(self, 'name_scheme', config)
        get_attribute(self, 'timestamp_format', config)
        return config

    def transport(self, data):
        if type(data) == type(''):
            data = StringIO.StringIO(data)
        filename = self._generate_filename()
        tempname = filename + '.tmp'
        file = open(tempname, 'w')
        try:
            write = data.read(1024)
            while write:
                file.write(write)
                write = data.read(1024)
        finally:
            file.close()
        os.chmod(tempname, 0444)
        os.rename(tempname, filename)

    def _generate_filename(self):
        filename = self.file_prefix
        append = ''
        if self.name_scheme == 'incremental':
            append = '%s' % self._last.count
        elif self.name_scheme == 'timestamp':
            file_time = self.parent.time_function(self.parent.scheduled_time())
            filename = filename + time.strftime(self.timestamp_format,
                                                file_time)
            append = '_%s' % (self._last.count + 1)
            if filename != self._last.filename:
                self._last.count = 0
                append = ''
        self._last.count += 1
        self._last.filename = filename
        return os.path.join(self.directory,
                            filename + append + self.file_suffix)
Example #16
 def start(self):
     self.managernode = self.as_node(self.manager)
     self.synclock.acquire()
     try:
         alarmsname = '%s (%s)' % (self.name, 'alarms')
         eventsname = '%s (%s)' % (self.name, 'events')
         self.alarms = PersistentDictionary(alarmsname,
                                            encode=self.encode,
                                            decode=self.decode)
         self.events = PersistentDictionary(eventsname,
                                            encode=self.encode,
                                            decode=self.decode)
         # Migrate PDO data from old style persistence.
         pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.INFO,
                        "Migrating previous alarm and event data")
             pdodata.events = {}
             pdodata.alarms = {}
             pdodata.load()
             migrate(pdodata, self.decode)
             self.rebuildstorage()
             pdodata.destroy()
         del(pdodata)
     finally:
         self.synclock.release()
     self.securitymanager = self.as_node('/services/Security Manager')
     
     register = self.managernode.register_for_type
     self.sub = register(self.handle_event, StateEvent)
     self.running.set()
     super(AlarmConfigurator, self).start()
 def configure(self, config):
     set_attribute(self, 'directory', '/tmp', config)
     set_attribute(self, 'file_prefix', REQUIRED, config)
     set_attribute(self, 'file_suffix', REQUIRED, config)
     set_attribute(self, 'name_scheme', None, config)
     set_attribute(self, 'timestamp_format', '%s', config)
     Transporter.configure(self, config)
     self._last = PersistentDataObject(self)
     self._last.filename = None
     self._last.count = 1
     self._last.load()
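Example #16 also introduces the migration pattern repeated in Examples #24, #26, and #44: a new-style PersistentDictionary is populated once from an old-style PersistentDataObject file, which is then destroyed. Reduced to its steps (the node and the 'triggers' attribute are illustrative):

    # Sketch of the PDO-to-PersistentDictionary migration, under the
    # assumption of an old-style 'triggers' attribute on some node.
    store = PersistentDictionary('example (triggers)', encode=None, decode=None)
    if not store:
        pdodata = PersistentDataObject(node, dmtype=GC_NEVER)
        if os.path.exists(pdodata.filename()):  # old-style file still on disk
            pdodata.triggers = {}                # default before load()
            pdodata.load()
            store.update(pdodata.triggers)       # copy into the new store
            pdodata.destroy()                    # remove the old persistence
        del(pdodata)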
Example #18
class TriggeredExporter(SynchronizedExporter):
    def __init__(self):
        SynchronizedExporter.__init__(self)
        self._sid = None
        self.evt = None #dil - debug
    def handle_log(self,event):
        self.debug_information('Log export triggered.')
        self.evt = event #dil - debug 
        value = event.results()[1]['value']
        if isinstance(value,Exception):
            raise value
        if value: # only export when value is true
            self.debug_information('Going to start export thread.')
            if self._lock.acquire(0):
                try:
                    thread = Thread(name=self.name, target=self.go,
                                    args=(time.time(),))
                    thread.start()
                finally:
                    self._lock.release()
            else:
                msglog.log('broadway',msglog.types.WARN, 
                           ('Last export still active, ' + 
                            'skipping current request.'))
                            
    def configure(self, config):
        set_attribute(self, 'trigger',REQUIRED,config)
        SynchronizedExporter.configure(self, config)
        
    def configuration(self):
        config = SynchronizedExporter.configuration(self)
        get_attribute(self,'trigger',config,str)
        return config
        
    def start(self):
        Exporter.start(self)
        if not self.running:
            self.running = 1
            self.connection = as_node(self.connection_node)
            self._time_keeper = PersistentDataObject(self)
            self._time_keeper.start_time = 0
            self._time_keeper.load()
            self._period = self.parent.parent.period
            self._setup_trigger()
        else: 
            raise EAlreadyRunning
            
    def _setup_trigger(self):
        try:
            self._sid = SM.create_delivered(self, {1:as_node(self.trigger)})
        except (ENotStarted, ENoSuchNode):
            msg = 'TriggeredExporter trigger: %s does not exist - could be nascent' % self.trigger
            msglog.log('broadway',msglog.types.WARN,msg)
            scheduler.seconds_from_now_do(60, self._setup_trigger)
Example #19
 def __init__(self, name, password_file=PASSWD_FILE, group_file=GROUP_FILE, shadow_file=SHADOW_FILE):
     self.__lock = Lock()
     self.__password_file = password_file
     self.__group_file = group_file
     self.__shadow_file = shadow_file
     self.meta = {}
     self.USERS.load()
     if not self.USERS.has_key(self.name()):
         msglog.log("broadway", msglog.types.INFO, ("No profile for user %s found, creating" " new profile" % name))
         self.USERS[self.name()] = str(UUID())
     PersistentDataObject.__init__(self, self.USERS[self.name()])
     PersistentDataObject.load(self)
 def start(self):
     Exporter.start(self)
     if not self.running:
         self.running = 1
         self.connection = as_node(self.connection_node)
         self._time_keeper = PersistentDataObject(self)
         self._time_keeper.start_time = 0
         self._time_keeper.load()
         self._period = self.parent.parent.period
         self._setup_trigger()
     else:
         raise EAlreadyRunning
Example #21
 def configure(self, config):
     SimpleValue.configure(self, config)
     self._pdo = PersistentDataObject(self)
     self._pdo.value = None
     self._pdo.conversion = None
     self._pdo.load()
     conversion = _get_name(self.conversion)
     if (self._pdo.value == None or self._pdo.conversion != conversion):
         self._pdo.value = self.value
         self._pdo.conversion = conversion
         self._pdo.save()
     else:
         self.value = self._pdo.value
 def start(self):
     Exporter.start(self)
     if not self.running:
         self.running = 1
         self.connection = as_node(self.connection_node)
         self._event_count = self.log_multiple - 1
         self._time_keeper = PersistentDataObject(self)
         self._time_keeper.start_time = 0
         self._time_keeper.load()
         self._period = self.parent.parent.period
         self.parent.parent.event_subscribe(self, LogAddEntryEvent)
     else:
         raise EAlreadyRunning
Example #23
    def start(self):
        from mpx.lib.persistent import PersistentDataObject
        
        ServiceNode.start(self)
        if self.debug: print 'Garbage Collector Starting!'

        self._data = PersistentDataObject(self,dmtype=GC_NEVER)
  
        self._data.registered = []
        self._data.load()
        
        if self.debug: print 'GC Data is %s.' % self._data
        if self.debug: print 'GC Data._reg is %s.' % self._data.registered
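The dmtype=GC_NEVER argument used here and in the configurator examples appears to tie into this same GarbageCollector (shown in full in Example #52): registrations whose purge type is GC_NEVER are skipped when the collector decides which persistent data to reap.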
Example #24
 def start(self):
     try:
         self._pdo_lock.acquire()
         try:
             if self.__running:
                 return
             self.__running = True
             self._trendconfig = PersistentDictionary(filename(self), encode=None, decode=None)
             if not self._trendconfig:
                 pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                 if os.path.exists(pdodata.filename()):
                     msglog.log("broadway", msglog.types.INFO, "Migrating previous trend data")
                     pdodata.trends = {}
                     pdodata.load()
                     self._trendconfig.update(pdodata.trends)
                 del (pdodata)
         finally:
             self._pdo_lock.release()
         super(TrendManager, self).start()
         self.logger = node.as_internal_node(self.logger_url)
         if self.has_child("trends"):
             self.trends = self.get_child("trends")
         else:
             self.trends = CompositeNode()
             self.trends.configure({"parent": self, "name": "trends"})
             self.trends.start()
         corrupt_trends = []
         for trendname, trenddump in self._trendconfig.items():
             msg = "Loading trend: %s" % trendname
             msglog.log("trendmanager", msglog.types.INFO, msg)
             try:
                 trend = unmarshal(trenddump)
             except:
                 corrupt_trends.append(trendname)
                 msg = "Failed to load trend: %s" % trendname
                 msglog.log("trendmanager", msglog.types.ERR, msg)
                 msglog.exception(prefix="Handled")
         for trendname in corrupt_trends:
             try:
                 msg = "Deleting trend information: %s" % trendname
                 msglog.log("trendmanager", msglog.types.INFO, msg)
                 self._delete_trend_configuration(trendname)
                 if self.trends.has_child(trendname):
                     trend = self.trends.get_child(trendname)
                     trend.prune(force=True)
             except:
                 msglog.exception(prefix="Handled")
     except:
         self.__running = False
         raise
     return
Example #25
class WritingTransporter(Transporter):
    def configure(self, config):
        set_attribute(self, 'directory', '/tmp', config)
        set_attribute(self, 'file_prefix', REQUIRED, config)
        set_attribute(self, 'file_suffix', REQUIRED, config)
        set_attribute(self, 'name_scheme', None, config)
        set_attribute(self, 'timestamp_format', '%s', config)
        Transporter.configure(self, config)
        self._last = PersistentDataObject(self)
        self._last.filename = None
        self._last.count = 1
        self._last.load()
    def configuration(self):
        config = Transporter.configuration(self)
        get_attribute(self, 'directory', config)
        get_attribute(self, 'file_prefix', config)
        get_attribute(self, 'file_suffix', config)
        get_attribute(self, 'name_scheme', config)
        get_attribute(self, 'timestamp_format', config)
        return config
    def transport(self, data):
        if type(data) == type(''):
            data = StringIO.StringIO(data)
        filename = self._generate_filename()
        tempname = filename + '.tmp'
        file = open(tempname,'w')
        try:
            write = data.read(1024)
            while write:
                file.write(write)
                write = data.read(1024)
        finally:
            file.close()
        os.chmod(tempname,0444)
        os.rename(tempname,filename)
    def _generate_filename(self):
        filename = self.file_prefix
        append = ''
        if self.name_scheme == 'incremental':
            append = '%s' % self._last.count
        elif self.name_scheme == 'timestamp':
            file_time = self.parent.time_function(self.parent.scheduled_time())
            filename = filename + time.strftime(self.timestamp_format,file_time)
            append = '_%s' % (self._last.count + 1)
            if filename != self._last.filename:
                self._last.count = 0
                append = ''
        self._last.count += 1
        self._last.filename = filename
        return os.path.join(self.directory,filename + append + self.file_suffix)
Example #26
 def start(self):
     filename = '%s (%s)' % (self.name, 'triggers')
     self.manager = self.nodespace.as_node(self.manager)
     self._pdo_lock.acquire()
     try:
         if self._triggers is None:
             self._triggers = PersistentDictionary(filename,
                                                   encode=None,
                                                   decode=None)
         if not self._triggers:
             pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
             if os.path.exists(pdodata.filename()):
                 msglog.log('broadway', msglog.types.INFO,
                            "Migrating previous trigger data.")
                 pdodata.triggers = {}
                 pdodata.load()
                 self._triggers.update(pdodata.triggers)
                 pdodata.destroy()
             del (pdodata)
         self._loadtriggers()
         if self.secured:
             self.security_manager = self.as_node(
                 "/services/Security Manager")
         else:
             self.security_manager = None
     finally:
         self._pdo_lock.release()
     return super(TriggersConfigurator, self).start()
Example #27
 def start(self):
     ServiceNode.start(self)
     # this will correctly add the msglog as a child
     #  to the logger.
     if 'msglog' not in self.children_names():
         columns = mpx.lib.msglog.get_columns()
         log = Log()
         log.configure({'name':'msglog', 'parent':self})
         for c in columns:
             column = mpx.lib.factory('mpx.service.logger.column')
             config = c.configuration()
             config['parent'] = log
             column.configure(config)
     self._logs = PersistentDataObject(self)
     self._logs.names = []
     self._logs.load()
     for name in self._logs.names:
         if ((not mpx.lib.log.log_exists(name)) and 
             (name not in self.children_names())):
             log = mpx.lib.log.log(name)
             log.destroy()
             del(log)
     self._logs.names = []
     for child in self.children_nodes():
         if not isinstance(child, Alias):
             # Don't manage other managers' logs...
             self._logs.names.append(child.name)
     self._logs.save()
Example #28
 def start(self):
     self._pdo = PersistentDataObject(self)
     self._pdo.last_dictionary = None
     self._pdo.load()
     self._started = 1
     self.parent.event_subscribe(self,AlarmTriggerEvent)
     CompositeNode.start(self)
Example #29
    def start(self):
        if self.__running:
            return
        if self.debug:
            msglog.log('EnergywiseManager :', msglog.types.INFO, 'Inside start' )
        CompositeNode.start(self)
#        start_node = as_node('/services/EnergywiseManager/')
#        self.configure_trend_in_switches(start_node, 60)
        self.__running = True
        self._pdo_lock.acquire()
        self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
        self._pdo.trends = {}
        self._pdo.load()
        self._pdo_lock.release()
        if self.has_child('trends'):
            self.trends = self.get_child('trends')
        else:
            self.trends = CompositeNode()
            self.trends.configure({'parent':self, 'name':'trends'})
            self.trends.start()
        # start trending for saved domains
        for domain,freq in self._pdo.trends.items():
            try:
                start_node = as_node(domain)
               # self.configure_trend_in_switches( start_node,freq )
                start_node.new_trend(freq)
            except:
                self.delete_trend_configuration(domain)
        return
Example #30
 def configure(self, config):
     self._pdo = PersistentDataObject(self)
     self._pdo.stats_dict = {} #used to detect changes to xml files based on timestamp. Tuple (modify time, pickle string)
     self._pdo.load()
     # write_priority can be set for the entire control service, though
     # it may be specialized at the individual application node level. 
     set_attribute(self, 'write_priority', 9, config, int)
     CompositeNode.configure(self, config)
Example #31
 def start(self):
     filename = '%s (%s)' % (self.name, 'triggers')
     self.manager = self.nodespace.as_node(self.manager)
     self._pdo_lock.acquire()
     try:
         if self._triggers is None:
             self._triggers = PersistentDictionary(
                 filename, encode=None, decode=None)
         if not self._triggers:
             pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
             if os.path.exists(pdodata.filename()):
                 msglog.log('broadway', msglog.types.INFO, 
                            "Migrating previous trigger data.")
                 pdodata.triggers = {}
                 pdodata.load()
                 self._triggers.update(pdodata.triggers)
                 pdodata.destroy()
             del(pdodata)
         self._loadtriggers()
         if self.secured:
             self.security_manager = self.as_node("/services/Security Manager")
         else:
             self.security_manager = None
     finally: 
         self._pdo_lock.release()
     return super(TriggersConfigurator, self).start()
Example #32
 def __init__(self,
              name,
              password_file=PASSWD_FILE,
              group_file=GROUP_FILE,
              shadow_file=SHADOW_FILE):
     self.__lock = Lock()
     self.__password_file = password_file
     self.__group_file = group_file
     self.__shadow_file = shadow_file
     self.meta = {}
     self.USERS.load()
     if not self.USERS.has_key(self.name()):
         msglog.log('broadway', msglog.types.INFO,
                    ('No profile for user %s found, creating'
                     ' new profile' % name))
         self.USERS[self.name()] = str(UUID())
     PersistentDataObject.__init__(self, self.USERS[self.name()])
     PersistentDataObject.load(self)
Example #33
 def start(self):
     self.manager = self.nodespace.as_node(self.manager)
     self.security_manager = as_node(self.security_manager)
     self._pdo = PersistentDataObject(self)
     msg = 'The CloudConfigurator persistent object is in the file: %s' % str(self._pdo.filename())
     msglog.log('CloudConfigurator', msglog.types.INFO, msg)
     if os.path.exists(self._pdo.filename()):
         # Migration
         msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO migration in progress')
         self._pdo.formation = cPickle.dumps(IPickles(self.manager.formation))
         self._pdo.load()
         formation = IPickles(cPickle.loads(self._pdo.formation))()
         msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO migration for the formation: %s' % str(formation))
         self.manager.update_formation(formation, None)
         self._pdo.destroy()
         del(self._pdo)
         msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO migration is complete')
     return super(CloudConfigurator, self).start()
Example #34
    def start(self):
        self.manager = self.nodespace.as_node(self.manager)
        
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.users = {}
            self._pdo.roles = {}
            self._pdo.policies = {}
            self._pdo.load()
            userdumps = self._pdo.users.items()
            roledumps = self._pdo.roles.items()
            policydumps = self._pdo.policies.items()
        finally: self._pdo_lock.release()
        super(SecurityConfigurator, self).start()

        tstart = time.time()
        for rolename, roledump in roledumps:
            try: IPickles(cPickle.loads(roledump))()
            except:
                message = self.LOADFAILURE % (self.name, 'role', rolename)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix = 'Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log('broadway', msglog.types.INFO,
                   'Security Configurator loaded '
                   '%s roles in %s seconds.' % (len(roledumps), tlapse))

        tstart = time.time()
        for policyname, policydump in policydumps:
            try: IPickles(cPickle.loads(policydump))()
            except:
                message = self.LOADFAILURE % (self.name, 'policy', policyname)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix = 'Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log('broadway', msglog.types.INFO,
                   'Security Configurator loaded '
                   '%s policies in %s seconds.' % (len(policydumps), tlapse))

        tstart = time.time()
        for username, userdump in userdumps:
            try: IPickles(cPickle.loads(userdump))()
            except:
                message = self.LOADFAILURE % (self.name, 'user', username)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix = 'Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log('broadway', msglog.types.INFO,
                   'Security Configurator loaded '
                   '%s users in %s seconds.' % (len(userdumps), tlapse))
        return
Example #35
 def start(self):
     self.nodes = []
     from mpx.service.time.time_zone import TimeZone
     if not IScheduleHolderParent.implementedBy(TimeZone):
         class_implements(TimeZone, IScheduleHolderParent)
     self.security_manager = self.nodespace.as_node(self.security_manager)
     self._pdo_lock.acquire()
     try:
         if not self._pdo:
             self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
             self._pdo.holders = {}
             self._pdo.schedules = {}
             self._pdo.load()
         holderdicts = self._pdo.holders.values()
         holderdumps = []
         for holderdict in holderdicts:
             holderdumps.extend(holderdict.items())
         # schedules = {'local': {'Holder1': {'Sched1': Sched1Dump}, 'Holder2': ...}, 'UTC': ...}
          scheduledicts = []
          for holderdict in self._pdo.schedules.values():
              scheduledicts.extend(holderdict.values())
         scheduledumps = []
         for scheduledict in scheduledicts:
             scheduledumps.extend(scheduledict.items())
     finally: 
         self._pdo_lock.release()
     super(ScheduleConfigurator, self).start()
     tstart = time.time()
     for holdername, holderdump in holderdumps:
         try: 
             self.nodes.append(IPickles(cPickle.loads(holderdump))())
         except:
             message = self.LOADFAILURE % (self.name, 'Schedule Holder', holdername)
             msglog.log('broadway', msglog.types.ERR, message)
             msglog.exception(prefix = 'Handled')
     tend = time.time()
     tlapse = tend - tstart
     msglog.log('broadway', msglog.types.INFO,
                'Schedule Configurator loaded '
                '%s nodes in %s seconds.' % (len(holderdumps), tlapse))
     
     tstart = time.time()
     for schedulename, scheduledump in scheduledumps:
         try: self.nodes.append(IPickles(cPickle.loads(scheduledump))())
         except:
             message = self.LOADFAILURE % (self.name, 'Schedule Holder', schedulename)
             msglog.log('broadway', msglog.types.ERR, message)
             msglog.exception(prefix = 'Handled')
     tend = time.time()
     tlapse = tend - tstart
     msglog.log('broadway', msglog.types.INFO,
                'Schedule Configurator loaded '
                '%s nodes in %s seconds.' % (len(scheduledumps), tlapse))
     self.template = self.parent.read_resource(self.page_template)
Example #36
 def start(self):
     self.container = self.nodespace.as_node(self.container)
     self._pdo_lock.acquire()
     try:
         self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
         self._pdo.exporters = {}
         self._pdo.load()
         exporterdumps = self._pdo.exporters.values()
     finally:
         self._pdo_lock.release()
     super(ExportersConfigurator, self).start()
     tstart = time.time()
     for exporterdump in exporterdumps:
         IPickles(cPickle.loads(exporterdump))()
     tend = time.time()
     tlapse = tend - tstart
     msglog.log(
         'broadway', msglog.types.INFO, 'Exporter Configurator loaded '
         '%s exporters in %s seconds.' % (len(exporterdumps), tlapse))
     self.manager = self.container
Example #38
 def start(self):
     Exporter.start(self)
     if not self.running:
         self.running = 1
         self.connection = as_node(self.connection_node)
         self._time_keeper = PersistentDataObject(self)
         self._time_keeper.start_time = 0
         self._time_keeper.load()
         self._period = self.parent.parent.period
         self._setup_trigger()
     else: 
         raise EAlreadyRunning
Example #39
class SimplePersistentValue(SimpleValue):
    def configure(self, config):
        SimpleValue.configure(self, config)
        self._pdo = PersistentDataObject(self)
        self._pdo.value = None
        self._pdo.conversion = None
        self._pdo.load()
        conversion = _get_name(self.conversion)
        if (self._pdo.value == None or self._pdo.conversion != conversion):
            self._pdo.value = self.value
            self._pdo.conversion = conversion
            self._pdo.save()
        else:
            self.value = self._pdo.value

    def configuration(self):
        self.value = self._pdo.value
        return SimpleValue.configuration(self)

    def set(self, value, asyncOK=1):
        SimpleValue.set(self, value, asyncOK)
        self._pdo.value = self.value
        self._pdo.save()

    def get(self, skipCache=0):
        return self._pdo.value
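Because set() writes through to the PDO and configure() restores from it, a SimplePersistentValue survives restarts. A hypothetical round trip (configuration keys beyond name and parent omitted):

    node = SimplePersistentValue()
    node.configure({'name': 'setpoint', 'parent': parent})  # restores the prior value, if any
    node.set(72)   # writes through to the PDO
    node.get()     # returns 72, even after a restart re-runs configure()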
Example #40
 def configure(self, config):
     set_attribute(self, 'host', REQUIRED, config)
     set_attribute(self, 'port', 21, config, int)
     set_attribute(self, 'directory', '', config)
     set_attribute(self, 'username', REQUIRED, config)
     set_attribute(self, 'password', REQUIRED, config)
     #CSCtn64870
     if (config.has_key('timeout') and config['timeout'] == ''):
         config['timeout'] = 'None'
     set_attribute(self, 'timeout', None, config, float)
     set_attribute(self, 'file_prefix', 'cisco', config)
     set_attribute(self, 'file_suffix', '.dat', config)
     set_attribute(self, 'name_scheme', 'timestamp', config)
     set_attribute(self, 'timestamp_format', '%s', config)
     set_attribute(self, 'passive_mode', 1, config, as_boolean)
     set_attribute(self, 'file_append', 0, config, as_boolean)
     Transporter.configure(self, config)
     if self._last is None:
         self._last = PersistentDataObject(self)
         self._last.filename = None
         self._last.count = 1
         self._last.load()
Example #41
 def start(self):
     Exporter.start(self)
     if not self.running:
         self.running = 1
         self.connection = as_node(self.connection_node)
         self._event_count = self.log_multiple - 1
         self._time_keeper = PersistentDataObject(self)
         self._time_keeper.start_time = 0
         self._time_keeper.load()
         self._period = self.parent.parent.period
         self.parent.parent.event_subscribe(self, LogAddEntryEvent)
     else: 
         raise EAlreadyRunning
Example #42
 def configure(self, config):
     SimpleValue.configure(self, config)
     self._pdo = PersistentDataObject(self)
     self._pdo.value = None
     self._pdo.conversion = None
     self._pdo.load()
     conversion = _get_name(self.conversion)
     if (self._pdo.value == None or 
         self._pdo.conversion != conversion):
         self._pdo.value = self.value
         self._pdo.conversion = conversion
         self._pdo.save()
     else:
         self.value = self._pdo.value
Example #43
class LastAlarm(CompositeNode, EventConsumerMixin):
    def __init__(self):
        self._last_alarm = None
        self._started = 0
        CompositeNode.__init__(self)
        EventConsumerMixin.__init__(self, self._alarm_triggered)

    def configure(self, config):
        CompositeNode.configure(self, config)

    def configuration(self):
        config = CompositeNode.configuration(self)
        return config

    def start(self):
        self._pdo = PersistentDataObject(self)
        self._pdo.last_dictionary = None
        self._pdo.load()
        self._started = 1
        self.parent.event_subscribe(self, AlarmTriggerEvent)
        CompositeNode.start(self)

    def stop(self):
        self._started = 0
        self.parent.cancel(self, AlarmTriggerEvent)
        CompositeNode.stop(self)

    def _alarm_triggered(self, alarm):
        self._last_alarm = alarm
        self._pdo.last_dictionary = alarm.dictionary()
        self._pdo.save()

    def get(self, skipCache=0):
        return self._last_alarm

    def get_dictionary(self):
        return self._pdo.last_dictionary
Example #44
 def start(self):
     try:
         self._pdo_lock.acquire()
         try:
             if self.__running:
                 return
             self.__running = True
             self._trendconfig = PersistentDictionary(filename(self),
                                                      encode=None,
                                                      decode=None)
             if not self._trendconfig:
                 pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                 if os.path.exists(pdodata.filename()):
                     msglog.log('broadway', msglog.types.INFO,
                                "Migrating previous trend data")
                     pdodata.trends = {}
                     pdodata.load()
                     self._trendconfig.update(pdodata.trends)
                 del (pdodata)
         finally:
             self._pdo_lock.release()
         super(TrendManager, self).start()
         self.logger = node.as_internal_node(self.logger_url)
         if self.has_child('trends'):
             self.trends = self.get_child('trends')
         else:
             self.trends = CompositeNode()
             self.trends.configure({'parent': self, 'name': 'trends'})
             self.trends.start()
         corrupt_trends = []
         for trendname, trenddump in self._trendconfig.items():
             msg = "Loading trend: %s" % trendname
             msglog.log('trendmanager', msglog.types.INFO, msg)
             try:
                 trend = unmarshal(trenddump)
             except:
                 corrupt_trends.append(trendname)
                 msg = "Failed to load trend: %s" % trendname
                 msglog.log('trendmanager', msglog.types.ERR, msg)
                 msglog.exception(prefix='Handled')
         for trendname in corrupt_trends:
             try:
                 msg = "Deleting trend information: %s" % trendname
                 msglog.log('trendmanager', msglog.types.INFO, msg)
                 self._delete_trend_configuration(trendname)
                 if self.trends.has_child(trendname):
                     trend = self.trends.get_child(trendname)
                     trend.prune(force=True)
             except:
                 msglog.exception(prefix='Handled')
     except:
         self.__running = False
         raise
     return
 def test_upgrade(self):
     from mpx.upgrade.persistent import persistent_0
     old = persistent_0.PersistentDataObject('upgrade_test')
     old.purpose = 'testing'
     old.save()
     old_filename = old._persistent.filename
     del (old.__dict__['_persistent'])
     del (old)
     new = PersistentDataObject('upgrade_test')
     self.failIf(os.path.exists(old_filename),
                 'Upgrade failed to remove old version')
     new.purpose = None
     new.load()
     self.failUnless(new.purpose == 'testing',
                     'Upgrade failed to get old value')
     new.destroy()
     del (new)
Example #46
 def start(self):
     self.container = self.nodespace.as_node(self.container)
     self._pdo_lock.acquire()
     try:
         self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
         self._pdo.exporters = {}
         self._pdo.load()
         exporterdumps = self._pdo.exporters.values()
     finally: 
         self._pdo_lock.release()
     super(ExportersConfigurator, self).start()
     tstart = time.time()
     for exporterdump in exporterdumps:
         IPickles(cPickle.loads(exporterdump))()
     tend = time.time()
     tlapse = tend - tstart
     msglog.log('broadway', msglog.types.INFO,
                'Exporter Configurator loaded '
                '%s exporters in %s seconds.' % (len(exporterdumps), tlapse))
     self.manager = self.container
Example #47
    def test_upgrade(self):
        from mpx.upgrade.persistent import persistent_0

        old = persistent_0.PersistentDataObject("upgrade_test")
        old.purpose = "testing"
        old.save()
        old_filename = old._persistent.filename
        del (old.__dict__["_persistent"])
        del (old)
        new = PersistentDataObject("upgrade_test")
        self.failIf(os.path.exists(old_filename), "Upgrade failed to remove old version")
        new.purpose = None
        new.load()
        self.failUnless(new.purpose == "testing", "Upgrade failed to get old value")
        new.destroy()
        del (new)
Example #48
 def configure(self, config):
     set_attribute(self, 'host', REQUIRED, config)
     set_attribute(self, 'port', 21, config, int)
     set_attribute(self, 'directory', '', config)
     set_attribute(self, 'username', REQUIRED, config)
     set_attribute(self, 'password', REQUIRED, config)
     #CSCtn64870
     if (config.has_key('timeout') and config['timeout'] == ''):
         config['timeout'] = 'None'
     set_attribute(self, 'timeout', None, config, float)
     set_attribute(self, 'file_prefix', 'cisco', config)
     set_attribute(self, 'file_suffix', '.dat', config)
     set_attribute(self, 'name_scheme', 'timestamp', config)
     set_attribute(self, 'timestamp_format', '%s', config)
     set_attribute(self, 'passive_mode', 1, config, as_boolean)
     set_attribute(self, 'file_append', 0, config, as_boolean)
     Transporter.configure(self, config)
     if self._last is None:
         self._last = PersistentDataObject(self)
         self._last.filename = None
         self._last.count = 1
         self._last.load()
Example #49
    def start(self):
        self.security_manager = self.nodespace.as_node(self.security_manager)
        self._pdo_lock.acquire()
        try:
            self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
            self._pdo.valuedrivers = {}
            self._pdo.load()
            valuedriverdumps = self._pdo.valuedrivers.items()
        finally: self._pdo_lock.release()
        super(DriverConfigurator, self).start()

        tstart = time.time()
        for drivername, driverdump in valuedriverdumps:
            try: IPickles(cPickle.loads(driverdump))()
            except:
                message = self.LOADFAILURE % (self.name, 'Value Driver', drivername)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix = 'Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log('broadway', msglog.types.INFO,
                   'Value Driver Configurator loaded '
                   '%s nodes in %s seconds.' % (len(valuedriverdumps), tlapse))
        return
Example #50
class SimplePersistentValue(SimpleValue):
    def configure(self, config):
        SimpleValue.configure(self, config)
        self._pdo = PersistentDataObject(self)
        self._pdo.value = None
        self._pdo.conversion = None
        self._pdo.load()
        conversion = _get_name(self.conversion)
        if (self._pdo.value == None or 
            self._pdo.conversion != conversion):
            self._pdo.value = self.value
            self._pdo.conversion = conversion
            self._pdo.save()
        else:
            self.value = self._pdo.value
    def configuration(self):
        self.value = self._pdo.value
        return SimpleValue.configuration(self)
    def set(self,value,asyncOK=1):
        SimpleValue.set(self, value, asyncOK)
        self._pdo.value = self.value
        self._pdo.save()
    def get(self, skipCache=0):
        return self._pdo.value
Example #51
 def __init__(self, node):
     self.last_time = 0
     PersistentDataObject.__init__(self, node)
Example #52
class GarbageCollector(ServiceNode):
    def __init__(self):
        ServiceNode.__init__(self)
        self.debug = 0
        self._registered = ()
        self._did_save = 0
        self._post_configuration=0

        if self.debug: print 'Initialized _registered to () for %s.' % self
    def singleton_unload_hook(self):
        return
    ##
    # @param config
    # @return None
    def configure(self,config):
        ServiceNode.configure(self,config)
    def configuration(self):
        if self.debug: print 'In GarbageCollector:configuration().'
        config = ServiceNode.configuration(self)
        return config
    ##
    #   starts the data manager service
    # @return None
    def start(self):
        from mpx.lib.persistent import PersistentDataObject
        
        ServiceNode.start(self)
        if self.debug: print 'Garbage Collector Starting!'

        self._data = PersistentDataObject(self,dmtype=GC_NEVER)
  
        self._data.registered = []
        self._data.load()
        
        if self.debug: print 'GC Data is %s.' % self._data
        if self.debug: print 'GC Data._reg is %s.' % self._data.registered

    ##
    #   stops the data manager service
    # @return None
    def stop(self):
        return ServiceNode.stop(self)

    ##
    # set_faillist is the hook which allows the system to inform the data
    # manager about which nodes failed to start up.  Each list item should
    # be a dictionary with the following members:
    # name - the name of the node (without parent information)
    # parent - the parent of the node (with any relevant parent information,
    #          e.g. /services/com1
    # type - what type of failure occurred.  Acceptable values are
    #        load and config.
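    #
    # An illustrative faillist (the values are hypothetical):
    #   [{'name': 'com1', 'parent': '/services', 'type': 'load'}]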
    def set_faillist(self, faillist):
        if self.debug: print 'Got faillist of %s.' % faillist
        if self.debug: print 'Got reglist of %s.' % self._registered

        old_registered = self._data.registered[:]

        # By now, everyone should have had a chance to start up.
        # @fixme (real soon now, I promise):  Use the cool new method that
        # Mark and Shane suggested to consume an event from the root node
        # when all nodes have been started as a trigger for starting
        # the garbage collection process.
        self._data.registered = list(self._registered)
  
        # OK, now process our lists and see who is naughty and who is
        # nice.
        if self.debug: print '---- Starting to Process Potential Reaping List ----'
        for regnode in old_registered:
            purge_type = regnode['type']
            filename = regnode['filename']
            nodename = regnode['nodename']
            
            # If we are never supposed to purge this data, then don't bother
            # to do any more checking
            if purge_type == GC_NEVER:
                if self.debug: print '%s: Skipping because it is GC_NEVER.' % nodename
                continue

            if self.debug: print '%s: Checking.' % nodename
            
            node_did_register = 0
            node_did_fail = 0
            node_did_fail_on_config = 0
            node_did_fail_on_load = 0
            node_did_fail_on_start = 0
            parent_did_fail = 0
            should_purge = 0

            # If this node has registered with us, then we assume that
            # it started up and is present, etc.  This might not always
            # be the correct thing to do, but for now it seems like the
            # correct enough thing to do and should keep performance
            # from becoming an issue.
            if regnode in self._registered:
                if self.debug: print '%s: Appears to be happy.' % nodename
                node_did_register = 1
            else:
                # Check to see if the node or one of its ancestors failed
                for failnode in faillist:
                    curnode = failnode['name']
                    curpare = failnode['parent']
                    curtype = failnode['type']
                    if curpare == '/':
                        curpath = curpare + curnode
                    else:
                        curpath = curpare + '/' + curnode
                    if self.debug: print 'curpath is %s and nodename is %s.' % (curpath, nodename)
                    if nodename == curpath:
                        if self.debug: print 'We got a match, %s failed because of %s.' % (
                            nodename, curtype)
                        if curtype == 'load':
                            node_did_fail_on_load = 1
                        elif curtype == 'config':
                            node_did_fail_on_config = 1
                        else:
                            raise ValueError('Unrecognized failure type: %s.' % curtype)
                        # Don't need to check any further
                        break
                    else:
                        if self._path_is_parent(curpath, nodename):
                            if self.debug: print 'We found a parent who failed: %s.' % curpath
                            parent_did_fail = 1
                            # Don't need to check any further
                            break                        
                if node_did_fail_on_config or node_did_fail_on_load:
                    node_did_fail = 1

            # If the node didn't fail in load or config, but it didn't register either,
            # then check to see if perhaps it exists, but didn't start.  We detect this
            # by doing an as_node on it.  If this succeeds, we can check the node's state.
            # If it doesn't succeed, then we can pretty safely assume that the node
            # has been deleted (or, unfortunately, is auto-discovered).
            if not node_did_fail and not node_did_register:
                try:
                    as_node(nodename)  # raises if the node does not exist
                    node_did_fail_on_start = 1
                    node_did_fail = 1
                    if self.debug: print 'We were able to instantiate node: %s.' % nodename
                except:
                    if self.debug: print 'Failed to instantiate node: %s.' % nodename
                    # The node doesn't seem to exist at all.  Let the following code
                    # draw the appropriate conclusions.
                    pass
            
            if not node_did_register:
                if self.debug: print 'node_did_fail_on_load: %d.' % node_did_fail_on_load
                if self.debug: print 'node_did_fail_on_config: %d.' % node_did_fail_on_config
                if self.debug: print 'node_did_fail_on_start: %d.' % node_did_fail_on_start
                if self.debug: print 'node_did_fail: %d.' % node_did_fail
                if self.debug: print 'parent_did_fail: %d.' % parent_did_fail
                if self.debug: print 'purge_type: %d.' % purge_type
                
                # OK, the node didn't register.  Check to see what we've
                # been told to do in this case.
                if node_did_fail and (purge_type == GC_ONFAILURE):
                    should_purge = 1

                # For now, purge even if it was a parent who failed and purge_type
                # is GC_ONFAILURE.  @fixme: We need to think about whether this is
                # what we want to do or not.
                if parent_did_fail and (purge_type == GC_ONFAILURE):
                    should_purge = 1

                # If the node did not register and neither it nor a parent
                # failed to start, then we assume that it has been deleted.
                # Note: This does not seem to be correct for auto-discovered
                #       nodes, so we need a better way of detecting this case.
                if (not node_did_fail) and (not parent_did_fail) and (purge_type == GC_ONDELETE):
                    should_purge = 1

            # If the node did not register and we aren't going to purge it, then
            # save its registration information so that if circumstances change,
            # we can consider purging it at some later date.
            if (not node_did_register) and (not should_purge):
                if self.debug: print '%s did not register, but we are preserving its registration.' % nodename
                self._data.registered.append(regnode)
                        
            # OK, we've figured out that we should purge this persistent
            # data.  Go ahead and do so.
            if should_purge:
                if os.access(filename, os.F_OK):
                    if self.debug: print 'We decided we should purge the following file: %s.' % filename
                    msglog.log('garbage_collector',msglog.types.INFO,
                           'Purging the following persistent data file: %s on behalf of %s.' % (filename,
                                                                                                nodename))
                    try:
                        os.remove(filename)
                    except:
                        msglog.log('garbage_collector',msglog.types.INFO,
                                   'Got exception trying to remove persistent data: %s.' % filename)
                        msglog.exception('garbage_collector')
            else:
                if self.debug: print '%s: Will SAVE the following file: %s.' % (nodename, filename)


        if self.debug: print '---- Done Processing Potential Reaping List ----'
        
        # Now, at long last, persist our own data.
        self._data.save()
        self._did_save = 1
        self._post_configuration = 1
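
    # A minimal illustration (hypothetical names, not part of the service)
    # of the faillist structure described above and how it gets classified:
    #
    #     faillist = [
    #         {'name': 'com1', 'parent': '/services', 'type': 'config'},
    #         {'name': 'port1', 'parent': '/services/com1', 'type': 'load'},
    #     ]
    #     gc.set_faillist(faillist)
    #
    # Data registered with GC_ONFAILURE by either node (or by a child of
    # /services/com1) would be purged; data registered with GC_ONDELETE
    # would be kept, because the nodes failed rather than disappeared.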

    def register(self, nodename, filename, type=None):
        # Default to GC_ONDELETE
        if type is None:
            type = GC_ONDELETE
        if self.debug: print '%s: Registered with type of %d.' % (nodename, type)
        if self._post_configuration:
            self._data.registered+=(
                {'nodename':nodename, 'filename':filename, 'type':type},
                )
        else:
            self._registered += (
                {'nodename':nodename, 'filename':filename, 'type':type},
                )

        # If we have already saved our data, but just received a new registration,
        # then save it again.
        if self._did_save:
            self._data.save()
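
    # Hypothetical registration sketch: a node that owns persistent data
    # registers its file so the data can be reaped if the node goes away.
    #
    #     pdo = PersistentDataObject(node)
    #     gc.register(as_node_url(node), pdo.filename(), GC_ONDELETE)
    #
    # Registrations that arrive after set_faillist() has run go straight
    # into self._data.registered and trigger an immediate save().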

    def _path_is_parent(self, path, node):
        # If they are the same, we aren't talking a parent/child relationship here
        if path == node:
            return 0
        strind = string.find(node, path)
        if strind == -1:
            return 0
        if strind == 0:
            return 1
        # If we got something other than -1 or 0 here, strange things are
        # happening.  Dump a message to msglog so that whatever is wrong
        # can be fixed.
        msglog.log(
            'garbage_collector', msglog.types.INFO,
            '_path_is_parent: Found %s at a weird spot in %s.' % (path,
                                                                  node)
            )
        return 1
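
    # Expected behavior of the check above (hypothetical paths):
    #
    #     _path_is_parent('/services/com1', '/services/com1/port') -> 1
    #     _path_is_parent('/services/com1', '/services/com1')      -> 0
    #     _path_is_parent('/services/com2', '/services/com1/port') -> 0
    #
    # Note that the raw prefix match also makes '/services/com1' look like
    # a parent of '/services/com10'; see the separator-aware sketch after
    # this class.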
    ##
    # Return a tuple of dict()s describing all the registered PDOs.
    # @note DO NOT MODIFY THE DICT()s IN THE TUPLE!
    def registered_pdo_tuple(self):
        return self._registered
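
# A self-contained sketch (plain Python, no framework dependencies) of a
# separator-aware variant of the prefix test used by _path_is_parent()
# above.  The raw string.find() check treats '/services/com1' as a parent
# of '/services/com10'; requiring the path separator avoids that.
def _path_is_parent_strict(path, node):
    # True (1) only when node is strictly below path in the node tree.
    if path == node:
        return 0
    if not path.endswith('/'):
        path = path + '/'
    return int(node.startswith(path))

# _path_is_parent_strict('/services/com1', '/services/com10')     -> 0
# _path_is_parent_strict('/services/com1', '/services/com1/port') -> 1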
Example #53
0
class CloudConfigurator(CompositeNode):
    def __init__(self, *args, **kw):
        self.secured = True
        self.path = "/cloudconfig"
        self.manager = '/services/Cloud Manager'
        self.security_manager = '/services/Security Manager'
        super(CloudConfigurator, self).__init__(*args, **kw)
    def configure(self, config):
        self.secured = as_boolean(as_internal_node("/services").secured)
        self.setattr('path', config.get('path',self.path))
        self.setattr('manager', config.get('manager','/services/Cloud Manager'))
        super(CloudConfigurator, self).configure(config)
    def configuration(self):
        config = super(CloudConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        config['secured'] = str(int(self.secured))
        return config
    def stop(self):
        if not isinstance(self.manager, str):
            self.manager.dispatcher.unregister(self.sub)
            self.manager = as_node_url(self.manager)
        if not isinstance(self.security_manager, str):
            self.security_manager = as_node_url(self.security_manager)
        return super(CloudConfigurator, self).stop()
    def get_manager(self):
        manager = self.manager
        if self.secured:
            manager = self.security_manager.as_secured_node(manager)
        return manager
    def match(self, path):
        return path.startswith(self.path)
    def start(self):
        self.manager = self.nodespace.as_node(self.manager)
        self.security_manager = as_node(self.security_manager)
        self._pdo = PersistentDataObject(self)
        msg = 'The CloudConfigurator Persistent Object is in the file: %s' % str(self._pdo.filename())
        msglog.log('CloudConfigurator', msglog.types.INFO, msg)
        if os.path.exists(self._pdo.filename()):
            # Migration 
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration in Progress')
            self._pdo.formation = cPickle.dumps(IPickles(self.manager.formation))
            self._pdo.load()
            formation = IPickles(cPickle.loads(self._pdo.formation))()
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration for the Formation:%s' %str(formation))
            self.manager.update_formation(formation,None)
            self._pdo.destroy()
            del(self._pdo)
            msglog.log('CloudConfigurator', msglog.types.INFO, 'PDO Migration is Complete')           
        return super(CloudConfigurator, self).start()

    def get_node_names(self):
        formation = self.manager.get_formation()
        norm_formation = self.manager.nformation.normalize_formation(formation)
        ind = norm_formation.index(self.manager.peer)

        # Move this peer to the head of the list.
        p = formation.pop(ind)
        formation.insert(0, p)

        # Insert the portal at the very first place.
        portal = self.manager.get_portal()
        if portal is None:
            formation.insert(0, "")
        else:
            formation.insert(0, portal)
        return formation
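
    # Result sketch for get_node_names() (hypothetical hosts): given a
    # formation of ['peerA', 'myhost', 'peerB'] where 'myhost' is this
    # peer and the portal is 'portal0', the returned list is
    # ['portal0', 'myhost', 'peerA', 'peerB'] -- portal first, this host
    # second, then the remaining peers.  With no portal configured, the
    # first entry is the empty string.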
 
    def validate(self, name):
        # Returns 1 when the name is NOT usable as a peer/portal address
        # (bad syntax, or a loopback alias); returns 0 when it is valid.
        name = name.strip()
        if not (valid_ip_address(name) or valid_hostname(name)):
            return 1
        if name == 'localhost':
            return 1
        if name == '127.0.0.1':
            return 1
        return 0
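
    # Return-value sketch (note the inverted convention: 1 means invalid):
    #
    #     self.validate('10.0.0.5')   -> 0   usable address
    #     self.validate('localhost')  -> 1   loopback alias rejected
    #     self.validate('127.0.0.1')  -> 1   loopback address rejected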
    
    def handle_request(self, request):
        pass

        
    # create_node: name - the name of the peer/portal.
    # config - config["type"] tells whether this is a "Peer" or a "Portal".
    def create_node(self, name, config=()):
        config = dict(config)
        type = config['type'].lower()
        manager = self.get_manager()
        # Attribute access on the (possibly secured) manager raises here
        # if the caller is not permitted to use the modifier.
        if type == "peer":
            manager.add_peer
        else:
            manager.set_portal
        config.setdefault("parent", self.manager)
        peer_or_portal = config.setdefault("name", name).strip()
        ret = self.validate(peer_or_portal)
        if ret != 0:
            msg = 'Add Peer/Portal failed. %s is an invalid hostname/IP Address' % peer_or_portal
            raise ValueError(msg)
        if valid_hostname(peer_or_portal):
            tmp = get_ip_addr(peer_or_portal)
            if not valid_ip_address(tmp):
                raise ValueError('Cannot resolve the hostname %s. Please try with a valid Hostname.' % peer_or_portal)
        if type == 'peer':
            peer = peer_or_portal
            if not self.manager.is_peer_in_formation(peer):
                if not self.manager.is_host_the_portal(peer):
                    msg = 'Adding %s as a Peer' % str(peer)
                    msglog.log('CloudConfigurator', msglog.types.INFO, msg)
                    # Use possibly secured reference for the add.
                    manager.add_peer(peer)
                else:
                    raise ValueError('A Portal cannot be a Peer: "%s" is the Portal for the Cloud.' % peer)
            else:
                raise ValueError('Add peer did nothing: "%s" already in Cloud Formation.' % peer)
        else:
            portal = peer_or_portal
            if not self.manager.is_host_the_portal(portal):
                if not self.manager.is_peer_in_formation(portal):
                    msg = 'Setting the Portal as: %s' % str(portal)
                    msglog.log('CloudConfigurator', msglog.types.INFO, msg)
                    # Use possibly secured reference for the modification.
                    manager.set_portal(portal)
                else:
                    raise ValueError('"%s" is in the formation. It cannot be added as the Portal.' % portal)
            else:
                raise ValueError('Set Portal did nothing: "%s" is already the Portal.' % portal)
        return peer_or_portal
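
    # Hypothetical usage sketch: the configurator front end would call
    # create_node() with the host name and a type entry, e.g.
    #
    #     configurator.create_node('10.0.0.5', {'type': 'Peer'})
    #     configurator.create_node('cloud.example.com', {'type': 'Portal'})
    #
    # Anything other than 'peer' (case-insensitive) is treated as a portal.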

    # remove_node: if the name is in the current formation, remove it as a
    # peer; otherwise assume it is the portal and clear the portal setting.
    def remove_node(self, name):
        manager = self.get_manager()
        formation = self.manager.get_formation()
        if name in formation:
            msg = 'Removing %s as a Peer' % str(name)
            msglog.log('CloudConfigurator', msglog.types.INFO, msg)
            manager.remove_peer(name)
        else:
            msg = 'Removing %s as a Portal' % str(name)
            msglog.log('CloudConfigurator', msglog.types.INFO, msg)
            manager.set_portal(None)
        return name
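
# Hypothetical usage sketch for remove_node(): any name found in the
# current formation is removed as a peer; any other name is assumed to be
# the portal, and the portal setting is cleared.
#
#     configurator.remove_node('10.0.0.5')           # removes a peer
#     configurator.remove_node('cloud.example.com')  # clears the portal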
Example #54
0
class ScheduleConfigurator(CompositeNode):
    LOADFAILURE = '"%s" failed to load %s "%s".  Exception follows.'

    def __init__(self, *args):
        self._pdo = None
        self.template = ""
        self._pdo_lock = Lock()
        self.page_template = "/templates/schedules.html"
        super(ScheduleConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/scheduleconfig'))
        self.secured = as_boolean(as_internal_node("/services").secured)
        security_manager = config.get('security_manager',
                                      '/services/Security Manager')
        self.setattr('security_manager', security_manager)
        super(ScheduleConfigurator, self).configure(config)

    def configuration(self):
        config = super(ScheduleConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['security_manager'] = self.getattr('security_manager')
        config['secured'] = str(self.getattr('secured'))
        return config

    def start(self):
        self.nodes = []
        from mpx.service.time.time_zone import TimeZone
        if not IScheduleHolderParent.implementedBy(TimeZone):
            class_implements(TimeZone, IScheduleHolderParent)
        self.security_manager = self.nodespace.as_node(self.security_manager)
        self._pdo_lock.acquire()
        try:
            if not self._pdo:
                self._pdo = PersistentDataObject(self, dmtype=GC_NEVER)
                self._pdo.holders = {}
                self._pdo.schedules = {}
                self._pdo.load()
            holderdicts = self._pdo.holders.values()
            holderdumps = []
            for holderdict in holderdicts:
                holderdumps.extend(holderdict.items())
            # schedules = {'local': {'Holder1': {'Sched1': Sched1Dump}, 'Holder2': ...}, 'UTC': ...}
            scheduledicts = []
            for holderdict in self._pdo.schedules.values():
                scheduledicts.extend(holderdict.values())
            scheduledumps = []
            for scheduledict in scheduledicts:
                scheduledumps.extend(scheduledict.items())
        finally:
            self._pdo_lock.release()
        super(ScheduleConfigurator, self).start()
        tstart = time.time()
        for holdername, holderdump in holderdumps:
            try:
                self.nodes.append(IPickles(cPickle.loads(holderdump))())
            except:
                message = self.LOADFAILURE % (self.name, 'Schedule Holder',
                                              holdername)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Schedule Configurator loaded '
            '%s nodes in %s seconds.' % (len(holderdumps), tlapse))

        tstart = time.time()
        for schedulename, scheduledump in scheduledumps:
            try:
                self.nodes.append(IPickles(cPickle.loads(scheduledump))())
            except:
                message = self.LOADFAILURE % (self.name, 'Schedule',
                                              schedulename)
                msglog.log('broadway', msglog.types.ERR, message)
                msglog.exception(prefix='Handled')
        tend = time.time()
        tlapse = tend - tstart
        msglog.log(
            'broadway', msglog.types.INFO, 'Schedule Configurator loaded '
            '%s nodes in %s seconds.' % (len(scheduledumps), tlapse))
        self.template = self.parent.read_resource(self.page_template)
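
    # Sketch of the persisted layout unpacked in start() (hypothetical
    # names), matching the structure comment above:
    #
    #     self._pdo.holders   = {'local': {'Holder1': <holder dump>}}
    #     self._pdo.schedules = {'local': {'Holder1': {'Sched1': <dump>}}}
    #
    # Both are flattened to (name, dump) pairs, and each dump is revived
    # with IPickles(cPickle.loads(dump))().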

    def stop(self):
        super(ScheduleConfigurator, self).stop()

    def match(self, path):
        return path.startswith(self.path)

    def message(self, message, *args, **kw):
        mtype = kw.get("type", msglog.types.INFO)
        if mtype != msglog.types.DB or self.debug:
            message = message % args
            msglog.log("broadway", mtype, "[%s] %s" % (self.name, message))
            return True
        return False

    def debugout(self, *args, **kw):
        kw.setdefault("type", msglog.types.DB)
        return self.message(*args, **kw)

    def configurenode(self, node, data):
        config = {}
        for fieldname in data.keys():
            prefix, sep, attrname = fieldname.partition('.')
            if sep and prefix == "configure":
                values = map(urllib.unquote_plus, data[fieldname])
                if attrname == "name":
                    # Make sure the node name starts with "RZSched_";
                    # if not, prepend it.
                    if not values[0].startswith("RZSched_"):
                        values[0] = "RZSched_" + values[0]
                config[attrname] = values[0]
        self.message("configuring %s with %s", node, config)
        try:
            node.stop()
        except:
            msglog.exception(prefix='Handled')
        node.configure(config)
        try:
            node.start()
        except:
            msglog.exception(prefix='Handled')
        return node.parent
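
    # Hypothetical form data consumed by configurenode(): any field named
    # 'configure.<attr>' becomes a config entry, so
    #
    #     data = {'configure.name': ['Weekly'], 'configure.description': ['']}
    #
    # restarts the node with {'name': 'RZSched_Weekly', 'description': ''}.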

    def createnode(self, node, data):
        if IScheduleHolderParent.providedBy(node):
            defaultname = "Holder %d"
            nodetype = ScheduleHolder
        elif IScheduleHolder.providedBy(node):
            defaultname = "RZShed_%d"
            nodetype = Schedule
        else:
            message = "createnode() does not know what to create for %s"
            raise TypeError(message % node)
        parentnode = node
        if data.has_key("configure.name"):
            childname = urllib.unquote_plus(data['configure.name'][0])
        else:
            index = len(parentnode.children_nodes()) + 1
            childname = defaultname % index
            # Keep bumping the index until the default name does not
            # collide with an existing child.
            while parentnode.has_child(childname):
                index += 1
                childname = defaultname % index
        if not childname:
            raise TypeError("Illegal node name: %r" % childname)
        if IScheduleHolder.providedBy(node):
            if childname == '':
                error = "Holder name required"
                raise TypeError(error)
            if not childname.startswith("RZSched_"):
                if childname.lower().startswith("rzsched_"):
                    childname = childname.lower().split("rzsched_")[1]
                childname = "RZSched_" + childname
        config = {
            'name': childname,
            'parent': parentnode,
            'description': '',
            'source': 'ScheduleConfigurator'
        }
        message = "createnode() creating %s type child of %s."
        self.message(message, nodetype.__name__, parentnode)
        childnode = nodetype()
        childnode.configure(config)
        return parentnode
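
    # Naming sketch for createnode(): with no 'configure.name' field and
    # three existing children, the loop above tries 'Holder 4', then
    # 'Holder 5', and so on until a name does not collide with an
    # existing child.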

    def holder_parents(self):
        children = as_node("/services/time").children_nodes()
        return filter(IScheduleHolderParent.providedBy, children)

    def holder_parent_names(self):
        return [parent.name for parent in self.holder_parents()]

    def schedule_holders(self, parent=None):
        if parent is None:
            parent = self.nodespace.as_node("/services/time/local")
        return filter(IScheduleHolder.providedBy, parent.children_nodes())

    def schedule_holder_names(self, parent=None):
        return [holder.name for holder in self.schedule_holders(parent)]

    def prunenode(self, node, data):
        self.message("removing %s", as_node_url(node))
        parentnode = node.parent
        self.debugout("parent node %s", as_node_url(parentnode))
        node.prune()
        return parentnode

    def handle_request(self, request):
        response = Response(request)
        data = request.get_post_data_as_dictionary()
        data.update(request.get_query_string_as_dictionary())
        if data.has_key("node"):
            nodeurl = urllib.unquote_plus(data["node"][0])
        else:
            nodeurl = "/services/time/local"
        try:
            adapt = self.__handle_by_context(nodeurl, request, data)
        except TypeError, error:
            msglog.exception(prefix="handled")
            response.send_error(500, str(error))
        else:
Example #55
0
 def __init__(self):
     PersistentDataObject.__init__(self, 'mpx.lib.user._UserDictionary')
     self.users = {}
     self.__lock = Lock()