Example #1
0
class OverrideManager(object):
    """Tracks per-node priority arrays (string levels '1'..'18') in a
    persistent store shared across restarts."""

    def __init__(self):
        # Backing store; survives process restarts.
        self._priority_arrays = PersistentDictionary('OverrideManager')

    def get_array(self, nodepath):
        """Return the priority array for *nodepath*, creating and
        persisting an all-None default on first access."""
        array = self._priority_arrays.get(nodepath)
        if array is None:
            # Levels 1..18, all initially unset (None).
            array = dict.fromkeys([str(level) for level in range(1, 19)])
            self._priority_arrays[nodepath] = array
        return array

    def notify_changed(self, nodepath, priority_array=None):
        """Store *priority_array* (when given) and mark the entry dirty
        so the persistence layer rewrites it."""
        if priority_array is not None:
            self._priority_arrays[nodepath] = priority_array
        self._priority_arrays.notify_changed(nodepath)

    def singleton_unload_hook(self):
        # Nothing to release when the singleton is unloaded.
        pass
Example #2
0
 def start(self):
     """Load persisted triggers (migrating legacy PDO data if the new
     store is empty), resolve the optional security manager, and start
     the node."""
     filename = '%s (%s)' % (self.name, 'triggers')
     self.manager = self.nodespace.as_node(self.manager)
     self._pdo_lock.acquire()
     try:
         if self._triggers is None:
             # encode/decode of None stores values without translation.
             self._triggers = PersistentDictionary(filename,
                                                   encode=None,
                                                   decode=None)
         if not self._triggers:
             # Empty store: migrate data persisted by the old
             # PersistentDataObject mechanism, then destroy it.
             pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
             if os.path.exists(pdodata.filename()):
                 msglog.log('broadway', msglog.types.INFO,
                            "Migrating previous trigger data.")
                 pdodata.triggers = {}
                 pdodata.load()
                 self._triggers.update(pdodata.triggers)
                 pdodata.destroy()
             del (pdodata)
         self._loadtriggers()
         if self.secured:
             self.security_manager = self.as_node(
                 "/services/Security Manager")
         else:
             self.security_manager = None
     finally:
         self._pdo_lock.release()
     return super(TriggersConfigurator, self).start()
Example #3
0
 def start(self):
     """Open the persistent alarm and event stores (migrating legacy
     PDO data if present), resolve collaborators, subscribe to manager
     state events, and start the configurator."""
     self.managernode = self.as_node(self.manager)
     self.synclock.acquire()
     try:
         alarmsname = '%s (%s)' % (self.name, 'alarms')
         eventsname = '%s (%s)' % (self.name, 'events')
         self.alarms = PersistentDictionary(alarmsname,
                                            encode=self.encode,
                                            decode=self.decode)
         self.events = PersistentDictionary(eventsname,
                                            encode=self.encode,
                                            decode=self.decode)
         # Migrate PDO data from old style persistence.
         pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.INFO,
                        "Migrating previous alarm and event data")
             pdodata.events = {}
             pdodata.alarms = {}
             pdodata.load()
             migrate(pdodata, self.decode)
             # Rebuild derived storage from the migrated entries.
             self.rebuildstorage()
             pdodata.destroy()
         del(pdodata)
     finally:
         self.synclock.release()
     self.securitymanager = self.as_node('/services/Security Manager')

     # Route manager StateEvents to our handler.
     register = self.managernode.register_for_type
     self.sub = register(self.handle_event, StateEvent)
     self.running.set()
     super(AlarmConfigurator, self).start()
Example #4
0
 def __init__(self):
     """Initialize the Global Setpoint persistence layer.

     The backing dictionary maps each node path to a record of the
     form {'node_config': ..., 'group_config': ..., 'entity_map': ...}.
     """
     self._persisted_data = PersistentDictionary('GSPData')
     self.debug = 1                  # message verbosity threshold
     self._persist_enabled = False   # writes ignored until enabled
Example #5
0
 def __init__(self):
     """Initialize the schedule persistence layer.

     The backing dictionary maps each node path to a record holding
     'cfg', 'summary', 'meta', 'properties', 'fail_list',
     'sync_state' and 'override' entries.
     """
     self._persisted_data = PersistentDictionary('ScheduleData')
     self.debug = 1  # message verbosity threshold
Example #6
0
 def start(self):
     """Recreate previously persisted child nodes, then start."""
     if self.nodes is None:
         storename = "%s (%s)" % (type(self).__name__, self.name)
         self.nodes = PersistentDictionary(storename)
         # Recreate parents before children: order URLs by path depth.
         urls = self.nodes.keys()
         urls.sort(pathcompare)
         for url in urls:
             factory, configuration = self.nodes[url]
             self.create_node(factory, url, **configuration)
     super(NodeConfigurator, self).start()
Example #7
0
 def start(self):
     """Start the trend manager: load persisted trend configuration
     (migrating legacy PDO data), build the 'trends' container node,
     and unmarshal each stored trend, purging any that fail to load."""
     try:
         self._pdo_lock.acquire()
         try:
             if self.__running:
                 return
             self.__running = True
             # encode/decode of None stores values without translation.
             self._trendconfig = PersistentDictionary(filename(self),
                                                      encode=None,
                                                      decode=None)
             if not self._trendconfig:
                 # Empty store: migrate data persisted by the old
                 # PersistentDataObject mechanism, if present.
                 pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                 if os.path.exists(pdodata.filename()):
                     msglog.log('broadway', msglog.types.INFO,
                                "Migrating previous trend data")
                     pdodata.trends = {}
                     pdodata.load()
                     self._trendconfig.update(pdodata.trends)
                 del (pdodata)
         finally:
             self._pdo_lock.release()
         super(TrendManager, self).start()
         self.logger = node.as_internal_node(self.logger_url)
         if self.has_child('trends'):
             self.trends = self.get_child('trends')
         else:
             # First run: create the container for trend child nodes.
             self.trends = CompositeNode()
             self.trends.configure({'parent': self, 'name': 'trends'})
             self.trends.start()
         corrupt_trends = []
         for trendname, trenddump in self._trendconfig.items():
             msg = "Loading trend: %s" % trendname
             msglog.log('trendmanager', msglog.types.INFO, msg)
             try:
                 trend = unmarshal(trenddump)
             except:
                 # Remember corrupt entries; they are purged below.
                 corrupt_trends.append(trendname)
                 msg = "Failed to load trend: %s" % trendname
                 msglog.log('trendmanager', msglog.types.ERR, msg)
                 msglog.exception(prefix='Handled')
         for trendname in corrupt_trends:
             try:
                 msg = "Deleting trend information: %s" % trendname
                 msglog.log('trendmanager', msglog.types.INFO, msg)
                 self._delete_trend_configuration(trendname)
                 if self.trends.has_child(trendname):
                     trend = self.trends.get_child(trendname)
                     trend.prune(force=True)
             except:
                 msglog.exception(prefix='Handled')
     except:
         # Any startup failure leaves the manager stopped.
         self.__running = False
         raise
     return
Example #8
0
 def start(self):
     """Lazily create the persistent session store and schedule the
     periodic collection task, then delegate to ServiceNode.start."""
     self._begin_critical_section()
     try:
         if self._sessions is None:
             self._sessions = PersistentDictionary(
                 self.name, encode=Session.encode, decode=Session.decode)
         if not self._scheduled:
             # Run collect() every ttl seconds.
             self._scheduled = scheduler.every(self.ttl, self.collect)
     finally:
         self._end_critical_section()
     self.user_manager = as_node("/services/User Manager")
     return ServiceNode.start(self)
Example #9
0
 def __init__(self):
     """Initialize the Global Setpoint persistence layer.

     The backing dictionary maps each node path to a record of the
     form {'node_config': ..., 'group_config': ..., 'entity_map': ...}.
     """
     self._persisted_data = PersistentDictionary('GSPData')
     self.debug = 1                  # message verbosity threshold
     self._persist_enabled = False   # writes ignored until enabled
Example #10
0
 def start(self):
     """Load persisted triggers (migrating legacy PDO data if the new
     store is empty), resolve the optional security manager, and start
     the node."""
     filename = '%s (%s)' % (self.name, 'triggers')
     self.manager = self.nodespace.as_node(self.manager)
     self._pdo_lock.acquire()
     try:
         if self._triggers is None:
             # encode/decode of None stores values without translation.
             self._triggers = PersistentDictionary(
                 filename, encode=None, decode=None)
         if not self._triggers:
             # Empty store: migrate data persisted by the old
             # PersistentDataObject mechanism, then destroy it.
             pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
             if os.path.exists(pdodata.filename()):
                 msglog.log('broadway', msglog.types.INFO,
                            "Migrating previous trigger data.")
                 pdodata.triggers = {}
                 pdodata.load()
                 self._triggers.update(pdodata.triggers)
                 pdodata.destroy()
             del(pdodata)
         self._loadtriggers()
         if self.secured:
             self.security_manager = self.as_node("/services/Security Manager")
         else:
             self.security_manager = None
     finally:
         self._pdo_lock.release()
     return super(TriggersConfigurator, self).start()
Example #11
0
 def __init__(self):
     """Initialize the schedule persistence layer.

     The backing dictionary maps each node path to a record holding
     'cfg', 'summary', 'meta', 'properties', 'fail_list',
     'sync_state' and 'override' entries.
     """
     self._persisted_data = PersistentDictionary('ScheduleData')
     self.debug = 1  # message verbosity threshold
Example #12
0
 def start(self):
     """Recreate previously persisted child nodes, then start."""
     if self.nodes is None:
         storename = "%s (%s)" % (type(self).__name__, self.name)
         self.nodes = PersistentDictionary(storename)
         # Recreate parents before children: order URLs by path depth.
         urls = self.nodes.keys()
         urls.sort(pathcompare)
         for url in urls:
             factory, configuration = self.nodes[url]
             self.create_node(factory, url, **configuration)
     super(NodeConfigurator, self).start()
Example #13
0
 def start(self):
     """Lazily create the persistent session store and schedule the
     periodic collection task, then delegate to ServiceNode.start."""
     self._begin_critical_section()
     try:
         if self._sessions is None:
             self._sessions = PersistentDictionary(
                 self.name, encode=Session.encode, decode=Session.decode)
         if not self._scheduled:
             # Run collect() every ttl seconds.
             self._scheduled = scheduler.every(self.ttl, self.collect)
     finally:
         self._end_critical_section()
     self.user_manager = as_node("/services/User Manager")
     return ServiceNode.start(self)
Example #14
0
 def start(self):
     """Start the trend manager: load persisted trend configuration
     (migrating legacy PDO data), build the 'trends' container node,
     and unmarshal each stored trend, purging any that fail to load."""
     try:
         self._pdo_lock.acquire()
         try:
             if self.__running:
                 return
             self.__running = True
             # encode/decode of None stores values without translation.
             self._trendconfig = PersistentDictionary(filename(self), encode=None, decode=None)
             if not self._trendconfig:
                 # Empty store: migrate data persisted by the old
                 # PersistentDataObject mechanism, if present.
                 pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                 if os.path.exists(pdodata.filename()):
                     msglog.log("broadway", msglog.types.INFO, "Migrating previous trend data")
                     pdodata.trends = {}
                     pdodata.load()
                     self._trendconfig.update(pdodata.trends)
                 del (pdodata)
         finally:
             self._pdo_lock.release()
         super(TrendManager, self).start()
         self.logger = node.as_internal_node(self.logger_url)
         if self.has_child("trends"):
             self.trends = self.get_child("trends")
         else:
             # First run: create the container for trend child nodes.
             self.trends = CompositeNode()
             self.trends.configure({"parent": self, "name": "trends"})
             self.trends.start()
         corrupt_trends = []
         for trendname, trenddump in self._trendconfig.items():
             msg = "Loading trend: %s" % trendname
             msglog.log("trendmanager", msglog.types.INFO, msg)
             try:
                 trend = unmarshal(trenddump)
             except:
                 # Remember corrupt entries; they are purged below.
                 corrupt_trends.append(trendname)
                 msg = "Failed to load trend: %s" % trendname
                 msglog.log("trendmanager", msglog.types.ERR, msg)
                 msglog.exception(prefix="Handled")
         for trendname in corrupt_trends:
             try:
                 msg = "Deleting trend information: %s" % trendname
                 msglog.log("trendmanager", msglog.types.INFO, msg)
                 self._delete_trend_configuration(trendname)
                 if self.trends.has_child(trendname):
                     trend = self.trends.get_child(trendname)
                     trend.prune(force=True)
             except:
                 msglog.exception(prefix="Handled")
     except:
         # Any startup failure leaves the manager stopped.
         self.__running = False
         raise
     return
Example #15
0
 def start(self):
     """Start the Equipment Monitor service.

     Validates the required formatter/transporter child pair, opens
     (and if needed migrates) the persistent subscription store,
     restarts every persisted subscription on the worker threads, and
     finally starts the composite node.  On any failure all resources
     are released and the running flag cleared before re-raising.
     """
     if self.is_running():
         raise TypeError("Equipment Monitor already running.")
     if TESTING and not self.test_machines:
         self.test_machines = setup_machines()
         machinecount = len(self.test_machines)
         self.debugout("Setup %d test machines" % machinecount)
     self.synclock.acquire()
     try:
         self.running.set()
         if self.subscriptions and not self.subscriptions.closed():
             self.subscriptions.close()
         self.formatter = None
         self.transporter = None
         # Exactly one formatter and one transporter child required.
         children = self.children_nodes()
         for childnode in children:
             if IFormatter.providedBy(childnode):
                 if self.formatter is not None:
                     raise TypeError("Already has formatter child.")
                 self.formatter = childnode
             if ITransporter.providedBy(childnode):
                 if self.transporter is not None:
                     raise TypeError("Already has transporter child.")
                 self.transporter = childnode
         if not self.formatter:
             raise TypeError("Must have one formatter child node.")
         if not self.transporter:
             raise TypeError("Must have one transporter child node.")
         self.smservice = as_node(self.smnodeurl)
         self.subscriptions = PersistentDictionary(
             self.name,
             encode=self.serialize_subscription,
             decode=self.unserialize_subscription)
         # One-time migration from the old PDO persistence format.
         pdodata = PersistentDataObject(self)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.WARN,
                        "Equipment Monitor upgrading persistence.")
             migrate = frompdo(pdodata)
             self.subscriptions.update(migrate)
             message = "Equipment Monitor merged %d subscriptions."
             message = message % len(migrate)
             msglog.log('broadway', msglog.types.INFO, message)
             pdodata.destroy()
             msglog.log('broadway', msglog.types.WARN,
                        "Equipment Monitor destroyed old persistence.")
             msglog.log('broadway', msglog.types.INFO,
                        "Equipment Monitor persistence upgrade complete.")
         del (pdodata)
         message = 'Equipment Monitor startup: %s %s'
         for subscription in self.subscriptions.values():
             try:
                 subscription.setup_subscription()
             except:
                 msglog.exception(prefix="handled")
             else:
                 self.debugout(message % ('setup', subscription))
         # Stagger subscription start-up: roughly 30 subscriptions per
         # skip-count bucket (Python 2 integer division on the next line).
         skipcounts = []
         for i in range(0, 1 + len(self.subscriptions) / 30):
             skipcounts.extend([i + 1] * 30)
         self.setup_work_threads()
         for subscription in self.subscriptions.values():
             try:
                 subscription.start(skipcounts.pop())
             except:
                 msglog.exception(prefix="Handled")
             else:
                 self.debugout(message % ('started', subscription))
     except:
         self.cleanup_resources()
         self.running.clear()
         raise
     finally:
         self.synclock.release()
     super(EquipmentMonitor, self).start()
Example #16
0
class PersistanceManager(object):
    """Persists Global Setpoint (GSP) group data keyed by node path.

    Each entry maps a normalized node path to a record of the form
    {'node_config': ..., 'group_config': ..., 'entity_map': ...,
     'node_factory': ...}.  Writes are ignored until enable_persist()
    is called, so startup replay does not re-persist existing data.
    """
    def __init__(self):
        self._persisted_data = PersistentDictionary('GSPData')
        self.debug = 1                  # message verbosity threshold
        self._persist_enabled = False   # writes ignored until enabled

    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log *message* when the debug threshold is at least *level*."""
        if self.debug >= level:
            msglog.log('Global Setpoint Manager', mtype, message)

    def persist_enabled(self):
        """Return True when writes are currently being persisted."""
        return self._persist_enabled

    def enable_persist(self):
        """Allow subsequent writes to be persisted."""
        self._persist_enabled = True

    def disable_persist(self):
        """Ignore subsequent writes."""
        self._persist_enabled = False

    def get_gsp_groups(self):
        """Return all group node paths, shallowest paths first."""
        groups = self._persisted_data.keys()
        groups.sort(lambda a, b: cmp(a.count('/'), b.count('/')))
        return groups

    def get_gsp_group(self, nodepath):
        """Return the record for *nodepath*; KeyError when absent."""
        return self._persisted_data[normalize_nodepath(nodepath)]

    def put_gsp_group(self, nodepath, nodedata):
        """Create a default record for *nodepath* if needed, then merge
        *nodedata* into it.  No-op while persistence is disabled."""
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            data = {'node_config': {},
                    'group_config': [],
                    'entity_map': {},
                    'node_factory': ''}
            self._persisted_data[nodepath] = data
        self.put_gsp_group_data(nodepath, nodedata)

    def remove_gsp_group(self, nodepath):
        """Delete the record for *nodepath*; no-op when absent."""
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            del self._persisted_data[nodepath]

    def get_gsp_group_data(self, nodepath):
        """Return the record for *nodepath* (same as get_gsp_group)."""
        nodepath = normalize_nodepath(nodepath)
        return self._persisted_data[nodepath]

    def put_gsp_group_data(self, nodepath, nodedata):
        """Store every key of *nodedata* known to the existing record;
        unknown keys and None values are ignored."""
        nodepath = normalize_nodepath(nodepath)
        for data_key in self._persisted_data[nodepath].keys():
            value = nodedata.get(data_key)
            if value is not None:
                self._put_entry(nodepath, data_key, value)

    def get_gsp_group_nconfig(self, nodepath):
        # node configuration data
        return self._get_entry(nodepath, 'node_config')

    def put_gsp_group_nconfig(self, nodepath, value):
        # node configuration data
        self._put_entry(nodepath, 'node_config', value)

    def get_gsp_group_gconfig(self, nodepath):
        # gsp group configuration data
        return self._get_entry(nodepath, 'group_config')

    def put_gsp_group_gconfig(self, nodepath, value):
        # gsp group configuration data
        self._put_entry(nodepath, 'group_config', value)

    # Backward-compatible alias: the setter was originally published
    # under this misspelled name; keep it so existing callers work.
    putt_gsp_group_gconfig = put_gsp_group_gconfig

    def get_gsp_group_entity_map(self, nodepath):
        """Return the entity map for *nodepath*."""
        return self._get_entry(nodepath, 'entity_map')

    def put_gsp_group_entity_map(self, nodepath, value):
        """Store the entity map for *nodepath*."""
        self._put_entry(nodepath, 'entity_map', value)

    def _get_entry(self, nodepath, data_type):
        # Fetch one field of the record for nodepath.
        return self.get_gsp_group(normalize_nodepath(nodepath))[data_type]

    def _put_entry(self, nodepath, data_type, value):
        # Store one field and flag the record dirty.  Silently ignored
        # while persistence is disabled.
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        group = self.get_gsp_group(nodepath)
        assert group, \
            'A group must exist before data can be stored against it.'
        group[data_type] = value
        self._persisted_data.notify_changed(nodepath)

    def singleton_unload_hook(self):
        # Nothing to release when the singleton is unloaded.
        pass
Example #17
0
class EquipmentMonitor(CompositeNode):
    """Service that pushes formatted subscription data to a transport.

    Requires exactly one IFormatter and one ITransporter child node.
    Subscriptions are persisted (as repr'd dictionaries) in a
    PersistentDictionary and re-established on start; work is executed
    by a pool of WorkThread instances fed through work_queue.
    """

    implements(IEquipmentMonitor)

    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()
        self.threadcount = 1        # size of the worker-thread pool
        self.formatter = None
        self.transporter = None
        self.smservice = None
        self.subscriptions = None
        self.running = Flag()
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()
        # timestamp -> SubscriptionGroup scheduled at that time
        self.execution_groups = Dictionary()
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)

    def configure(self, config):
        """Apply node configuration (subscription_manager URL and
        worker thread count)."""
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)

    def configuration(self):
        """Return the current node configuration."""
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config

    def start(self):
        """Start the service: validate children, open/migrate the
        persistent subscription store, restart persisted subscriptions,
        then start the composite node.  Cleans up and re-raises on any
        failure."""
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            # Exactly one formatter and one transporter child required.
            children = self.children_nodes()
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name,
                encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration from the old PDO persistence format.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del (pdodata)
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Stagger subscription start-up: roughly 30 subscriptions
            # per skip-count bucket (Python 2 integer division below).
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) / 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()

    def stop(self):
        """Stop every subscription and the worker threads; release all
        resources on success, restore the running flag on failure."""
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            # NOTE(review): "Eqiupment" is a pre-existing typo in this
            # runtime string; left unchanged here.
            message = "Exception caused Eqiupment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()

    def get_subscription(self, sid, default=None):
        """Return the subscription for *sid*, or *default*."""
        return self.subscriptions.get(sid, default)

    def get_subscription_manager(self):
        """Return the resolved Subscription Manager service node."""
        return self.smservice

    def get_formatter(self):
        """Return the formatter child node."""
        return self.formatter

    def get_transporter(self):
        """Return the transporter child node."""
        return self.transporter

    def schedule_subscription(self, subscription, timestamp):
        """Add *subscription* to the execution group for *timestamp*,
        creating and scheduling the group on first use.  Returns the
        group's schedule entry for the subscription."""
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(timestamp,
                                                       schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry

    def enqueue_work(self, callback, *args):
        """Queue (callback, args) for execution by a worker thread."""
        self.work_queue.put((callback, args))

    def dequeue_work(self, blocking=True):
        """Pop the next work item; blocks by default."""
        return self.work_queue.get(blocking)

    def is_running(self):
        """Return True while the service is started."""
        return self.running.isSet()

    def assert_running(self):
        """Raise TypeError unless the service is running."""
        if not self.is_running():
            raise TypeError('Service must be running.')
        return

    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist, and start a PushedSubscription; return its
        subscription ID."""
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            # Python 2: dict.items() returns a sliceable list.
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree, ))
        self.debugout('\n    '.join(message), 2)
        pushed.start(1)
        return sid

    def cancel(self, sid):
        """Stop and remove the subscription *sid*; return True when it
        existed and was running."""
        self.assert_running()
        if self.pause(sid):
            subscription = self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False

    def pause(self, sid, delay=None):
        """Stop subscription *sid* if it is running; return whether it
        was stopped.

        NOTE(review): the *delay* parameter is accepted but never used.
        """
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False

    def play(self, sid):
        """Start subscription *sid* if stopped; return whether it was
        started.  KeyError when *sid* is unknown."""
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False

    def reset(self, sid):
        """Re-establish subscription *sid*; return whether it exists."""
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False

    def list_subscriptions(self):
        """Return all known subscription IDs."""
        return self.subscriptions.keys()

    def notify_group_executed(self, group):
        """Remove *group* from the pending execution groups."""
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()

    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue, shut down the
        transporter's channels, and close the subscription store."""
        self.synclock.acquire()
        try:
            # NOTE(review): iterating execution_groups yields its keys
            # (timestamps), yet 'group.scheduled' reads as if the values
            # were expected — confirm Dictionary's iteration semantics;
            # the broad except below would mask a mismatch.
            for group in self.execution_groups:
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            try:
                # Drain any queued work items.
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()

    def setup_work_threads(self):
        """Spawn daemon worker threads up to threadcount; return the
        pool size."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)

    def teardown_work_threads(self):
        """Wake and join every worker thread; return how many there
        were.  Must be called with the service stopped."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        # One None sentinel per thread unblocks its dequeue_work call.
        map(self.work_queue.put, [None] * threadcount)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount

    def serialize_subscription(self, subscription):
        """Encode a subscription as the repr of its dictionary form."""
        return repr(subscription.as_dictionary())

    def unserialize_subscription(self, data):
        """Decode a persisted subscription.

        NOTE(review): eval of persisted data — acceptable only if the
        persistence store is fully trusted.
        """
        return PushedSubscription.from_dictionary(eval(data))

    def debugout(self, dbmessage, dblevel=1):
        """Log *dbmessage* when the module DEBUG level reaches
        *dblevel*."""
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)
Example #18
0
class PersistanceManager(object):
    """Persistence facade for scheduler data.

    Stores one record per schedule in a PersistentDictionary named
    'ScheduleData', keyed by normalized node path (see the layout
    comment in __init__).

    NOTE(review): the "Persistance" spelling is historical; renaming
    the class would break external references, so it is preserved.
    """
    def __init__(self):
        # Persisted record layout, keyed by normalized nodepath:
        # {nodepath:{'cfg':cfg, 
        #            'summary':summary, 
        #            'meta':meta, 
        #            'properties':properties,
        #            'fail_list':fail_list,
        #            'sync_state':sync_in_progress,
        #            'override':override}}
        self._persisted_data = PersistentDictionary('ScheduleData')
        # message() logs anything at a level <= self.debug.
        self.debug = 1
        
    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log *message* to the 'Scheduler' channel when debug >= *level*."""
        if self.debug >= level:
            msglog.log('Scheduler', mtype, message)
        
    def get_scheds(self):
        """Return all persisted schedule paths, ordered by sched_sort."""
        # Python 2: keys() returns a list, so an in-place cmp-sort works.
        scheds = self._persisted_data.keys()
        scheds.sort(sched_sort)
        return scheds
        
    def get_sched(self, nodepath):
        """Return the full persisted record for *nodepath*.

        Raises KeyError when the schedule was never stored.
        """
        return self._persisted_data[normalize_nodepath(nodepath)]
    
    def put_sched(self, nodepath, cfg):
        """Store configuration *cfg* for *nodepath*.

        Creates a default record first when the schedule is new.
        """
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            pdata = {'cfg':{},
                     'summary':[[], [], [], 'exceptions'],
                     'meta':{},
                     'properties':[],
                     'fail_list':[],
                     'sync_state':False,
                     'override':False}
            self._persisted_data[nodepath] = pdata
        self.put_sched_cfg(nodepath, cfg)
        
    def remove_sched(self, nodepath):
        """Remove *nodepath* and every persisted descendant of it.

        Logs (rather than raising) when the schedule does not exist.
        """
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            # Delete the node itself plus any schedule whose normalized
            # path shares the same prefix (i.e. its descendants).
            for sched in self.get_scheds():
                if sched.startswith(nodepath):
                    del self._persisted_data[sched]
        else:
            msg = 'Error removing non-existent schedule %s from persistent data.' \
                % nodepath
            self.message(msg)
                         
    def move_sched(self, source, destination, cfg, is_rename=False):
        """Re-key persisted records from *source* to *destination*.

        Handles the source root (rename or move) and all descendants.
        NOTE(review): relies on get_scheds() ordering visiting the
        source root before any descendant -- oldroot/newroot are only
        bound inside the root branch; confirm sched_sort guarantees
        this.
        """
        source = normalize_nodepath(source)
        destination = normalize_nodepath(destination)
        for sched in self.get_scheds():
            if not sched.startswith(source):
                continue
            data = self._persisted_data[sched]
            del self._persisted_data[sched]
            if sched == source:
                # rename
                if is_rename:
                    newsched = destination
                else:
                    # Move: re-root under destination, keeping the
                    # last path component of source.
                    newsched = sched.replace(source, destination) + source.split('/')[-2] + '/'
                oldroot = sched
                newroot = newsched
                self._persisted_data[newsched] = data 
                # prior to persisting, the schedule should have been moved
                # within the nodetree.  We grab and persist the latest configuration.
                # This put call will also ensure sync to disk to takes place.
                self.put_sched_cfg(newsched, cfg)
            else:
                newsched = normalize_nodepath(sched.replace(oldroot, newroot)) #+ sched_name + '/'
                self._persisted_data[newsched] = data 
                self.put_sched_cfg(newsched, serialize_node(as_node(newsched)))
                    
    def get_sched_cfg(self, nodepath):
        """Return the persisted configuration for *nodepath*."""
        return self._get_entry('cfg', nodepath)
    
    def put_sched_cfg(self, nodepath, cfg):
        """Persist configuration for *nodepath*."""
        self._put_entry('cfg', nodepath, cfg)
        
    def get_sched_summary(self, nodepath):
        """Return the persisted summary structure for *nodepath*."""
        return self._get_entry('summary', nodepath)
    
    def put_sched_summary(self, nodepath, summary):
        """Persist the summary structure for *nodepath*."""
        self._put_entry('summary', nodepath, summary)
        
    def get_sched_props(self, nodepath):
        """Return the persisted properties list for *nodepath*."""
        return self._get_entry('properties', nodepath)
    
    def put_sched_props(self, nodepath, properties):
        """Persist the properties list for *nodepath*."""
        self._put_entry('properties', nodepath, properties)
                    
    def get_sched_meta(self, nodepath):
        """Return the persisted metadata for *nodepath*."""
        return self._get_entry('meta', nodepath)
    
    def put_sched_meta(self, nodepath, meta):
        """Persist metadata for *nodepath*."""
        self._put_entry('meta', nodepath, meta)
                
    def get_fail_list(self, nodepath):
        """Return the persisted failure list for *nodepath*."""
        return self._get_entry('fail_list', nodepath)
    
    def put_fail_list(self, nodepath, fail_list):
        """Persist the failure list for *nodepath*."""
        self._put_entry('fail_list', nodepath, fail_list)
        
    def get_sync_state(self, nodepath):
        """Return the sync-in-progress flag for *nodepath*."""
        return self._get_entry('sync_state', nodepath)
    
    def put_sync_state(self, nodepath, sync_state):
        """Persist the sync-in-progress flag for *nodepath*."""
        self._put_entry('sync_state', nodepath, sync_state)
        
    def get_override(self, nodepath):
        """Return the override flag for *nodepath*."""
        return self._get_entry('override', nodepath)
    
    def put_override(self, nodepath, override):
        """Persist the override flag for *nodepath*."""
        self._put_entry('override', nodepath, override)
        
    def _get_entry(self, ptype, nodepath):
        """Return field *ptype* of the record for *nodepath*."""
        return self.get_sched(normalize_nodepath(nodepath))[ptype]
        
    def _put_entry(self, ptype, nodepath, value):
        """Set field *ptype* of the record and notify the store so the
        PersistentDictionary syncs the entry to disk."""
        nodepath = normalize_nodepath(nodepath)
        sched = self.get_sched(nodepath)
        assert sched, \
        'A schedule must exist before data can be stored against it.'
        sched[ptype] = value
        self._persisted_data.notify_changed(nodepath)        
            
    def singleton_unload_hook(self):
        """Singleton lifecycle hook; nothing to release."""
        pass
Example #19
0
class SessionManager(ServiceNode):
    """Service that creates, validates, and expires user sessions.

    Sessions are kept in a PersistentDictionary keyed by a random
    session id and reaped every `ttl` seconds by a scheduled
    collector.
    """
    import string
    # Legacy id alphabet/length constants; actual ids come from UUID()
    # (see _random_id), these are retained for compatibility.
    IDCHARS = string.ascii_letters + string.digits
    NCHARS = len(IDCHARS)
    IDLEN = 20
    ETC_DIR = properties.ETC_DIR

    def __init__(self):
        # Session time-to-live in seconds; overridable via configure().
        self.ttl = 3600
        self._lock = Lock()
        self._sessions = None
        self._scheduled = None
        self.user_manager = None
        ServiceNode.__init__(self)

    def _begin_critical_section(self):
        self._lock.acquire()

    def _end_critical_section(self):
        self._lock.release()

    def _random_id(self):
        return str(UUID())

    def _next_session_id(self):
        # UUID collisions are effectively impossible, but guard anyway.
        sid = self._random_id()
        while self._sessions.has_key(sid):
            sid = self._random_id()
        return sid

    def start(self):
        """Open (or create) the session store and schedule collection."""
        self._begin_critical_section()
        try:
            if self._sessions is None:
                self._sessions = PersistentDictionary(self.name,
                                                      encode=Session.encode,
                                                      decode=Session.decode)
            if not self._scheduled:
                self._scheduled = scheduler.every(self.ttl, self.collect)
        finally:
            self._end_critical_section()
        self.user_manager = as_node("/services/User Manager")
        return ServiceNode.start(self)

    def stop(self):
        """Cancel the collector and drop the session-store reference."""
        self._begin_critical_section()
        try:
            if self._scheduled:
                self._scheduled.cancel()
            self._scheduled = None
            self._sessions = None
        finally:
            self._end_critical_section()
        self.user_manager = None
        return ServiceNode.stop(self)

    def configure(self, cd):
        ServiceNode.configure(self, cd)
        set_attribute(self, 'ttl', self.ttl, cd, float)
        self.enabled = 1

    def configuration(self):
        cd = ServiceNode.configuration(self)
        get_attribute(self, 'ttl', cd, str)
        return cd
    ##
    # @fixme Use mpx.lib.security.User, as soon as it exists.
    # @fixme Cache mediator users...
    # @param nocheck Since we do not use shadow pass, the check can fail
    #                This is used to allow for bypass of check (for testing)
    # @exception ESessionDenied Raised when the SessionManager rejects
    #            creating the session because the request is not valid.
    #            In other words, the username or password are incorrect
    #            or there is some other aspect of the request which is
    #            not acceptable to the SessionManager.
    def create(self, user, password=None):
        # Authenticate first when handed a raw username instead of a User.
        if not isinstance(user, User):
            if properties.get_boolean('PAM_ENABLE'):
                authenticator = self.user_manager.user_from_pam
            else:
                authenticator = self.user_manager.user_from_cleartext
            try:
                user = authenticator(user, password)
            except EAuthenticationFailed:
                raise ESessionDenied("User credentials invalid.")
        self._begin_critical_section()
        try:
            sid = self._next_session_id()
            username = None
            # Two user types coexist; each exposes the name differently
            # (attribute vs. accessor method).
            if isinstance(user, User): username = user.name
            if isinstance(user, _User): username = user.name()
            self._sessions[sid] = Session(session_id=sid,
                                          ttl=self.ttl,
                                          username=username,
                                          password=password)
        finally:
            self._end_critical_section()
        return sid
    ##
    # Immediately invalidate a session.
    # @param session_id The string that identifies the session to invalidate.
    # @return The removed Session on success, False when no such session
    #         exists.
    def destroy(self, session_id):
        self._begin_critical_section()
        try:
            # pop() both removes and returns the session.  The previous
            # implementation popped and then del'd the same key, so the
            # del always raised KeyError and the method reported False
            # even when the session had just been removed.
            removed = self._sessions.pop(session_id)
        except KeyError:
            removed = False
        finally:
            self._end_critical_section()
        return removed
    ##
    # Checks if a session_id is in the list of valid sessions.
    # @param session_id The string that identifies the session.
    # @param touch If true, and the session exists, then the session's
    #        last_access time will be updated.
    # @return True if the session_id is currently valid.
    # @note The implementation assumes that if a session_id is in the list of
    #       managed sessions, then it is valid.  It is the responsibility of
    #       the "auto collection" mechanism to remove expired sessions in a
    #       timely fashon.
    def validate(self, session_id, touch=0):
        self._begin_critical_section()
        try:
            session = self._sessions.get(session_id)
            if session and session.valid():
                if touch:
                    session.touch()
                valid = True
            else:
                valid = False
        finally:
            self._end_critical_section()
        return valid
    ##
    # Scan all managed sessions for expired sessions.
    # @return The list of expired session ids invalidated by this
    #         invocation.
    def collect(self):
        sessions = self._sessions
        self._begin_critical_section()
        try:
            sids = [sid for sid,ses in sessions.items() if not ses.valid()]
        finally:
            self._end_critical_section()
        # destroy() re-acquires the lock, so it must run outside the
        # critical section above.
        for sid in sids:
            self.destroy(sid)
        return sids
    ##
    # Look up the user associated with a session
    # @return A string representing the user associated with this session.
    def get_user_from_sid(self, sid):
        user = None
        session = self._sessions.get(str(sid))
        if session:
            try:
                user = self.user_manager.get_user(session.username)
            except:
                msglog.exception(prefix="handled")
        return user

    ##
    # Look up the user associated with a session
    # @return True or False according to the user existence in session.
    def is_user_active(self, username):
        for sid in self._sessions:
            if self._sessions[sid].username == username:
                return True
        return False
Example #20
0
class EquipmentMonitor(CompositeNode):
    """Pushes formatted subscription data to remote targets.

    Owns a pool of worker threads, a persistent store of
    PushedSubscription objects, and per-timestamp execution groups
    scheduled via the global scheduler.  Requires exactly one
    IFormatter child and one ITransporter child.
    """
    implements(IEquipmentMonitor)
    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()
        self.threadcount = 1
        self.formatter = None
        self.transporter = None
        self.smservice = None
        self.subscriptions = None
        self.running = Flag()
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()
        # {timestamp: SubscriptionGroup} of scheduled-but-unrun work.
        self.execution_groups = Dictionary()
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)
    def configure(self, config):
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)
    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config
    def start(self):
        """Bind child helpers, restore persisted subscriptions
        (migrating legacy PDO data once), start worker threads, and
        start each subscription with staggered skip counts."""
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            # Discard any store left open by a previous run.
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            children = self.children_nodes()
            # Exactly one formatter and one transporter are required.
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name, encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration of legacy PersistentDataObject data.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del(pdodata)
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Build staggered start offsets: ~30 subscriptions per
            # skip-count bucket (integer division under Python 2).
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) / 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            # Roll back to a clean stopped state before re-raising.
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()
    def stop(self):
        """Stop all subscriptions and workers, then release resources."""
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            message = "Exception caused Eqiupment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            # Shutdown failed; restore the running flag before re-raising.
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()
    def get_subscription(self, sid, default=None):
        return self.subscriptions.get(sid, default)
    def get_subscription_manager(self):
        return self.smservice
    def get_formatter(self):
        return self.formatter
    def get_transporter(self):
        return self.transporter
    def schedule_subscription(self, subscription, timestamp):
        """Add *subscription* to the execution group for *timestamp*,
        creating and scheduling the group when it is the first entry."""
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(
                    timestamp, schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry
    def enqueue_work(self, callback, *args):
        self.work_queue.put((callback, args))
    def dequeue_work(self, blocking=True):
        return self.work_queue.get(blocking)
    def is_running(self):
        return self.running.isSet()
    def assert_running(self):
        if not self.is_running():
            raise TypeError('Service must be running.')
        return
    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist, and start a new pushed subscription;
        returns its subscription id."""
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree,))
        self.debugout('\n    '.join(message), 2)
        pushed.start(1)
        return sid
    def cancel(self, sid):
        """Stop and remove subscription *sid*; True when removed."""
        self.assert_running()
        if self.pause(sid):
            subscription = self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False
    def pause(self, sid, delay=None):
        # NOTE(review): *delay* is accepted for interface compatibility
        # but currently unused.
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False
    def play(self, sid):
        """Restart a paused subscription; True when it was restarted."""
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False
    def reset(self, sid):
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False
    def list_subscriptions(self):
        return self.subscriptions.keys()
    def notify_group_executed(self, group):
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()
    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue, and release
        transporter/subscription resources."""
        self.synclock.acquire()
        try:
            # Iterate the group objects, not the timestamp keys:
            # iterating the mapping directly yielded timestamps, so
            # group.scheduled.cancel() always raised AttributeError
            # (silently swallowed below) and nothing was cancelled.
            for group in self.execution_groups.values():
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            try:
                # NOTE(review): a falsy queued item (e.g. a None
                # sentinel) ends this drain early -- confirm intended.
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()
    def setup_work_threads(self):
        """Spawn the configured number of daemon worker threads."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)
    def teardown_work_threads(self):
        """Wake (via None sentinels) and join every worker thread."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        map(self.work_queue.put, [None] * threadcount)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount
    def serialize_subscription(self, subscription):
        """Encode a subscription as the repr() of its dictionary form."""
        return repr(subscription.as_dictionary())
    def unserialize_subscription(self, data):
        # NOTE(review): eval() assumes trusted local persistence only.
        return PushedSubscription.from_dictionary(eval(data))
    def debugout(self, dbmessage, dblevel=1):
        """Emit a debug message when the module DEBUG level permits."""
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)
Example #21
0
class TriggersConfigurator(CompositeNode):
    """Configurator for triggers owned by the Trigger Manager.

    Persists marshalled trigger definitions in a PersistentDictionary
    and mirrors create/configure/remove operations onto the manager
    node.
    """
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._triggers = None
        self.security_manager = None
        self._pdo_lock = Lock()
        super(TriggersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/triggerconfig'))
        self.setattr('manager',
                     config.get('container', '/services/Trigger Manager'))
        # Security mode follows the services root node.
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)

    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config

    def start(self):
        """Open trigger persistence (migrating legacy PDO data once),
        load stored triggers, and resolve the security manager."""
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(filename,
                                                      encode=None,
                                                      decode=None)
            if not self._triggers:
                # Empty store: migrate from an old PersistentDataObject
                # file when one exists, then destroy the old file.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO,
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del (pdodata)
            self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node(
                    "/services/Security Manager")
            else:
                self.security_manager = None
        finally:
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()

    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None

    def _loadtriggers(self, names=None):
        """Unmarshal and return stored triggers.

        *names* may be None (load all), a single name, or a sequence.
        Triggers that fail to unmarshal are logged and skipped.
        """
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers

    def _storetriggers(self, triggers=None):
        """Marshal and persist triggers (default: all manager triggers).

        Triggers that fail to marshal are logged and skipped.
        """
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers

    def _poptriggers(self, names=None):
        """Drop persisted data for *names* (default: stored entries
        with no matching trigger in the manager).  Returns the names
        actually removed."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed

    def match(self, path):
        """True when *path* falls under this configurator's URL path."""
        return path.startswith(self.path)

    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')

    def create_trigger(self, name, config=()):
        """Create, persist, and return the name of a new trigger.

        *config* may carry a 'type' naming (or ending with)
        'ComparisonTrigger' or 'BoundTrigger'; ComparisonTrigger is
        the default.
        """
        config = dict(config)
        if "type" in config:
            type = config.pop("type")
        else:
            type = "ComparisonTrigger"
        if isinstance(type, str):
            if type.endswith("ComparisonTrigger"):
                type = ComparisonTrigger
            elif type.endswith("BoundTrigger"):
                type = BoundTrigger
            else:
                # Message typo corrected ("Uknown" -> "Unknown").
                raise ValueError("Unknown type: %r" % type)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        trigger = self._create_trigger(type, config)
        self._storetriggers([trigger])
        return trigger.name

    create_node = create_trigger

    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')

    def remove_trigger(self, name):
        """Remove trigger *name* from the manager and from persistence."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name

    remove_node = remove_trigger

    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')

    def configure_trigger(self, name=None, config=()):
        """Stop, reconfigure, and restart an existing trigger.

        Stop/start failures are logged and ignored so a misbehaving
        trigger can still be reconfigured.
        """
        config = dict(config)
        if name is None:
            if config.has_key("name"):
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try:
            trigger.stop()
        except Exception, error:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix='Handled')
        trigger.configure(config)
        try:
            trigger.start()
        except Exception, error:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix='Handled')
Example #22
0
 def start(self):
     """Start the Equipment Monitor service.

     Binds the formatter/transporter child nodes, opens (and, once,
     migrates) persisted subscriptions, spins up the worker threads,
     and starts every stored subscription with staggered skip counts.

     Raises TypeError when already running or when the required
     formatter/transporter children are missing or duplicated.
     """
     if self.is_running():
         raise TypeError("Equipment Monitor already running.")
     if TESTING and not self.test_machines:
         self.test_machines = setup_machines()
         machinecount = len(self.test_machines)
         self.debugout("Setup %d test machines" % machinecount)
     self.synclock.acquire()
     try:
         self.running.set()
         # Discard any store left open by a previous run.
         if self.subscriptions and not self.subscriptions.closed():
             self.subscriptions.close()
         self.formatter = None
         self.transporter = None
         children = self.children_nodes()
         # Exactly one formatter and one transporter child required.
         for childnode in children:
             if IFormatter.providedBy(childnode):
                 if self.formatter is not None:
                     raise TypeError("Already has formatter child.")
                 self.formatter = childnode
             if ITransporter.providedBy(childnode):
                 if self.transporter is not None:
                     raise TypeError("Already has transporter child.")
                 self.transporter = childnode
         if not self.formatter:
             raise TypeError("Must have one formatter child node.")
         if not self.transporter:
             raise TypeError("Must have one transporter child node.")
         self.smservice = as_node(self.smnodeurl)
         self.subscriptions = PersistentDictionary(
             self.name, encode=self.serialize_subscription,
             decode=self.unserialize_subscription)
         # One-time migration of legacy PersistentDataObject data.
         pdodata = PersistentDataObject(self)
         if os.path.exists(pdodata.filename()):
             msglog.log('broadway', msglog.types.WARN,
                        "Equipment Monitor upgrading persistence.")
             migrate = frompdo(pdodata)
             self.subscriptions.update(migrate)
             message = "Equipment Monitor merged %d subscriptions."
             message = message % len(migrate)
             msglog.log('broadway', msglog.types.INFO, message)
             pdodata.destroy()
             msglog.log('broadway', msglog.types.WARN,
                        "Equipment Monitor destroyed old persistence.")
             msglog.log('broadway', msglog.types.INFO,
                        "Equipment Monitor persistence upgrade complete.")
         del(pdodata)
         message = 'Equipment Monitor startup: %s %s'
         for subscription in self.subscriptions.values():
             try:
                 subscription.setup_subscription()
             except:
                 msglog.exception(prefix="handled")
             else:
                 self.debugout(message % ('setup', subscription))
         # Build staggered start offsets: ~30 subscriptions per
         # skip-count bucket (integer division under Python 2).
         skipcounts = []
         for i in range(0, 1 + len(self.subscriptions) / 30):
             skipcounts.extend([i + 1] * 30)
         self.setup_work_threads()
         for subscription in self.subscriptions.values():
             try: 
                 subscription.start(skipcounts.pop())
             except: 
                 msglog.exception(prefix = "Handled")
             else:
                 self.debugout(message % ('started', subscription))
     except:
         # Roll back to a clean stopped state before re-raising.
         self.cleanup_resources()
         self.running.clear()
         raise
     finally:
         self.synclock.release()
     super(EquipmentMonitor, self).start()
Example #23
0
class TrendManager(CompositeNode):
    """Manage trend definitions that persist across restarts.

    Trend configurations are marshalled into a PersistentDictionary and
    rebuilt at startup as children of the 'trends' container node.
    Periodic logs found under the logger service are auto-discovered
    and wrapped with PeriodicLogTrendAdapter so they can be trended.
    """
    implements(ITrendManager)
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        super(TrendManager, self).__init__(*args)
        # URL of the logger service; set from config in configure().
        self.logger_url = None
        # Container node holding the trend child nodes.
        self.trends = None
        # Serializes access to the persistent trend configuration.
        self._pdo_lock = Lock()
        # PersistentDictionary mapping trend name -> marshalled trend.
        self._trendconfig = None
        self.__running = False
        self.secured = True
        return

    def _persist_trend_configuration(self, trend):
        """Marshal *trend* and store it under its name in the PDO."""
        self._pdo_lock.acquire()
        try:
            self._trendconfig[trend.name] = marshal(trend)
        finally:
            self._pdo_lock.release()
        return

    def _delete_trend_configuration(self, trend_name):
        """Remove the persisted configuration for *trend_name*, if any."""
        self._pdo_lock.acquire()
        try:
            if self._trendconfig.has_key(trend_name):
                del self._trendconfig[trend_name]
        finally:
            self._pdo_lock.release()
        return

    def configure(self, config):
        self.setattr('name', config.get('name', 'Trend Manager'))
        self.setattr('logger_url', config.get('logger_url',
                                              '/services/logger'))
        self.secured = as_internal_node("/services").secured
        super(TrendManager, self).configure(config)
        return

    def configuration(self):
        config = super(TrendManager, self).configuration()
        config['logger_url'] = self.getattr('logger_url')
        return config

    def start(self):
        """Load persisted trends, migrating legacy PDO data if present.

        Any trend whose dump fails to unmarshal is treated as corrupt:
        its persisted data is deleted and any partially built node is
        pruned.
        """
        try:
            self._pdo_lock.acquire()
            try:
                if self.__running:
                    return
                self.__running = True
                self._trendconfig = PersistentDictionary(filename(self),
                                                         encode=None,
                                                         decode=None)
                if not self._trendconfig:
                    # Empty dictionary: look for data persisted by the
                    # older PersistentDataObject format and migrate it.
                    pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                    if os.path.exists(pdodata.filename()):
                        msglog.log('broadway', msglog.types.INFO,
                                   "Migrating previous trend data")
                        pdodata.trends = {}
                        pdodata.load()
                        self._trendconfig.update(pdodata.trends)
                    del (pdodata)
            finally:
                self._pdo_lock.release()
            super(TrendManager, self).start()
            self.logger = node.as_internal_node(self.logger_url)
            if self.has_child('trends'):
                self.trends = self.get_child('trends')
            else:
                self.trends = CompositeNode()
                self.trends.configure({'parent': self, 'name': 'trends'})
                self.trends.start()
            corrupt_trends = []
            for trendname, trenddump in self._trendconfig.items():
                msg = "Loading trend: %s" % trendname
                msglog.log('trendmanager', msglog.types.INFO, msg)
                try:
                    # unmarshal() rebuilds the trend node; the returned
                    # instance is not referenced afterwards -- presumably
                    # it attaches itself under self.trends as a side
                    # effect (verify against unmarshal()).
                    trend = unmarshal(trenddump)
                except:
                    corrupt_trends.append(trendname)
                    msg = "Failed to load trend: %s" % trendname
                    msglog.log('trendmanager', msglog.types.ERR, msg)
                    msglog.exception(prefix='Handled')
            for trendname in corrupt_trends:
                # Purge persisted data and any partially built node for
                # each trend that failed to load.
                try:
                    msg = "Deleting trend information: %s" % trendname
                    msglog.log('trendmanager', msglog.types.INFO, msg)
                    self._delete_trend_configuration(trendname)
                    if self.trends.has_child(trendname):
                        trend = self.trends.get_child(trendname)
                        trend.prune(force=True)
                except:
                    msglog.exception(prefix='Handled')
        except:
            self.__running = False
            raise
        return

    def stop(self):
        self.__running = False
        super(TrendManager, self).stop()
        return

    def is_trendable(self, log_node):
        """Return True if *log_node* looks like a periodic log that the
        EmbeddedGraph can display."""
        if log_node.configuration().has_key('period'):
            # Assume a log with a period is valid.
            return True
        if not log_node.has_child('columns'):
            # If the log does not have a columns container, than it doesn't
            # look like a proper log.
            return False
        column_nodes = log_node.get_child('columns').children_nodes()
        if not column_nodes:
            # If the log does not have any columns, than it doesn't
            # look like a proper log.
            return False
        if len(column_nodes) > MAX_TRENDABLE_COLUMNS:
            # If the log has more than MAX_TRENDABLE_COLUMNS (9), then the
            # EmbeddedGraph can not display it.
            return False
        has_timestamp = False
        for column in column_nodes:
            column_configuration = column.configuration()
            if not column_configuration.has_key('name'):
                # OK, this should NEVER happen...
                return False
            if column_configuration['name'] == 'timestamp':
                has_timestamp = True
            if not column_configuration.has_key('conversion'):
                # To be safe, each column must have a conversion...
                return False
            if column_configuration['conversion'] != 'magnitude':
                # And the conversion must be a 'magnitude'
                return False
        if not has_timestamp:
            # Graph requires a timestamp.
            return False
        return True

    security.protect('get_trends', 'View')

    def get_trends(self):
        """Return all trend nodes, auto-wrapping any trendable logger
        children that do not yet have a trend, sorted by name."""
        if not self.__running:
            raise ENotRunning()
        trend_names = []
        for name in self.trends.children_names():
            trend_names.append(name)
        for log_node in self.logger.children_nodes():
            trend_name = log_node.name
            if not trend_name in trend_names:
                if self.is_trendable(log_node):
                    trend_adapter = PeriodicLogTrendAdapter()
                    trend_adapter.configure({
                        'parent': self.trends,
                        'name': trend_name,
                    })
                    trend_adapter.start()
                    self._persist_trend_configuration(trend_adapter)
                    trend_names.append(trend_name)
        trend_names.sort()
        trends = []
        for trend_name in trend_names:
            trends.append(self.trends.get_child(trend_name))
        return trends

    security.protect('get_trend', 'View')

    def get_trend(self, trend_name):
        """Return the trend node named *trend_name*, discovering new
        trendable logs first if it is not yet a child."""
        if not self.__running:
            raise ENotRunning()
        if not self.trends.has_child(trend_name):
            # Autodiscoveresque.
            self.get_trends()

        # @fixme Raise a better exception...
        return self.trends.get_child(trend_name)

    def get_trend_preferences(self, trend_name):
        """Return display preferences for *trend_name* with colors
        normalized to '#RRGGBB' strings where possible."""
        trend_name = urllib.unquote_plus(trend_name)
        trend = self.get_trend(trend_name)
        preferences = trend.get_preferences()
        points = trend.get_points()
        for i in xrange(0, len(points)):
            # Best effort: colors stored as integers are converted to
            # hex strings; anything else is passed through unchanged.
            try:
                points[i]["color"] = preferences["points"][i][
                    "color"] = "#%06X" % int(preferences["points"][i]["color"])
            except:
                points[i]["color"] = preferences["points"][i]["color"]
            points[i]["y-axis"] = preferences["points"][i]["y-axis"]
        preferences["points"] = points
        # Same best-effort conversion for background and text colors.
        try:
            preferences["background"]["color"] = "#%06X" % int(
                preferences["background"]["color"])
        except:
            pass
        try:
            preferences["text"]["color"] = "#%06X" % int(
                preferences["text"]["color"])
        except:
            pass
        msglog.log("broadway", msglog.types.INFO,
                   "Preferences: %r" % preferences)
        return preferences

    security.protect('delete_trend', 'Configure')

    def delete_trend(self, trend_name):
        """Delete the persisted configuration and node for *trend_name*."""
        if not self.__running:
            raise ENotRunning()
        self._delete_trend_configuration(trend_name)
        if not self.trends.has_child(trend_name):
            # Autodiscoveresque.
            self.get_trends()
        # @fixme Raise a better exception...
        trend = self.trends.get_child(trend_name)
        trend.prune()
        trend.destroy()
        return

    security.protect('update_trend', 'Configure')

    def update_trend(self, trend_name, new_cfg, **kw):
        """Reconfigure *trend_name* with *new_cfg*; on failure the
        original configuration is rolled back and the error re-raised.

        Keyword arguments: confirmed (unused pending @fixme below) and
        deletedata (truthy deletes existing trend data first).
        """
        if not self.__running:
            raise ENotRunning()
        confirmed = kw.get('confirmed', 0)
        deletedata = kw.get('deletedata', 0)
        trend = self.get_trend(trend_name)
        confirmation = ConfirmUpdateTrend(trend, new_cfg)
        #@fixme, dleimbro
        if 0:  #not confirmed and confirmation.requires_confirmation():
            return confirmation
        if confirmation.configuration_changed():
            try:
                if deletedata:
                    trend.delete_existing_data()
                if confirmation.requires_stop_and_restart():
                    trend.stop()
                trend.configure(confirmation.configuration())
                if confirmation.requires_stop_and_restart():
                    trend.start()
            except:
                msglog.exception()
                # Roll back to the original configuration before
                # re-raising so the trend is left in a working state.
                try:
                    trend.stop()
                except:
                    msglog.exception()
                trend.configure(confirmation.original_configuration())
                trend.start()
                raise
            else:
                self._persist_trend_configuration(trend)
        return None

    def _new_trend(self, name):
        """Return an instance that implements ITrend interface for new
        trend with no points.
        """
        if not self.__running:
            raise ENotRunning()
        new_trend = Trend()
        period = 60
        points = []
        preferences = {}
        new_trend.configure({
            'parent': self.trends,
            'name': name,
            'period': period,
            'points': points,
            'preferences': preferences
        })
        return new_trend

    security.protect('new_trend', 'Configure')

    def new_trend(self, name=None):
        """Create a new trend, generating a unique name if none given."""
        if name:
            return self._new_trend(name)
        while True:
            try:
                new_trend = self._new_trend(self.generate_trend_name())
                # Break once an unused generated name succeeds; without
                # this the loop kept generating thousands of trends.
                break
            except ENameInUse:
                continue

        return new_trend

    security.protect('generate_trend_name', 'View')

    def generate_trend_name(self):
        """Return the first unused name of the form 'Trend N'."""
        i_trend = 1
        while True:
            try:
                self.get_trend('Trend %d' % i_trend)
                i_trend += 1
            except ENoSuchName:
                break
        return ('Trend %d' % i_trend)
Example #24
0
class SessionManager(ServiceNode):
    """Create, validate, and expire authenticated user sessions.

    Sessions live in a PersistentDictionary keyed by a random session
    id and are reaped every *ttl* seconds by the scheduled collect().
    """
    # Class-level import keeps the ID-alphabet constants self-contained.
    import string
    IDCHARS = string.ascii_letters + string.digits
    NCHARS = len(IDCHARS)
    IDLEN = 20
    ETC_DIR = properties.ETC_DIR

    def __init__(self):
        # Seconds a session may remain before collect() considers it
        # for expiry; also the period of the collect() schedule.
        self.ttl = 3600
        self._lock = Lock()
        self._sessions = None
        self._scheduled = None
        self.user_manager = None
        ServiceNode.__init__(self)

    def _begin_critical_section(self):
        self._lock.acquire()

    def _end_critical_section(self):
        self._lock.release()

    def _random_id(self):
        """Return a random candidate session id (UUID string)."""
        return str(UUID())

    def _next_session_id(self):
        """Return a random session id not already in use."""
        sid = self._random_id()
        while self._sessions.has_key(sid):
            sid = self._random_id()
        return sid

    def start(self):
        self._begin_critical_section()
        try:
            if self._sessions is None:
                self._sessions = PersistentDictionary(self.name,
                                                      encode=Session.encode,
                                                      decode=Session.decode)
            if not self._scheduled:
                self._scheduled = scheduler.every(self.ttl, self.collect)
        finally:
            self._end_critical_section()
        self.user_manager = as_node("/services/User Manager")
        return ServiceNode.start(self)

    def stop(self):
        self._begin_critical_section()
        try:
            if self._scheduled:
                self._scheduled.cancel()
            self._scheduled = None
            self._sessions = None
        finally:
            self._end_critical_section()
        self.user_manager = None
        return ServiceNode.stop(self)

    def configure(self, cd):
        ServiceNode.configure(self, cd)
        set_attribute(self, 'ttl', self.ttl, cd, float)
        self.enabled = 1

    def configuration(self):
        cd = ServiceNode.configuration(self)
        get_attribute(self, 'ttl', cd, str)
        return cd

    ##
    # @fixme Use mpx.lib.security.User, as soon as it exists.
    # @fixme Cache mediator users...
    # @param nocheck Since we do not use shadow pass, the check can fail
    #                This is used to allow for bypass of check (for testing)
    # @exception ESessionDenied Raised when the SessionManager rejects
    #            creating the session because the request is not valid.
    #            In other words, the username or password are incorrect
    #            or there is some other aspect of the request which is
    #            not acceptable to the SessionManager.
    def create(self, user, password=None):
        if not isinstance(user, User):
            if properties.get_boolean('PAM_ENABLE'):
                authenticator = self.user_manager.user_from_pam
            else:
                authenticator = self.user_manager.user_from_cleartext
            try:
                user = authenticator(user, password)
            except EAuthenticationFailed:
                raise ESessionDenied("User credentials invalid.")
        self._begin_critical_section()
        try:
            sid = self._next_session_id()
            username = None
            # The two user types expose their name differently.
            if isinstance(user, User): username = user.name
            if isinstance(user, _User): username = user.name()
            self._sessions[sid] = Session(session_id=sid,
                                          ttl=self.ttl,
                                          username=username,
                                          password=password)
        finally:
            self._end_critical_section()
        return sid

    ##
    # Immediately invalidate a session.
    # @param session_id The string that identifies the session to invalidate.
    # @return The removed Session, or False if no such session existed.
    def destroy(self, session_id):
        self._begin_critical_section()
        try:
            # pop() both retrieves and removes the session.  The
            # previous implementation followed pop() with a 'del' of
            # the same key, which always raised KeyError and made
            # destroy() report False even when removal succeeded.
            removed = self._sessions.pop(session_id)
        except KeyError:
            removed = False
        finally:
            self._end_critical_section()
        return removed

    ##
    # Checks if a session_id is in the list of valid sessions.
    # @param session_id The string that identifies the session.
    # @param touch If true, and the session exists, then the session's
    #        last_access time will be updated.
    # @return True if the session_id is currently valid.
    # @note The implementation assumes that if a session_id is in the list of
    #       managed sessions, then it is valid.  It is the responsibility of
    #       the "auto collection" mechanism to remove expired sessions in a
    #       timely fashon.
    def validate(self, session_id, touch=0):
        self._begin_critical_section()
        try:
            session = self._sessions.get(session_id)
            if session and session.valid():
                if touch:
                    session.touch()
                valid = True
            else:
                valid = False
        finally:
            self._end_critical_section()
        return valid

    ##
    # Scan all managed sessions for expired sessions.
    # @return The list of expired session ids invalidated by this invocation.
    def collect(self):
        sessions = self._sessions
        self._begin_critical_section()
        try:
            sids = [sid for sid, ses in sessions.items() if not ses.valid()]
        finally:
            self._end_critical_section()
        # Destroy outside the critical section; destroy() re-acquires it.
        for sid in sids:
            self.destroy(sid)
        return sids

    ##
    # Look up the user associated with a session
    # @return A string representing the user associated with this session.
    def get_user_from_sid(self, sid):
        user = None
        session = self._sessions.get(str(sid))
        if session:
            try:
                user = self.user_manager.get_user(session.username)
            except:
                msglog.exception(prefix="handled")
        return user

    ##
    # Look up the user associated with a session
    # @return True or False according to the user existence in session.
    def is_user_active(self, username):
        for sid in self._sessions:
            if self._sessions[sid].username == username:
                return True
        return False
Example #25
0
class NodeConfigurator(CompositeNode):
    """Create and manage nodes whose definitions persist across restarts.

    Each managed node is stored in a PersistentDictionary as
    nodeurl -> (factory, configuration), where factory is the dotted
    path of the node's class and configuration its config dictionary.
    """
    def __init__(self, *args, **kw):
        self.nodes = None
        super(NodeConfigurator, self).__init__(*args, **kw)

    def start(self):
        """Recreate every persisted node, parents before children."""
        if self.nodes is None:
            dictname = "%s (%s)" % (type(self).__name__, self.name)
            self.nodes = PersistentDictionary(dictname)
            nodeurls = self.nodes.keys()
            # Sort so parent URLs precede their children on creation.
            nodeurls.sort(pathcompare)
            for nodeurl in nodeurls:
                factory, configuration = self.nodes[nodeurl]
                self.create_node(factory, nodeurl, **configuration)
        super(NodeConfigurator, self).start()

    def get_managed_node(self, nodeurl):
        """Return the node at *nodeurl*; raise if it is not managed here."""
        if not self.nodes.has_key(nodeurl):
            raise TypeError("cannot manipulate unmanaged node: %s" % nodeurl)
        return as_node(nodeurl)

    def node_children(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        return node.children_names()

    def node_configuration(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        return node.configuration()

    def start_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.start()

    def stop_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.stop()

    def node_attr(self, nodeurl, name, value=Undefined):
        """Get, or (when *value* is provided) set-and-persist, an
        attribute on a managed node."""
        node = self.get_managed_node(nodeurl)
        if value is not Undefined:
            setattr(node, name, value)
            self.updatepdo(nodeurl, node)
        return getattr(node, name)

    def configure_node(self, nodeurl, config):
        """Reconfigure a managed node, rolling back on failure.

        The node is stopped for the duration and restarted regardless
        of outcome; the PDO is updated only on success.
        """
        node = self.get_managed_node(nodeurl)
        node.stop()
        try:
            node.configure(config)
        except:
            msglog.log("broadway", msglog.types.WARN, 
                       "Error prevented reconfiguration of node: %s" % node)
            msglog.exception(prefix="handled")
            msglog.log("broadway", msglog.types.WARN, 
                       "Rolling back configuration.")
            try:
                # The PDO stores (factory, configuration) tuples; roll
                # back with the configuration portion only.  Passing
                # the whole tuple -- as this code previously did --
                # could never restore the node.
                node.configure(self.nodes[nodeurl][1])
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Configuration rollback failed.")
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO, 
                           "Rollback of configuration succeeded.")
        else:
            msglog.log("broadway", msglog.types.INFO, 
                       "Node reconfigured: %s" % node)
            self.updatepdo(nodeurl, node)
        finally:
            node.start()
        return node.configuration()

    def create_node(self, factory, nodeurl, **config):
        """Instantiate *factory* at *nodeurl*; persist and start it on
        success, prune it on configuration failure."""
        try:
            as_node(nodeurl)
        except KeyError:
            pass
        else:
            raise TypeError("Node exists: %s" % nodeurl)
        if isinstance(factory, str):
            # NOTE(security): exec/eval on the factory path.  Acceptable
            # only because factory strings come from trusted persisted
            # configuration, never from untrusted input.
            module, sep, name = factory.rpartition(".")
            if name:
                exec("import %s" % module)
            factory = eval(factory)
        parent, sep, name = nodeurl.rpartition("/")
        configuration = {"name": name, "parent": parent}
        configuration.update(config)
        node = factory()
        try:
            node.configure(configuration)
        except:
            msglog.log("broadway", msglog.types.WARN, 
                       "Error prevented configuration of new node: %s" % node)
            msglog.exception(prefix="handled")
            try:
                node.prune()
            except:
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO, 
                           "Node successfully pruned.")
        else:
            msglog.log("broadway", msglog.types.INFO, 
                       "New node created: %s" % node)
            self.updatepdo(nodeurl, node)
            node.start()
        return node.configuration()

    def remove_node(self, nodeurl):
        """Prune a managed node and drop its persisted entry."""
        node = self.get_managed_node(nodeurl)
        node.prune()
        self.updatepdo(nodeurl, None)

    def updatepdo(self, nodeurl, node):
        """Replace the persisted entry for *nodeurl*; a falsy *node*
        simply removes the entry.  Returns the (possibly canonicalized)
        node URL."""
        if self.nodes.has_key(nodeurl):
            self.nodes.pop(nodeurl)
        if node:
            node = as_node(node)
            nodeurl = as_node_url(node)
            datatype = type(node)
            factory = "%s.%s" % (datatype.__module__, datatype.__name__)
            self.nodes[nodeurl] = (factory, node.configuration())
        return nodeurl
Example #26
0
class TriggersConfigurator(CompositeNode):
    """Expose create/remove/configure operations for triggers and
    persist their marshalled definitions across restarts.
    """
    security = SecurityInformation.from_default()
    secured_by(security)
    
    def __init__(self, *args):
        # PersistentDictionary mapping trigger name -> marshalled dump.
        self._triggers = None
        self.security_manager = None
        self._pdo_lock = Lock()
        super(TriggersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path','/triggerconfig'))
        # NOTE(review): the config key is 'container' while the attribute
        # (and configuration() output) is 'manager' -- confirm this
        # asymmetry is intentional.
        self.setattr('manager', config.get('container','/services/Trigger Manager'))
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)

    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config

    def start(self):
        """Open the trigger PDO (migrating legacy data if present) and
        load all persisted triggers."""
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(
                    filename, encode=None, decode=None)
            if not self._triggers:
                # Empty dictionary: migrate data persisted by the older
                # PersistentDataObject format, if any exists on disk.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO, 
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del(pdodata)
            self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node("/services/Security Manager")
            else:
                self.security_manager = None
        finally: 
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()

    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None

    def _loadtriggers(self, names=None):
        """Unmarshal and return the named triggers (all when *names* is
        None); failures are logged and skipped."""
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers

    def _storetriggers(self, triggers=None):
        """Marshal and persist the given triggers (all of the manager's
        triggers when None); failures are logged and skipped."""
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers

    def _poptriggers(self, names=None):
        """Drop persisted dumps for *names*; when None, drop every
        stored name the manager no longer knows about.  Returns the
        names actually removed."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN, 
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed

    def match(self, path):
        return path.startswith(self.path)

    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')
    def create_trigger(self, name, config=()):
        """Instantiate and persist a new trigger named *name*.

        config may carry a 'type' entry (class or string suffix of
        ComparisonTrigger / BoundTrigger); defaults to ComparisonTrigger.
        """
        config = dict(config)
        if "type" in config:
            trigger_type = config.pop("type")
        else:
            trigger_type = "ComparisonTrigger"
        if isinstance(trigger_type, str):
            if trigger_type.endswith("ComparisonTrigger"):
                trigger_type = ComparisonTrigger
            elif trigger_type.endswith("BoundTrigger"):
                trigger_type = BoundTrigger
            else:
                raise ValueError("Unknown type: %r" % trigger_type)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        # _create_trigger is not defined in this class body -- presumably
        # provided by a base class or mixin; verify before refactoring.
        trigger = self._create_trigger(trigger_type, config)
        self._storetriggers([trigger])
        return trigger.name
    create_node = create_trigger
    
    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')
    def remove_trigger(self, name):
        """Remove the named trigger and its persisted dump."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name
    remove_node = remove_trigger
    
    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')
    def configure_trigger(self, name=None, config=()):
        """Stop, reconfigure, and restart the named trigger; the name
        may come from *name* or from config['name']."""
        config = dict(config)
        if name is None:
            if config.has_key("name"):
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try: 
            trigger.stop()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix='Handled')
        trigger.configure(config)
        try: 
            trigger.start()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix='Handled')
Example #27
0
 def __init__(self):
     """Initialize the manager with its persistent priority-array store."""
     storage = PersistentDictionary('OverrideManager')
     self._priority_arrays = storage
Example #28
0
class TrendManager(CompositeNode):
    """Service node managing the system's collection of trends.

    Trend configurations are persisted in a PersistentDictionary keyed
    by trend name, each value being a marshalled trend dump.  Live trend
    nodes are children of the "trends" child container.  Logs under
    'logger_url' that look trendable are auto-adapted on demand by
    get_trends().
    """
    implements(ITrendManager)
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        super(TrendManager, self).__init__(*args)
        self.logger_url = None
        self.trends = None
        self._pdo_lock = Lock()
        self._trendconfig = None
        self.__running = False
        self.secured = True
        return

    def _persist_trend_configuration(self, trend):
        # Serialize and store the trend's configuration under its name,
        # holding the PDO lock to serialize persistent-store access.
        self._pdo_lock.acquire()
        try:
            self._trendconfig[trend.name] = marshal(trend)
        finally:
            self._pdo_lock.release()
        return

    def _delete_trend_configuration(self, trend_name):
        # Remove the named trend's persisted configuration, if present.
        self._pdo_lock.acquire()
        try:
            if self._trendconfig.has_key(trend_name):
                del self._trendconfig[trend_name]
        finally:
            self._pdo_lock.release()
        return

    def configure(self, config):
        self.setattr("name", config.get("name", "Trend Manager"))
        self.setattr("logger_url", config.get("logger_url", "/services/logger"))
        # Security posture follows the global /services setting.
        self.secured = as_internal_node("/services").secured
        super(TrendManager, self).configure(config)
        return

    def configuration(self):
        config = super(TrendManager, self).configuration()
        config["logger_url"] = self.getattr("logger_url")
        return config

    def start(self):
        """Load persisted trend configurations (migrating any legacy
        PersistentDataObject data), then instantiate each stored trend,
        deleting entries that fail to unmarshal.  Idempotent: a second
        start() while running returns immediately."""
        try:
            self._pdo_lock.acquire()
            try:
                if self.__running:
                    return
                self.__running = True
                self._trendconfig = PersistentDictionary(filename(self), encode=None, decode=None)
                if not self._trendconfig:
                    # Empty store: migrate data from the legacy PDO file
                    # if one exists on disk.
                    pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                    if os.path.exists(pdodata.filename()):
                        msglog.log("broadway", msglog.types.INFO, "Migrating previous trend data")
                        pdodata.trends = {}
                        pdodata.load()
                        self._trendconfig.update(pdodata.trends)
                        # NOTE(review): unlike similar migrations elsewhere,
                        # the legacy PDO file is not destroy()ed after
                        # migration -- confirm whether leaving it is intended.
                    del (pdodata)
            finally:
                self._pdo_lock.release()
            super(TrendManager, self).start()
            self.logger = node.as_internal_node(self.logger_url)
            if self.has_child("trends"):
                self.trends = self.get_child("trends")
            else:
                self.trends = CompositeNode()
                self.trends.configure({"parent": self, "name": "trends"})
                self.trends.start()
            # Instantiate every persisted trend; collect names whose dumps
            # can no longer be unmarshalled so they can be purged below.
            corrupt_trends = []
            for trendname, trenddump in self._trendconfig.items():
                msg = "Loading trend: %s" % trendname
                msglog.log("trendmanager", msglog.types.INFO, msg)
                try:
                    trend = unmarshal(trenddump)
                except:
                    corrupt_trends.append(trendname)
                    msg = "Failed to load trend: %s" % trendname
                    msglog.log("trendmanager", msglog.types.ERR, msg)
                    msglog.exception(prefix="Handled")
            for trendname in corrupt_trends:
                try:
                    msg = "Deleting trend information: %s" % trendname
                    msglog.log("trendmanager", msglog.types.INFO, msg)
                    self._delete_trend_configuration(trendname)
                    if self.trends.has_child(trendname):
                        trend = self.trends.get_child(trendname)
                        trend.prune(force=True)
                except:
                    msglog.exception(prefix="Handled")
        except:
            # Any failure leaves the manager stopped so start() can be retried.
            self.__running = False
            raise
        return

    def stop(self):
        self.__running = False
        super(TrendManager, self).stop()
        return

    def is_trendable(self, log_node):
        """Return True when 'log_node' looks like a periodic log the
        EmbeddedGraph can display: it has a period, or it has 1..MAX
        columns, all magnitude-converted, including a 'timestamp'."""
        if log_node.configuration().has_key("period"):
            # Assume a log with a period is valid.
            return True
        if not log_node.has_child("columns"):
            # If the log does not have a columns container, than it doesn't
            # look like a proper log.
            return False
        column_nodes = log_node.get_child("columns").children_nodes()
        if not column_nodes:
            # If the log does not have any columns, than it doesn't
            # look like a proper log.
            return False
        if len(column_nodes) > MAX_TRENDABLE_COLUMNS:
            # If the log has more than MAX_TRENDABLE_COLUMNS (9), then the
            # EmbeddedGraph can not display it.
            return False
        has_timestamp = False
        for column in column_nodes:
            column_configuration = column.configuration()
            if not column_configuration.has_key("name"):
                # OK, this should NEVER happen...
                return False
            if column_configuration["name"] == "timestamp":
                has_timestamp = True
            if not column_configuration.has_key("conversion"):
                # To be safe, each column must have a conversion...
                return False
            if column_configuration["conversion"] != "magnitude":
                # And the conversion must be a 'magnitude'
                return False
        if not has_timestamp:
            # Graph requires a timestamp.
            return False
        return True

    security.protect("get_trends", "View")

    def get_trends(self):
        """Return all trend nodes, sorted by name, auto-creating
        PeriodicLogTrendAdapter children for any trendable logs that do
        not yet have a trend."""
        if not self.__running:
            raise ENotRunning()
        trend_names = []
        for name in self.trends.children_names():
            trend_names.append(name)
        for log_node in self.logger.children_nodes():
            trend_name = log_node.name
            if not trend_name in trend_names:
                if self.is_trendable(log_node):
                    trend_adapter = PeriodicLogTrendAdapter()
                    trend_adapter.configure({"parent": self.trends, "name": trend_name})
                    trend_adapter.start()
                    self._persist_trend_configuration(trend_adapter)
                    trend_names.append(trend_name)
        trend_names.sort()
        trends = []
        for trend_name in trend_names:
            trends.append(self.trends.get_child(trend_name))
        return trends

    security.protect("get_trend", "View")

    def get_trend(self, trend_name):
        """Return the named trend node, running autodiscovery first when
        the name is unknown."""
        if not self.__running:
            raise ENotRunning()
        if not self.trends.has_child(trend_name):
            # Autodiscoveresque.
            self.get_trends()

        # @fixme Raise a better exception...
        return self.trends.get_child(trend_name)

    def get_trend_preferences(self, trend_name):
        """Return the trend's display preferences with point, background
        and text colors normalized to '#RRGGBB' strings where the stored
        value is an integer (non-integer values are passed through)."""
        trend_name = urllib.unquote_plus(trend_name)
        trend = self.get_trend(trend_name)
        preferences = trend.get_preferences()
        points = trend.get_points()
        for i in xrange(0, len(points)):
            try:
                points[i]["color"] = preferences["points"][i]["color"] = "#%06X" % int(
                    preferences["points"][i]["color"]
                )
            except:
                # Color is already a string (or missing); use it as stored.
                points[i]["color"] = preferences["points"][i]["color"]
            points[i]["y-axis"] = preferences["points"][i]["y-axis"]
        preferences["points"] = points
        try:
            preferences["background"]["color"] = "#%06X" % int(preferences["background"]["color"])
        except:
            pass
        try:
            preferences["text"]["color"] = "#%06X" % int(preferences["text"]["color"])
        except:
            pass
        msglog.log("broadway", msglog.types.INFO, "Preferences: %r" % preferences)
        return preferences

    security.protect("delete_trend", "Configure")

    def delete_trend(self, trend_name):
        """Delete the named trend: drop its persisted configuration, then
        prune and destroy its node."""
        if not self.__running:
            raise ENotRunning()
        self._delete_trend_configuration(trend_name)
        if not self.trends.has_child(trend_name):
            # Autodiscoveresque.
            self.get_trends()
        # @fixme Raise a better exception...
        trend = self.trends.get_child(trend_name)
        trend.prune()
        trend.destroy()
        return
Example #29
0
class PersistanceManager(object):
    """Persist Global Setpoint (GSP) group data.

    Data layout, keyed by normalized nodepath:
        {nodepath: {'node_config': node_config,
                    'group_config': group_config,
                    'entity_map': entity_map,
                    'node_factory': factory}}
    Writes are silently skipped while persistence is disabled.
    """
    def __init__(self):
        self._persisted_data = PersistentDictionary('GSPData')
        # Verbosity threshold: message() logs when self.debug >= level.
        self.debug = 1
        # Persistence starts disabled; call enable_persist() to allow writes.
        self._persist_enabled = False

    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log 'message' when the debug level is at least 'level'."""
        if self.debug >= level:
            msglog.log('Global Setpoint Manager', mtype, message)

    def persist_enabled(self):
        """Return True when writes to the persistent store are enabled."""
        return self._persist_enabled

    def enable_persist(self):
        self._persist_enabled = True

    def disable_persist(self):
        self._persist_enabled = False

    def get_gsp_groups(self):
        """Return all persisted group paths, shallowest first.

        Sorted by path depth (slash count).  sorted() with a key is
        stable, matching the previous cmp-based sort while remaining
        portable to Python 3 (where list.sort(cmp) no longer exists).
        """
        return sorted(self._persisted_data.keys(),
                      key=lambda path: path.count('/'))

    def get_gsp_group(self, nodepath):
        """Return the persisted data dict for 'nodepath' (KeyError if absent)."""
        return self._persisted_data[normalize_nodepath(nodepath)]

    def put_gsp_group(self, nodepath, nodedata):
        """Create the group's default record if needed, then store 'nodedata'.

        No-op while persistence is disabled.
        """
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            data = {
                'node_config': {},
                'group_config': [],
                'entity_map': {},
                'node_factory': ''
            }
            self._persisted_data[nodepath] = data
        self.put_gsp_group_data(nodepath, nodedata)

    def remove_gsp_group(self, nodepath):
        """Remove the group's persisted record, if present."""
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            del self._persisted_data[nodepath]

    def get_gsp_group_data(self, nodepath):
        return self._persisted_data[normalize_nodepath(nodepath)]

    def put_gsp_group_data(self, nodepath, nodedata):
        """Store every key of 'nodedata' that the group's record already
        has; keys whose value is None are left unchanged."""
        nodepath = normalize_nodepath(nodepath)
        for data_key in self._persisted_data[nodepath].keys():
            value = nodedata.get(data_key)
            if value is not None:
                self._put_entry(nodepath, data_key, value)

    def get_gsp_group_nconfig(self, nodepath):
        # node configuration data
        return self._get_entry(nodepath, 'node_config')

    def put_gsp_group_nconfig(self, nodepath, value):
        # node configuration data
        self._put_entry(nodepath, 'node_config', value)

    def get_gsp_group_gconfig(self, nodepath):
        # gsp group configuration data
        return self._get_entry(nodepath, 'group_config')

    def putt_gsp_group_gconfig(self, nodepath, value):
        # gsp group configuration data
        self._put_entry(nodepath, 'group_config', value)

    # Correctly-spelled alias for putt_gsp_group_gconfig; the misspelled
    # name is kept for backward compatibility with existing callers.
    put_gsp_group_gconfig = putt_gsp_group_gconfig

    def get_gsp_group_entity_map(self, nodepath):
        return self._get_entry(nodepath, 'entity_map')

    def put_gsp_group_entity_map(self, nodepath, value):
        self._put_entry(nodepath, 'entity_map', value)

    def _get_entry(self, nodepath, data_type):
        return self.get_gsp_group(normalize_nodepath(nodepath))[data_type]

    def _put_entry(self, nodepath, data_type, value):
        """Store one field of an existing group's record and flag the
        store dirty.  No-op while persistence is disabled."""
        if not self.persist_enabled():
            return
        nodepath = normalize_nodepath(nodepath)
        group = self.get_gsp_group(nodepath)
        assert group, \
        'A group must exist before data can be stored against it.'
        group[data_type] = value
        self._persisted_data.notify_changed(nodepath)

    def singleton_unload_hook(self):
        pass
Example #30
0
class PersistanceManager(object):
    """Persist schedule data keyed by normalized nodepath."""
    def __init__(self):
        # {nodepath:{'cfg':cfg,
        #            'summary':summary,
        #            'meta':meta,
        #            'properties':properties,
        #            'fail_list':fail_list,
        #            'sync_state':sync_in_progress,
        #            'override':override}}
        self._persisted_data = PersistentDictionary('ScheduleData')
        # Verbosity threshold: message() logs when self.debug >= level.
        self.debug = 1

    def message(self, message, mtype=msglog.types.INFO, level=1):
        """Log 'message' when the debug level is at least 'level'."""
        if self.debug >= level:
            msglog.log('Scheduler', mtype, message)

    def get_scheds(self):
        """Return all persisted schedule paths, ordered by sched_sort."""
        scheds = self._persisted_data.keys()
        scheds.sort(sched_sort)
        return scheds

    def get_sched(self, nodepath):
        """Return the persisted record for 'nodepath' (KeyError if absent)."""
        return self._persisted_data[normalize_nodepath(nodepath)]

    def put_sched(self, nodepath, cfg):
        """Create the schedule's default record if needed, then store 'cfg'."""
        nodepath = normalize_nodepath(nodepath)
        if not self._persisted_data.has_key(nodepath):
            # create default configuration.
            pdata = {
                'cfg': {},
                'summary': [[], [], [], 'exceptions'],
                'meta': {},
                'properties': [],
                'fail_list': [],
                'sync_state': False,
                'override': False
            }
            self._persisted_data[nodepath] = pdata
        self.put_sched_cfg(nodepath, cfg)

    def remove_sched(self, nodepath):
        """Remove 'nodepath' and every schedule nested beneath it.

        Logs (rather than raises) when the path is not persisted.
        """
        nodepath = normalize_nodepath(nodepath)
        if self._persisted_data.has_key(nodepath):
            # Prefix match removes the schedule and all of its children.
            for sched in self.get_scheds():
                if sched.startswith(nodepath):
                    del self._persisted_data[sched]
        else:
            msg = 'Error removing non-existent schedule %s from persistent data.' \
                % nodepath
            self.message(msg)

    def move_sched(self, source, destination, cfg, is_rename=False):
        """Move (or rename) the schedule at 'source', and all schedules
        nested beneath it, to 'destination', re-persisting each entry's
        current node-tree configuration.

        NOTE(review): oldroot/newroot are only bound in the
        'sched == source' branch; children hit later reuse them.  This
        relies on get_scheds()'s sched_sort ordering yielding the source
        before its descendants -- confirm that invariant holds.
        """
        source = normalize_nodepath(source)
        destination = normalize_nodepath(destination)
        for sched in self.get_scheds():
            if not sched.startswith(source):
                continue
            data = self._persisted_data[sched]
            del self._persisted_data[sched]
            if sched == source:
                # rename
                if is_rename:
                    newsched = destination
                else:
                    newsched = sched.replace(
                        source, destination) + source.split('/')[-2] + '/'
                oldroot = sched
                newroot = newsched
                self._persisted_data[newsched] = data
                # prior to persisting, the schedule should have been moved
                # within the nodetree.  We grab and persist the latest configuration.
                # This put call will also ensure sync to disk to takes place.
                self.put_sched_cfg(newsched, cfg)
            else:
                newsched = normalize_nodepath(sched.replace(
                    oldroot, newroot))  #+ sched_name + '/'
                self._persisted_data[newsched] = data
                # Children re-serialize from the (already moved) node tree.
                self.put_sched_cfg(newsched, serialize_node(as_node(newsched)))

    def get_sched_cfg(self, nodepath):
        return self._get_entry('cfg', nodepath)

    def put_sched_cfg(self, nodepath, cfg):
        self._put_entry('cfg', nodepath, cfg)

    def get_sched_summary(self, nodepath):
        return self._get_entry('summary', nodepath)

    def put_sched_summary(self, nodepath, summary):
        self._put_entry('summary', nodepath, summary)

    def get_sched_props(self, nodepath):
        return self._get_entry('properties', nodepath)

    def put_sched_props(self, nodepath, properties):
        self._put_entry('properties', nodepath, properties)

    def get_sched_meta(self, nodepath):
        return self._get_entry('meta', nodepath)

    def put_sched_meta(self, nodepath, meta):
        self._put_entry('meta', nodepath, meta)

    def get_fail_list(self, nodepath):
        return self._get_entry('fail_list', nodepath)

    def put_fail_list(self, nodepath, fail_list):
        self._put_entry('fail_list', nodepath, fail_list)

    def get_sync_state(self, nodepath):
        return self._get_entry('sync_state', nodepath)

    def put_sync_state(self, nodepath, sync_state):
        self._put_entry('sync_state', nodepath, sync_state)

    def get_override(self, nodepath):
        return self._get_entry('override', nodepath)

    def put_override(self, nodepath, override):
        self._put_entry('override', nodepath, override)

    def _get_entry(self, ptype, nodepath):
        # Fetch one field of the schedule's persisted record.
        return self.get_sched(normalize_nodepath(nodepath))[ptype]

    def _put_entry(self, ptype, nodepath, value):
        """Store one field of an existing schedule's record and flag the
        store dirty so it syncs to disk."""
        nodepath = normalize_nodepath(nodepath)
        sched = self.get_sched(nodepath)
        assert sched, \
        'A schedule must exist before data can be stored against it.'
        sched[ptype] = value
        self._persisted_data.notify_changed(nodepath)

    def singleton_unload_hook(self):
        pass
Example #31
0
class NodeConfigurator(CompositeNode):
    """Node that persists and recreates a set of managed child nodes.

    Managed-node definitions are stored in a PersistentDictionary keyed
    by node URL as (factory, configuration) pairs; start() replays them
    to rebuild the node tree.
    """
    def __init__(self, *args, **kw):
        self.nodes = None
        super(NodeConfigurator, self).__init__(*args, **kw)

    def start(self):
        """Recreate all persisted nodes on first start, then start normally."""
        if self.nodes is None:
            dictname = "%s (%s)" % (type(self).__name__, self.name)
            self.nodes = PersistentDictionary(dictname)
            nodeurls = self.nodes.keys()
            # Order URLs so parents are created before their children.
            nodeurls.sort(pathcompare)
            for nodeurl in nodeurls:
                nodedata = self.nodes[nodeurl]
                # Each PDO entry is a (factory, configuration) pair.
                factory, configuration = nodedata
                self.create_node(factory, nodeurl, **configuration)
        super(NodeConfigurator, self).start()

    def get_managed_node(self, nodeurl):
        """Return the node at 'nodeurl'; raise TypeError when the URL is
        not managed by this configurator."""
        if not self.nodes.has_key(nodeurl):
            raise TypeError("cannot manipulate unmanaged node: %s" % nodeurl)
        return as_node(nodeurl)

    def node_children(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        return node.children_names()

    def node_configuration(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        return node.configuration()

    def start_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.start()

    def stop_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.stop()

    def node_attr(self, nodeurl, name, value=Undefined):
        """Get (and, when 'value' is supplied, first set and persist) the
        named attribute of a managed node."""
        node = self.get_managed_node(nodeurl)
        if value is not Undefined:
            setattr(node, name, value)
            self.updatepdo(nodeurl, node)
        return getattr(node, name)

    def configure_node(self, nodeurl, config):
        """Reconfigure a managed node, rolling back to its persisted
        configuration if the new one fails.  The node is restarted either
        way; returns the resulting configuration."""
        node = self.get_managed_node(nodeurl)
        node.stop()
        try:
            node.configure(config)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented reconfiguration of node: %s" % node)
            msglog.exception(prefix="handled")
            msglog.log("broadway", msglog.types.WARN,
                       "Rolling back configuration.")
            try:
                # PDO entries are (factory, configuration) pairs; roll back
                # with the stored configuration dict, not the whole tuple.
                factory, configuration = self.nodes[nodeurl]
                node.configure(configuration)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Configuration rollback failed.")
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Rollback of configuration succeeded.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Node reconfigured: %s" % node)
            self.updatepdo(nodeurl, node)
        finally:
            node.start()
        return node.configuration()

    def create_node(self, factory, nodeurl, **config):
        """Create, configure, persist and start a new managed node at
        'nodeurl'.  'factory' is a callable or a dotted-path string.
        Raises TypeError when a node already exists at the URL; on
        configuration failure the partial node is pruned instead of
        started.  Returns the node's configuration."""
        try:
            as_node(nodeurl)
        except KeyError:
            pass
        else:
            raise TypeError("Node exists: %s" % nodeurl)
        if isinstance(factory, str):
            # SECURITY NOTE: exec/eval of the stored factory path can run
            # arbitrary code; factory strings must come only from this
            # manager's own PDO (written by updatepdo), never from
            # untrusted input.
            module, sep, name = factory.rpartition(".")
            if name:
                exec("import %s" % module)
            factory = eval(factory)
        parent, sep, name = nodeurl.rpartition("/")
        configuration = {"name": name, "parent": parent}
        configuration.update(config)
        node = factory()
        try:
            node.configure(configuration)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented configuration of new node: %s" % node)
            msglog.exception(prefix="handled")
            try:
                node.prune()
            except:
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Node successfully pruned.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "New node created: %s" % node)
            self.updatepdo(nodeurl, node)
            node.start()
        return node.configuration()

    def remove_node(self, nodeurl):
        """Prune a managed node and drop its persisted entry."""
        node = self.get_managed_node(nodeurl)
        node.prune()
        self.updatepdo(nodeurl, None)

    def updatepdo(self, nodeurl, node):
        """Replace the PDO entry for 'nodeurl' with the node's current
        (factory, configuration) pair, or just remove the entry when
        'node' is falsy.  Returns the (possibly re-canonicalized) URL."""
        if self.nodes.has_key(nodeurl):
            self.nodes.pop(nodeurl)
        if node:
            node = as_node(node)
            nodeurl = as_node_url(node)
            datatype = type(node)
            factory = "%s.%s" % (datatype.__module__, datatype.__name__)
            self.nodes[nodeurl] = (factory, node.configuration())
        return nodeurl