class NodeConfigurator(CompositeNode):
    """Composite service that creates, reconfigures, and removes child
    nodes at runtime, persisting each managed node's factory path and
    configuration so the nodes can be rebuilt after a restart."""

    def __init__(self, *args, **kw):
        # Maps node URL -> (factory, configuration).  Created lazily in
        # start() because the persistent file name incorporates self.name,
        # which is only known after configuration.
        self.nodes = None
        super(NodeConfigurator, self).__init__(*args, **kw)

    def start(self):
        """Recreate every persisted node, parents before children."""
        if self.nodes is None:
            dictname = "%s (%s)" % (type(self).__name__, self.name)
            self.nodes = PersistentDictionary(dictname)
        nodeurls = self.nodes.keys()
        # pathcompare orders URLs so ancestors precede descendants,
        # guaranteeing a parent exists before its children are created.
        nodeurls.sort(pathcompare)
        for nodeurl in nodeurls:
            factory, configuration = self.nodes[nodeurl]
            self.create_node(factory, nodeurl, **configuration)
        super(NodeConfigurator, self).start()

    def get_managed_node(self, nodeurl):
        """Return the node at 'nodeurl'.

        Raises TypeError if the node is not one created through (and
        persisted by) this configurator."""
        if not self.nodes.has_key(nodeurl):
            raise TypeError("cannot manipulate unmanaged node: %s" % nodeurl)
        return as_node(nodeurl)

    def node_children(self, nodeurl):
        """Return the child names of a managed node."""
        node = self.get_managed_node(nodeurl)
        return node.children_names()

    def node_configuration(self, nodeurl):
        """Return the configuration dictionary of a managed node."""
        node = self.get_managed_node(nodeurl)
        return node.configuration()

    def start_node(self, nodeurl):
        """Start a managed node."""
        node = self.get_managed_node(nodeurl)
        node.start()

    def stop_node(self, nodeurl):
        """Stop a managed node."""
        node = self.get_managed_node(nodeurl)
        node.stop()

    def node_attr(self, nodeurl, name, value=Undefined):
        """Get, or (when 'value' is supplied) set and persist, attribute
        'name' of a managed node.  Returns the attribute's current value."""
        node = self.get_managed_node(nodeurl)
        if value is not Undefined:
            setattr(node, name, value)
            self.updatepdo(nodeurl, node)
        return getattr(node, name)

    def configure_node(self, nodeurl, config):
        """Reconfigure a managed node, rolling back to the persisted
        configuration if the new one cannot be applied.

        The node is stopped for the duration of the change and restarted
        in all cases.  Returns the node's resulting configuration."""
        node = self.get_managed_node(nodeurl)
        node.stop()
        try:
            node.configure(config)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented reconfiguration of node: %s" % node)
            msglog.exception(prefix="handled")
            msglog.log("broadway", msglog.types.WARN,
                       "Rolling back configuration.")
            try:
                # Persisted entries are (factory, configuration) pairs;
                # only the configuration dictionary is fed back to the
                # node.  (Previously the whole tuple was passed, which
                # made every rollback fail.)
                factory, configuration = self.nodes[nodeurl]
                node.configure(configuration)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Configuration rollback failed.")
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Rollback of configuration succeeded.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Node reconfigured: %s" % node)
            self.updatepdo(nodeurl, node)
        finally:
            node.start()
        return node.configuration()

    def create_node(self, factory, nodeurl, **config):
        """Create, configure, persist, and start a new node at 'nodeurl'.

        'factory' is either a callable or a dotted "module.Class" path.
        Raises TypeError if a node already exists at the URL.  Returns
        the new node's configuration."""
        try:
            as_node(nodeurl)
        except KeyError:
            pass
        else:
            raise TypeError("Node exists: %s" % nodeurl)
        if isinstance(factory, str):
            # NOTE(review): exec/eval of the persisted factory path runs
            # arbitrary code; tolerable only because the persistent store
            # is locally written, trusted data.
            module, sep, name = factory.rpartition(".")
            # Import only when a module prefix is present; a dotless
            # factory name would otherwise produce exec("import ").
            if module:
                exec("import %s" % module)
            factory = eval(factory)
        parent, sep, name = nodeurl.rpartition("/")
        configuration = {"name": name, "parent": parent}
        configuration.update(config)
        node = factory()
        try:
            node.configure(configuration)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented configuration of new node: %s" % node)
            msglog.exception(prefix="handled")
            try:
                node.prune()
            except:
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Node successfully pruned.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "New node created: %s" % node)
            self.updatepdo(nodeurl, node)
            # Only a successfully configured node is started; a failed
            # one was pruned above.
            node.start()
        return node.configuration()

    def remove_node(self, nodeurl):
        """Prune a managed node and drop its persisted entry."""
        node = self.get_managed_node(nodeurl)
        node.prune()
        self.updatepdo(nodeurl, None)

    def updatepdo(self, nodeurl, node):
        """Synchronize the persistent store for 'nodeurl'.

        Removes any existing entry; when 'node' is given, stores the
        node's factory path and current configuration under the node's
        canonical URL.  Returns the (possibly canonicalized) URL."""
        if self.nodes.has_key(nodeurl):
            self.nodes.pop(nodeurl)
        if node:
            node = as_node(node)
            nodeurl = as_node_url(node)
            datatype = type(node)
            factory = "%s.%s" % (datatype.__module__, datatype.__name__)
            self.nodes[nodeurl] = (factory, node.configuration())
        return nodeurl
class SessionManager(ServiceNode):
    """Service that issues, validates, and expires session IDs, backed
    by a PersistentDictionary so sessions survive restarts."""
    import string
    IDCHARS = string.ascii_letters + string.digits
    NCHARS = len(IDCHARS)
    IDLEN = 20
    ETC_DIR = properties.ETC_DIR

    def __init__(self):
        self.ttl = 3600          # session time-to-live, in seconds
        self._lock = Lock()
        self._sessions = None    # sid -> Session; created in start()
        self._scheduled = None   # handle for the periodic collect() job
        self.user_manager = None
        ServiceNode.__init__(self)

    def _begin_critical_section(self):
        self._lock.acquire()

    def _end_critical_section(self):
        self._lock.release()

    def _random_id(self):
        return str(UUID())

    def _next_session_id(self):
        # Re-draw until the random ID is not already in use.
        sid = self._random_id()
        while self._sessions.has_key(sid):
            sid = self._random_id()
        return sid

    def start(self):
        self._begin_critical_section()
        try:
            if self._sessions is None:
                self._sessions = PersistentDictionary(
                    self.name, encode=Session.encode, decode=Session.decode)
            if not self._scheduled:
                # Sweep for expired sessions once per TTL period.
                self._scheduled = scheduler.every(self.ttl, self.collect)
        finally:
            self._end_critical_section()
        self.user_manager = as_node("/services/User Manager")
        return ServiceNode.start(self)

    def stop(self):
        self._begin_critical_section()
        try:
            if self._scheduled:
                self._scheduled.cancel()
            self._scheduled = None
            self._sessions = None
        finally:
            self._end_critical_section()
        self.user_manager = None
        return ServiceNode.stop(self)

    def configure(self, cd):
        ServiceNode.configure(self, cd)
        set_attribute(self, 'ttl', self.ttl, cd, float)
        self.enabled = 1

    def configuration(self):
        cd = ServiceNode.configuration(self)
        get_attribute(self, 'ttl', cd, str)
        return cd

    ##
    # Create a new session and return its session ID.
    # @fixme Use mpx.lib.security.User, as soon as it exists.
    # @fixme Cache mediator users...
    # @param user Either an authenticated User object or a username
    #        which will be authenticated with 'password'.
    # @exception ESessionDenied Raised when the SessionManager rejects
    #            creating the session because the username or password
    #            are incorrect or some other aspect of the request is
    #            not acceptable to the SessionManager.
    def create(self, user, password=None):
        if not isinstance(user, User):
            if properties.get_boolean('PAM_ENABLE'):
                authenticator = self.user_manager.user_from_pam
            else:
                authenticator = self.user_manager.user_from_cleartext
            try:
                user = authenticator(user, password)
            except EAuthenticationFailed:
                raise ESessionDenied("User credentials invalid.")
        self._begin_critical_section()
        try:
            sid = self._next_session_id()
            username = None
            # User exposes .name as an attribute; legacy _User exposes
            # name() as a method.
            if isinstance(user, User):
                username = user.name
            if isinstance(user, _User):
                username = user.name()
            self._sessions[sid] = Session(session_id=sid, ttl=self.ttl,
                                          username=username,
                                          password=password)
        finally:
            self._end_critical_section()
        return sid

    ##
    # Immediately invalidate a session.
    # @param session_id The string that identifies the session to invalidate.
    # @return The removed Session (truthy) on success, False if the
    #         session did not exist.
    def destroy(self, session_id):
        self._begin_critical_section()
        try:
            # pop() both removes and returns the session.  The previous
            # implementation also del'd the same key afterwards, which
            # always raised KeyError and forced the return value to
            # False even when the session had been removed.
            removed = self._sessions.pop(session_id)
        except KeyError:
            removed = False
        finally:
            self._end_critical_section()
        return removed

    ##
    # Checks if a session_id is in the list of valid sessions.
    # @param session_id The string that identifies the session.
    # @param touch If true, and the session exists, then the session's
    #        last_access time will be updated.
    # @return True if the session_id is currently valid.
    # @note The implementation assumes that if a session_id is in the list of
    #       managed sessions, then it is valid.  It is the responsibility of
    #       the "auto collection" mechanism to remove expired sessions in a
    #       timely fashion.
    def validate(self, session_id, touch=0):
        self._begin_critical_section()
        try:
            session = self._sessions.get(session_id)
            if session and session.valid():
                if touch:
                    session.touch()
                valid = True
            else:
                valid = False
        finally:
            self._end_critical_section()
        return valid

    ##
    # Scan all managed sessions for expired sessions and destroy them.
    # @return The list of session IDs invalidated by this invocation.
    def collect(self):
        sessions = self._sessions
        self._begin_critical_section()
        try:
            sids = [sid for sid, ses in sessions.items() if not ses.valid()]
        finally:
            self._end_critical_section()
        # destroy() re-acquires the lock, so it is called outside the
        # critical section above.
        for sid in sids:
            self.destroy(sid)
        return sids

    ##
    # Look up the user associated with a session.
    # @return The User associated with this session, or None.
    def get_user_from_sid(self, sid):
        user = None
        session = self._sessions.get(str(sid))
        if session:
            try:
                user = self.user_manager.get_user(session.username)
            except:
                msglog.exception(prefix="handled")
        return user

    ##
    # Check whether any active session belongs to 'username'.
    # @return True or False according to the user existence in session.
    def is_user_active(self, username):
        for sid in self._sessions:
            if self._sessions[sid].username == username:
                return True
        return False
class TriggersConfigurator(CompositeNode):
    """Configurator for triggers: creates, reconfigures, and removes
    triggers on the Trigger Manager, keeping a marshalled copy of each
    trigger in a PersistentDictionary so they can be reloaded on restart."""
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._triggers = None        # name -> marshalled trigger dump
        self.security_manager = None
        self._pdo_lock = Lock()
        super(TriggersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/triggerconfig'))
        self.setattr('manager',
                     config.get('container', '/services/Trigger Manager'))
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)

    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config

    def start(self):
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(
                    filename, encode=None, decode=None)
            if not self._triggers:
                # One-time migration from the legacy PersistentDataObject
                # store, which is destroyed once its data is copied over.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO,
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del pdodata
            self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node(
                    "/services/Security Manager")
            else:
                self.security_manager = None
        finally:
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()

    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None

    def _loadtriggers(self, names=None):
        """Unmarshal and return the named triggers (all persisted
        triggers when 'names' is None), logging and skipping any that
        fail to load."""
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers

    def _storetriggers(self, triggers=None):
        """Marshal and persist the given triggers (all of the manager's
        triggers when None), logging and skipping failures."""
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers

    def _poptriggers(self, names=None):
        """Drop persisted entries for the given names; when None, drop
        entries for triggers the manager no longer knows about."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed

    def match(self, path):
        return path.startswith(self.path)

    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')
    def create_trigger(self, name, config=()):
        """Create and persist a trigger named 'name'.

        'config' may carry a "type" of "ComparisonTrigger" (default) or
        "BoundTrigger".  Returns the new trigger's name."""
        config = dict(config)
        if "type" in config:
            # Local renamed from 'type' to avoid shadowing the builtin.
            trigtype = config.pop("type")
        else:
            trigtype = "ComparisonTrigger"
        if isinstance(trigtype, str):
            if trigtype.endswith("ComparisonTrigger"):
                trigtype = ComparisonTrigger
            elif trigtype.endswith("BoundTrigger"):
                trigtype = BoundTrigger
            else:
                # Typo "Uknown" corrected.
                raise ValueError("Unknown type: %r" % trigtype)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        trigger = self._create_trigger(trigtype, config)
        self._storetriggers([trigger])
        return trigger.name
    create_node = create_trigger

    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')
    def remove_trigger(self, name):
        """Remove a trigger and its persisted entry; returns the name."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name
    remove_node = remove_trigger

    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')
    def configure_trigger(self, name=None, config=()):
        """Reconfigure an existing trigger, restarting it around the
        change and persisting the new configuration."""
        config = dict(config)
        if name is None:
            if config.has_key("name"):
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try:
            trigger.stop()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix='Handled')
        trigger.configure(config)
        # Persist the new configuration.  create/remove both update the
        # store; without this, a restart silently restored the old
        # settings after a reconfiguration.
        self._storetriggers([trigger])
        try:
            trigger.start()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix='Handled')
        return name
    # 'configure_node' is security-protected above but was never bound;
    # alias it for consistency with create_node/remove_node.
    configure_node = configure_trigger
class TriggersConfigurator(CompositeNode):
    """Trigger configurator service.

    Creates, reconfigures, and removes triggers on the Trigger Manager,
    mirroring each trigger into a PersistentDictionary (as a marshalled
    dump) so the triggers can be rebuilt after a restart."""
    security = SecurityInformation.from_default()
    secured_by(security)

    def __init__(self, *args):
        self._triggers = None        # trigger name -> marshalled dump
        self.security_manager = None
        self._pdo_lock = Lock()
        super(TriggersConfigurator, self).__init__(*args)

    def configure(self, config):
        self.setattr('path', config.get('path', '/triggerconfig'))
        self.setattr('manager',
                     config.get('container', '/services/Trigger Manager'))
        self.secured = as_internal_node("/services").secured
        super(TriggersConfigurator, self).configure(config)

    def configuration(self):
        config = super(TriggersConfigurator, self).configuration()
        config['path'] = self.getattr('path')
        config['manager'] = self.getattr('manager')
        return config

    def start(self):
        filename = '%s (%s)' % (self.name, 'triggers')
        self.manager = self.nodespace.as_node(self.manager)
        self._pdo_lock.acquire()
        try:
            if self._triggers is None:
                self._triggers = PersistentDictionary(filename,
                                                      encode=None,
                                                      decode=None)
            if not self._triggers:
                # Migrate data from the legacy PersistentDataObject
                # store if present; the legacy store is then destroyed.
                pdodata = PersistentDataObject(self, dmtype=GC_NEVER)
                if os.path.exists(pdodata.filename()):
                    msglog.log('broadway', msglog.types.INFO,
                               "Migrating previous trigger data.")
                    pdodata.triggers = {}
                    pdodata.load()
                    self._triggers.update(pdodata.triggers)
                    pdodata.destroy()
                del pdodata
            self._loadtriggers()
            if self.secured:
                self.security_manager = self.as_node(
                    "/services/Security Manager")
            else:
                self.security_manager = None
        finally:
            self._pdo_lock.release()
        return super(TriggersConfigurator, self).start()

    def stop(self):
        super(TriggersConfigurator, self).stop()
        self.manager = None

    def _loadtriggers(self, names=None):
        """Unmarshal the named triggers (all of them when 'names' is
        None); failures are logged and skipped."""
        triggers = []
        if names is None:
            names = self._triggers.keys()
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        for name in names:
            dump = self._triggers[name]
            try:
                trigger = unmarshal(dump)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to load trigger: %s" % name)
                msglog.exception(prefix="handled")
            else:
                triggers.append(trigger)
        return triggers

    def _storetriggers(self, triggers=None):
        """Marshal and persist the given triggers; when None, persist
        every trigger the manager currently has."""
        if triggers is None:
            triggers = self.manager.get_triggers()
        elif not isinstance(triggers, (list, set, tuple)):
            triggers = [triggers]
        for trigger in triggers:
            try:
                dump = marshal(trigger)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to marshal trigger: %s" % trigger.name)
                msglog.exception(prefix="handled")
            else:
                self._triggers[trigger.name] = dump
        return triggers

    def _poptriggers(self, names=None):
        """Remove persisted entries by name; when 'names' is None,
        remove entries for triggers the manager no longer has."""
        if names is None:
            existing = set(self.manager.get_trigger_names())
            stored = self._triggers.keys()
            names = set(stored) - set(existing)
        elif not isinstance(names, (list, tuple, set)):
            names = [names]
        removed = []
        for name in names:
            try:
                self._triggers.pop(name)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Unable to remove trigger data: %s" % name)
                msglog.exception(prefix="handled")
            else:
                removed.append(name)
        return removed

    def match(self, path):
        return path.startswith(self.path)

    security.protect('create_trigger', 'Configure')
    security.protect('create_node', 'Configure')
    def create_trigger(self, name, config=()):
        """Create and persist a new trigger; returns its name.

        'config' may include "type": "ComparisonTrigger" (the default)
        or "BoundTrigger"."""
        config = dict(config)
        if "type" in config:
            # Renamed from 'type' so the builtin is not shadowed.
            trigtype = config.pop("type")
        else:
            trigtype = "ComparisonTrigger"
        if isinstance(trigtype, str):
            if trigtype.endswith("ComparisonTrigger"):
                trigtype = ComparisonTrigger
            elif trigtype.endswith("BoundTrigger"):
                trigtype = BoundTrigger
            else:
                # "Uknown" typo corrected in the error message.
                raise ValueError("Unknown type: %r" % trigtype)
        config.setdefault("name", name)
        config.setdefault("parent", self.manager)
        trigger = self._create_trigger(trigtype, config)
        self._storetriggers([trigger])
        return trigger.name
    create_node = create_trigger

    security.protect('remove_trigger', 'Configure')
    security.protect('remove_node', 'Configure')
    def remove_trigger(self, name):
        """Remove a trigger and drop its persisted entry."""
        self._remove_trigger(name)
        self._poptriggers([name])
        return name
    remove_node = remove_trigger

    security.protect('configure_trigger', 'Configure')
    security.protect('configure_node', 'Configure')
    def configure_trigger(self, name=None, config=()):
        """Apply 'config' to an existing trigger, stopping it first and
        restarting it afterwards, then persist the new configuration."""
        config = dict(config)
        if name is None:
            if config.has_key("name"):
                name = config["name"]
            else:
                raise TypeError("configure_trigger() requires"
                                " name or configuration with name")
        trigger = self.manager.get_trigger(name)
        try:
            trigger.stop()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on stop.')
            msglog.exception(prefix='Handled')
        trigger.configure(config)
        # Mirror the change into the persistent store so a restart does
        # not revert the trigger to its previous configuration.
        self._storetriggers([trigger])
        try:
            trigger.start()
        except Exception:
            msglog.log('broadway', msglog.types.WARN,
                       'Ignoring following exception on start.')
            msglog.exception(prefix='Handled')
        return name
    # Bind the security-protected 'configure_node' alias, matching the
    # create_node/remove_node pattern above.
    configure_node = configure_trigger
class SessionManager(ServiceNode):
    """Session service: hands out session IDs, validates them, and
    reaps expired ones on a schedule.  Sessions are kept in a
    PersistentDictionary and therefore survive restarts."""
    import string
    IDCHARS = string.ascii_letters + string.digits
    NCHARS = len(IDCHARS)
    IDLEN = 20
    ETC_DIR = properties.ETC_DIR

    def __init__(self):
        self.ttl = 3600          # seconds a session stays valid
        self._lock = Lock()
        self._sessions = None    # sid -> Session, built in start()
        self._scheduled = None   # recurring collect() schedule handle
        self.user_manager = None
        ServiceNode.__init__(self)

    def _begin_critical_section(self):
        self._lock.acquire()

    def _end_critical_section(self):
        self._lock.release()

    def _random_id(self):
        return str(UUID())

    def _next_session_id(self):
        # Draw random IDs until one is unused.
        sid = self._random_id()
        while self._sessions.has_key(sid):
            sid = self._random_id()
        return sid

    def start(self):
        self._begin_critical_section()
        try:
            if self._sessions is None:
                self._sessions = PersistentDictionary(
                    self.name, encode=Session.encode, decode=Session.decode)
            if not self._scheduled:
                # Run the expiry sweep once every TTL period.
                self._scheduled = scheduler.every(self.ttl, self.collect)
        finally:
            self._end_critical_section()
        self.user_manager = as_node("/services/User Manager")
        return ServiceNode.start(self)

    def stop(self):
        self._begin_critical_section()
        try:
            if self._scheduled:
                self._scheduled.cancel()
            self._scheduled = None
            self._sessions = None
        finally:
            self._end_critical_section()
        self.user_manager = None
        return ServiceNode.stop(self)

    def configure(self, cd):
        ServiceNode.configure(self, cd)
        set_attribute(self, 'ttl', self.ttl, cd, float)
        self.enabled = 1

    def configuration(self):
        cd = ServiceNode.configuration(self)
        get_attribute(self, 'ttl', cd, str)
        return cd

    ##
    # Create a new session and return its ID.
    # @fixme Use mpx.lib.security.User, as soon as it exists.
    # @fixme Cache mediator users...
    # @param user An authenticated User, or a username to authenticate
    #        against 'password' (via PAM when PAM_ENABLE is set).
    # @exception ESessionDenied Raised when the SessionManager rejects
    #            creating the session because the username or password
    #            are incorrect or there is some other aspect of the
    #            request which is not acceptable to the SessionManager.
    def create(self, user, password=None):
        if not isinstance(user, User):
            if properties.get_boolean('PAM_ENABLE'):
                authenticator = self.user_manager.user_from_pam
            else:
                authenticator = self.user_manager.user_from_cleartext
            try:
                user = authenticator(user, password)
            except EAuthenticationFailed:
                raise ESessionDenied("User credentials invalid.")
        self._begin_critical_section()
        try:
            sid = self._next_session_id()
            username = None
            # User.name is an attribute; legacy _User.name() is a method.
            if isinstance(user, User):
                username = user.name
            if isinstance(user, _User):
                username = user.name()
            self._sessions[sid] = Session(session_id=sid, ttl=self.ttl,
                                          username=username,
                                          password=password)
        finally:
            self._end_critical_section()
        return sid

    ##
    # Immediately invalidate a session.
    # @param session_id The string that identifies the session to invalidate.
    # @return The removed Session (truthy) on success; False when the
    #         session did not exist.
    def destroy(self, session_id):
        self._begin_critical_section()
        try:
            # The removal is the pop() itself.  The old code followed it
            # with 'del' on the same key, which always raised KeyError
            # and made destroy() report False even after removing the
            # session.
            removed = self._sessions.pop(session_id)
        except KeyError:
            removed = False
        finally:
            self._end_critical_section()
        return removed

    ##
    # Checks if a session_id is in the list of valid sessions.
    # @param session_id The string that identifies the session.
    # @param touch If true, and the session exists, then the session's
    #        last_access time will be updated.
    # @return True if the session_id is currently valid.
    # @note The implementation assumes that if a session_id is in the list of
    #       managed sessions, then it is valid.  It is the responsibility of
    #       the "auto collection" mechanism to remove expired sessions in a
    #       timely fashion.
    def validate(self, session_id, touch=0):
        self._begin_critical_section()
        try:
            session = self._sessions.get(session_id)
            if session and session.valid():
                if touch:
                    session.touch()
                valid = True
            else:
                valid = False
        finally:
            self._end_critical_section()
        return valid

    ##
    # Scan all managed sessions and destroy the expired ones.
    # @return The list of session IDs invalidated by this invocation.
    def collect(self):
        sessions = self._sessions
        self._begin_critical_section()
        try:
            sids = [sid for sid, ses in sessions.items() if not ses.valid()]
        finally:
            self._end_critical_section()
        # destroy() takes the lock itself, so it runs outside the
        # critical section.
        for sid in sids:
            self.destroy(sid)
        return sids

    ##
    # Look up the user associated with a session.
    # @return The user associated with this session, or None.
    def get_user_from_sid(self, sid):
        user = None
        session = self._sessions.get(str(sid))
        if session:
            try:
                user = self.user_manager.get_user(session.username)
            except:
                msglog.exception(prefix="handled")
        return user

    ##
    # Check whether 'username' owns any current session.
    # @return True or False according to the user existence in session.
    def is_user_active(self, username):
        for sid in self._sessions:
            if self._sessions[sid].username == username:
                return True
        return False
class EquipmentMonitor(CompositeNode):
    """Service managing 'pushed' subscriptions: collects subscribed node
    values on schedule, formats them with its IFormatter child, and
    delivers them with its ITransporter child via worker threads."""
    implements(IEquipmentMonitor)

    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()
        self.threadcount = 1
        self.formatter = None      # sole IFormatter child; bound in start()
        self.transporter = None    # sole ITransporter child; bound in start()
        self.smservice = None
        self.subscriptions = None  # sid -> PushedSubscription, persistent
        self.running = Flag()
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()
        # timestamp -> SubscriptionGroup awaiting execution at that time
        # (see notify_group_executed, which pops by group.timestamp).
        self.execution_groups = Dictionary()
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)

    def configure(self, config):
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)

    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config

    def start(self):
        """Bind formatter/transporter children, load persisted
        subscriptions (migrating legacy PDO data if present), and start
        them with staggered skip counts to spread the initial load."""
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            children = self.children_nodes()
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name,
                encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration from the legacy PersistentDataObject.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del pdodata
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Build at least one skip count per subscription, in blocks
            # of 30, so start-up work is staggered rather than bursty.
            # (// keeps the intended integer division explicit.)
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) // 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()

    def stop(self):
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            # "Eqiupment" typo corrected in the error message.
            message = "Exception caused Equipment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()

    def get_subscription(self, sid, default=None):
        return self.subscriptions.get(sid, default)

    def get_subscription_manager(self):
        return self.smservice

    def get_formatter(self):
        return self.formatter

    def get_transporter(self):
        return self.transporter

    def schedule_subscription(self, subscription, timestamp):
        """Add 'subscription' to the execution group for 'timestamp',
        creating and scheduling the group if it does not exist yet."""
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(
                    timestamp, schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry

    def enqueue_work(self, callback, *args):
        self.work_queue.put((callback, args))

    def dequeue_work(self, blocking=True):
        return self.work_queue.get(blocking)

    def is_running(self):
        return self.running.isSet()

    def assert_running(self):
        if not self.is_running():
            raise TypeError('Service must be running.')
        return

    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist, and start a pushed subscription; returns
        the new subscription ID."""
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table,
                                    period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree,))
        self.debugout('\n '.join(message), 2)
        pushed.start(1)
        return sid

    def cancel(self, sid):
        """Stop and remove a subscription; True if it was running."""
        self.assert_running()
        if self.pause(sid):
            self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False

    def pause(self, sid, delay=None):
        # 'delay' is accepted for interface compatibility but unused.
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False

    def play(self, sid):
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False

    def reset(self, sid):
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False

    def list_subscriptions(self):
        return self.subscriptions.keys()

    def notify_group_executed(self, group):
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()

    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue, shut down the
        transporter's channels, and close the subscription store."""
        self.synclock.acquire()
        try:
            # Iterate the group objects, not the timestamp keys: the
            # mapping is timestamp -> group, and iterating it directly
            # yielded timestamps, so 'group.scheduled' could never work.
            for group in self.execution_groups.values():
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            # Drain the queue completely.  Testing the dequeued item for
            # truth (as before) stopped early on a None shutdown
            # sentinel, leaving entries behind.
            try:
                while True:
                    self.work_queue.get_nowait()
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()

    def setup_work_threads(self):
        """Spin up 'threadcount' daemon WorkThreads feeding from the
        work queue; returns the number of threads running."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)

    def teardown_work_threads(self):
        """Wake each worker with a None sentinel and join them all;
        returns the number of threads stopped."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        for _ in range(threadcount):
            self.work_queue.put(None)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount

    def serialize_subscription(self, subscription):
        return repr(subscription.as_dictionary())

    def unserialize_subscription(self, data):
        # NOTE(review): eval() of persisted data; tolerable only because
        # the store is locally written, trusted data.
        return PushedSubscription.from_dictionary(eval(data))

    def debugout(self, dbmessage, dblevel=1):
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)
class EquipmentMonitor(CompositeNode):
    # Service node that owns a set of PushedSubscription objects, a pool of
    # WorkThread workers, and exactly one IFormatter child and one
    # ITransporter child.  Subscriptions are persisted (as repr'd
    # dictionaries) in a PersistentDictionary so they survive restarts.
    implements(IEquipmentMonitor)

    def __init__(self, *args):
        # All runtime state starts empty/None; real setup happens in start().
        self.test_machines = []
        self.synclock = RLock()               # guards start/stop/cleanup
        self.threadcount = 1                  # worker threads; see configure()
        self.formatter = None                 # IFormatter child, found in start()
        self.transporter = None               # ITransporter child, found in start()
        self.smservice = None                 # resolved subscription-manager node
        self.subscriptions = None             # PersistentDictionary, sid -> subscription
        self.running = Flag()                 # set while service is running
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()         # guards execution_groups
        self.execution_groups = Dictionary()  # timestamp -> SubscriptionGroup
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)

    def configure(self, config):
        # Both settings are optional; current values act as defaults.
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)

    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config

    def start(self):
        """Bring the service up: locate formatter/transporter children,
        open (and if necessary migrate) subscription persistence, set up
        worker threads, then start every persisted subscription.

        Raises TypeError if already running, if a formatter/transporter
        child is missing, or if more than one of either is present.
        On any failure all resources are cleaned up and the exception
        re-raised.
        """
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            # Discard any persistence handle left over from a prior run.
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            children = self.children_nodes()
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name, encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration from the legacy PersistentDataObject
            # store into the PersistentDictionary; the old file is
            # destroyed after a successful merge.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del (pdodata)
            message = 'Equipment Monitor startup: %s %s'
            # Phase 1: set up every subscription; failures are logged
            # and skipped so one bad subscription cannot block startup.
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Build a staggered skip-count list (30 subscriptions per
            # step; Python 2 integer division) so subscription starts
            # are spread out rather than simultaneous.
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) / 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            # Phase 2: start each subscription with its stagger value.
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            # Any startup failure: release everything and report stopped.
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()

    def stop(self):
        """Stop all subscriptions and worker threads, then release
        resources.  If shutdown itself fails the running flag is
        restored and the exception re-raised.
        """
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            # NOTE: "Eqiupment" typo exists in the original runtime
            # message; left untouched because it is emitted at runtime.
            message = "Exception caused Eqiupment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()

    def get_subscription(self, sid, default=None):
        return self.subscriptions.get(sid, default)

    def get_subscription_manager(self):
        return self.smservice

    def get_formatter(self):
        return self.formatter

    def get_transporter(self):
        return self.transporter

    def schedule_subscription(self, subscription, timestamp):
        """Add 'subscription' to the execution group for 'timestamp',
        creating and scheduling the group on first use.  Returns the
        entry produced by the group's add_subscription().
        """
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(timestamp,
                                                       schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry

    def enqueue_work(self, callback, *args):
        # Work items are (callback, args) tuples consumed by WorkThreads.
        self.work_queue.put((callback, args))

    def dequeue_work(self, blocking=True):
        return self.work_queue.get(blocking)

    def is_running(self):
        return self.running.isSet()

    def assert_running(self):
        # Raises TypeError unless the service is running.
        if not self.is_running():
            raise TypeError('Service must be running.')
        return

    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist, and start a new PushedSubscription.

        node_table is either a node-URL string (subscribe to its
        children) or a mapping of nodes.  Returns the new sid.
        """
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree, ))
        self.debugout('\n '.join(message), 2)
        pushed.start(1)
        return sid

    def cancel(self, sid):
        """Stop and remove subscription 'sid'.  Returns True if it was
        running and has been removed, False otherwise (pause() returning
        False leaves the subscription in place).
        """
        self.assert_running()
        if self.pause(sid):
            subscription = self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False

    def pause(self, sid, delay=None):
        # Returns True only if the subscription exists and was running.
        # NOTE(review): the 'delay' parameter is accepted but never used.
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False

    def play(self, sid):
        # Restart a paused subscription.  Raises KeyError for unknown sid.
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False

    def reset(self, sid):
        # Re-run subscription setup; True if the sid exists.
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False

    def list_subscriptions(self):
        return self.subscriptions.keys()

    def notify_group_executed(self, group):
        # Called by a SubscriptionGroup once it has run; drop it from
        # the pending-execution map.
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()

    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue, shut down
        transporter channels, and close/clear subscription state.
        Safe to call from both failed start() and successful stop().
        """
        self.synclock.acquire()
        try:
            # NOTE(review): if Dictionary iterates like a plain dict this
            # yields timestamps (keys), and group.scheduled.cancel() would
            # raise and be swallowed by the except below — confirm
            # Dictionary's iteration semantics.
            for group in self.execution_groups:
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            # Drain any queued work without blocking.
            try:
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()

    def setup_work_threads(self):
        # Spin up 'threadcount' daemon WorkThreads; returns the count.
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)

    def teardown_work_threads(self):
        # Post one None sentinel per thread to unblock dequeue_work,
        # then join them all.  Returns how many threads were stopped.
        assert not self.is_running()
        threadcount = len(self.work_threads)
        map(self.work_queue.put, [None] * threadcount)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount

    def serialize_subscription(self, subscription):
        # Persistence encoder: subscriptions round-trip via repr/eval of
        # their dictionary form (see unserialize_subscription).
        return repr(subscription.as_dictionary())

    def unserialize_subscription(self, data):
        # NOTE(review): eval() of persisted data — acceptable only if the
        # persistence file is fully trusted; confirm.
        return PushedSubscription.from_dictionary(eval(data))

    def debugout(self, dbmessage, dblevel=1):
        # Emit a DB-type log message when DEBUG is at least 'dblevel'.
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)
class NodeConfigurator(CompositeNode):
    """Composite node that manages a persistent set of child nodes.

    Each managed node is recorded in a PersistentDictionary keyed by
    node URL, with a (factory, configuration) tuple as the value, so
    that the nodes can be recreated on the next framework start.
    """

    def __init__(self, *args, **kw):
        # Persistence is created lazily in start() so the dictionary can
        # be named after the fully configured node.
        self.nodes = None
        super(NodeConfigurator, self).__init__(*args, **kw)

    def start(self):
        """Open persistence and recreate every previously managed node.

        URLs are sorted with pathcompare so parent nodes are created
        before their children.
        """
        if self.nodes is None:
            dictname = "%s (%s)" % (type(self).__name__, self.name)
            self.nodes = PersistentDictionary(dictname)
        nodeurls = self.nodes.keys()
        nodeurls.sort(pathcompare)
        for nodeurl in nodeurls:
            factory, configuration = self.nodes[nodeurl]
            self.create_node(factory, nodeurl, **configuration)
        super(NodeConfigurator, self).start()

    def get_managed_node(self, nodeurl):
        """Return the node at 'nodeurl'.

        Raises TypeError if the URL is not in the managed set.
        """
        if not self.nodes.has_key(nodeurl):
            raise TypeError("cannot manipulate unmanaged node: %s" % nodeurl)
        return as_node(nodeurl)

    def node_children(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        return node.children_names()

    def node_configuration(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        return node.configuration()

    def start_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.start()

    def stop_node(self, nodeurl):
        node = self.get_managed_node(nodeurl)
        node.stop()

    def node_attr(self, nodeurl, name, value=Undefined):
        """Get, or (when 'value' is given) set and persist, attribute
        'name' on the managed node.  Returns the current value.
        """
        node = self.get_managed_node(nodeurl)
        if value is not Undefined:
            setattr(node, name, value)
            self.updatepdo(nodeurl, node)
        return getattr(node, name)

    def configure_node(self, nodeurl, config):
        """Reconfigure a managed node, rolling back to the persisted
        configuration if the new one fails.  The node is stopped during
        reconfiguration and restarted afterwards.  Returns the node's
        resulting configuration.
        """
        node = self.get_managed_node(nodeurl)
        node.stop()
        try:
            node.configure(config)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented reconfiguration of node: %s" % node)
            msglog.exception(prefix="handled")
            msglog.log("broadway", msglog.types.WARN,
                       "Rolling back configuration.")
            try:
                # BUGFIX: self.nodes stores (factory, configuration)
                # tuples (see updatepdo); the original passed the whole
                # tuple to configure(), so rollback could never work.
                factory, configuration = self.nodes[nodeurl]
                node.configure(configuration)
            except:
                msglog.log("broadway", msglog.types.WARN,
                           "Configuration rollback failed.")
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Rollback of configuration succeeded.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "Node reconfigured: %s" % node)
            self.updatepdo(nodeurl, node)
        finally:
            node.start()
        return node.configuration()

    def create_node(self, factory, nodeurl, **config):
        """Instantiate, configure, persist, and start a new node at
        'nodeurl'.

        'factory' is either a callable or a dotted "module.Class"
        string.  Raises TypeError if a node already exists at the URL.
        On configuration failure the partially built node is pruned.
        Returns the new node's configuration.
        """
        try:
            as_node(nodeurl)
        except KeyError:
            pass
        else:
            raise TypeError("Node exists: %s" % nodeurl)
        if isinstance(factory, str):
            # NOTE(security): exec/eval of the factory string executes
            # arbitrary code; callers must only pass trusted factory
            # names (normally these come from our own persistence).
            module, sep, name = factory.rpartition(".")
            if name:
                exec("import %s" % module)
                factory = eval(factory)
        parent, sep, name = nodeurl.rpartition("/")
        configuration = {"name": name, "parent": parent}
        configuration.update(config)
        node = factory()
        try:
            node.configure(configuration)
        except:
            msglog.log("broadway", msglog.types.WARN,
                       "Error prevented configuration of new node: %s" % node)
            msglog.exception(prefix="handled")
            try:
                node.prune()
            except:
                msglog.exception(prefix="handled")
            else:
                msglog.log("broadway", msglog.types.INFO,
                           "Node successfully pruned.")
        else:
            msglog.log("broadway", msglog.types.INFO,
                       "New node created: %s" % node)
            self.updatepdo(nodeurl, node)
            node.start()
        return node.configuration()

    def remove_node(self, nodeurl):
        """Prune a managed node and drop it from persistence."""
        node = self.get_managed_node(nodeurl)
        node.prune()
        self.updatepdo(nodeurl, None)

    def updatepdo(self, nodeurl, node):
        """Update persistence for 'nodeurl'.

        Passing node=None removes the entry; otherwise the entry is
        replaced with the node's current (factory, configuration) tuple,
        keyed by the node's canonical URL.  Returns the URL used.
        """
        if self.nodes.has_key(nodeurl):
            self.nodes.pop(nodeurl)
        if node:
            node = as_node(node)
            nodeurl = as_node_url(node)
            datatype = type(node)
            factory = "%s.%s" % (datatype.__module__, datatype.__name__)
            self.nodes[nodeurl] = (factory, node.configuration())
        return nodeurl