class OverrideManager(object):
    """Tracks per-node priority arrays, persisted by node path.

    Each node path maps to a dict of 18 priority slots keyed by the
    strings '1'..'18' (priority-array style slots -- presumably BACnet
    priorities plus two framework extras; TODO confirm semantics).
    """
    def __init__(self):
        # Backing store is a project PersistentDictionary keyed by nodepath.
        self._priority_arrays = PersistentDictionary('OverrideManager')
    def get_array(self, nodepath):
        """Return the priority array for nodepath, creating and persisting
        a blank 18-slot array on first use."""
        pa = self._priority_arrays.get(nodepath)
        if pa is None:
            # Idiom fix: generate the 18 empty slots instead of a
            # hand-written 18-entry literal.
            pa = dict([(str(level), None) for level in range(1, 19)])
            self._priority_arrays[nodepath] = pa
        return pa
    def notify_changed(self, nodepath, priority_array=None):
        """Persist an updated priority array (when given) and broadcast
        the change notification for nodepath."""
        if priority_array is not None:
            self._priority_arrays[nodepath] = priority_array
        self._priority_arrays.notify_changed(nodepath)
    def singleton_unload_hook(self):
        # Required singleton-service hook; nothing to release on unload.
        pass
class SessionManager(ServiceNode):
    """Manages authenticated user sessions with a TTL-based expiry sweep."""
    import string
    # Legacy id-generation constants; _random_id() now uses UUIDs, but these
    # remain for backward compatibility with any external users.
    IDCHARS = string.ascii_letters + string.digits
    NCHARS = len(IDCHARS)
    IDLEN = 20
    ETC_DIR = properties.ETC_DIR
    def __init__(self):
        self.ttl = 3600  # session time-to-live in seconds; configurable
        self._lock = Lock()
        self._sessions = None   # PersistentDictionary, created in start()
        self._scheduled = None  # recurring scheduler entry for collect()
        self.user_manager = None
        ServiceNode.__init__(self)
    def _begin_critical_section(self):
        self._lock.acquire()
    def _end_critical_section(self):
        self._lock.release()
    def _random_id(self):
        return str(UUID())
    def _next_session_id(self):
        # Draw random ids until one is not already in use.  Caller must
        # hold the critical-section lock.
        sid = self._random_id()
        # Fixed: was _sessions.has_key(sid); has_key() is deprecated and
        # removed in Python 3 -- 'in' is the equivalent membership test.
        while sid in self._sessions:
            sid = self._random_id()
        return sid
    def start(self):
        """Open the persistent session store and schedule the periodic
        expiry sweep, then resolve the User Manager service."""
        self._begin_critical_section()
        try:
            if self._sessions is None:
                self._sessions = PersistentDictionary(
                    self.name, encode=Session.encode, decode=Session.decode)
            if not self._scheduled:
                self._scheduled = scheduler.every(self.ttl, self.collect)
        finally:
            self._end_critical_section()
        self.user_manager = as_node("/services/User Manager")
        return ServiceNode.start(self)
    def stop(self):
        """Cancel the expiry sweep and drop the session-store reference."""
        self._begin_critical_section()
        try:
            if self._scheduled:
                self._scheduled.cancel()
            self._scheduled = None
            self._sessions = None
        finally:
            self._end_critical_section()
        self.user_manager = None
        return ServiceNode.stop(self)
    def configure(self, cd):
        ServiceNode.configure(self, cd)
        set_attribute(self, 'ttl', self.ttl, cd, float)
        # This service is always enabled regardless of configuration.
        self.enabled = 1
    def configuration(self):
        cd = ServiceNode.configuration(self)
        get_attribute(self, 'ttl', cd, str)
        return cd
    ##
    # @fixme Use mpx.lib.security.User, as soon as it exists.
    # @fixme Cache mediator users...
    # @param nocheck Since we do not use shadow passwords, the check can
    #        fail.  This is used to allow bypassing the check (for testing).
    # @exception ESessionDenied Raised when the SessionManager rejects
    #            creating the session because the request is not valid --
    #            the username or password is incorrect, or some other
    #            aspect of the request is unacceptable.
# SessionManager methods: session creation, destruction, and validation.

def create(self, user, password=None):
    """Create a new session for user (a User object or a username string).

    A username string is authenticated via PAM or cleartext depending on
    the PAM_ENABLE property.  Returns the new session id.
    Raises ESessionDenied when authentication fails.
    """
    if not isinstance(user, User):
        if properties.get_boolean('PAM_ENABLE'):
            authenticator = self.user_manager.user_from_pam
        else:
            authenticator = self.user_manager.user_from_cleartext
        try:
            user = authenticator(user, password)
        except EAuthenticationFailed:
            raise ESessionDenied("User credentials invalid.")
    self._begin_critical_section()
    try:
        sid = self._next_session_id()
        username = None
        # User exposes .name as an attribute; legacy _User exposes it as
        # a method -- handle both.
        if isinstance(user, User):
            username = user.name
        if isinstance(user, _User):
            username = user.name()
        # NOTE(review): the cleartext password is stored in the persisted
        # session -- confirm this is intended.
        self._sessions[sid] = Session(session_id=sid,
                                      ttl=self.ttl,
                                      username=username,
                                      password=password)
    finally:
        self._end_critical_section()
    return sid

def destroy(self, session_id):
    """Immediately invalidate a session.

    Returns the removed Session (truthy) on success, or False when the
    session id was unknown.
    """
    self._begin_critical_section()
    try:
        # BUG FIX: the original popped the session and then also ran
        # 'del self._sessions[session_id]' -- the key was already gone,
        # so the del always raised KeyError and destroy() returned False
        # even when a session had been successfully removed.
        removed = self._sessions.pop(session_id)
    except KeyError:
        removed = False
    finally:
        self._end_critical_section()
    return removed

def validate(self, session_id, touch=0):
    """Return True when session_id refers to a currently valid session.

    When touch is true and the session exists, its last-access time is
    refreshed as a side effect.  Assumes any session still present in
    the store is valid; the periodic collect() sweep removes expired
    sessions in a timely fashion.
    """
    self._begin_critical_section()
    try:
        session = self._sessions.get(session_id)
        if session and session.valid():
            if touch:
                session.touch()
            valid = True
        else:
            valid = False
    finally:
        self._end_critical_section()
    return valid
# SessionManager maintenance and lookup helpers.

def collect(self):
    """Sweep all managed sessions and destroy the expired ones.

    Returns the list of session ids that were invalidated.
    """
    snapshot = self._sessions
    self._begin_critical_section()
    try:
        expired = []
        for sid, session in snapshot.items():
            if not session.valid():
                expired.append(sid)
    finally:
        self._end_critical_section()
    # Destroy outside the critical section; destroy() takes the lock itself.
    for sid in expired:
        self.destroy(sid)
    return expired

def get_user_from_sid(self, sid):
    """Return the user object associated with a session id, or None."""
    session = self._sessions.get(str(sid))
    if not session:
        return None
    try:
        return self.user_manager.get_user(session.username)
    except:
        # Best-effort lookup: log the failure and fall through to None.
        msglog.exception(prefix="handled")
    return None

def is_user_active(self, username):
    """Return True when any managed session belongs to username."""
    sessions = self._sessions
    for sid in sessions:
        if sessions[sid].username == username:
            return True
    return False
class EquipmentMonitor(CompositeNode):
    """Pushes subscribed point data to remote targets via formatter and
    transporter child nodes, persisting subscriptions across restarts."""
    implements(IEquipmentMonitor)
    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()
        self.threadcount = 1
        self.formatter = None      # IFormatter child, discovered in start()
        self.transporter = None    # ITransporter child, discovered in start()
        self.smservice = None
        self.subscriptions = None  # sid -> PushedSubscription (persistent)
        self.running = Flag()
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()
        self.execution_groups = Dictionary()  # timestamp -> SubscriptionGroup
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)
    def configure(self, config):
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)
    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config
    def start(self):
        """Validate children, open persistence, and (re)start subscriptions."""
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            # Exactly one formatter child and one transporter child required.
            children = self.children_nodes()
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name,
                encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration of legacy PDO persistence into the
            # PersistentDictionary store.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del pdodata
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Stagger subscription start-up: roughly 30 subscriptions per
            # skip-count bucket.  Floor division keeps this correct on
            # Python 3 as well (plain "/" would yield a float there).
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) // 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            # Failed startup: tear down whatever was set up, report stopped.
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()
    def stop(self):
        """Stop all subscriptions and worker threads, then release resources."""
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            # Typo fix in log text: was "Eqiupment".
            message = "Exception caused Equipment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()
    def get_subscription(self, sid, default=None):
        return self.subscriptions.get(sid, default)
    def get_subscription_manager(self):
        return self.smservice
    def get_formatter(self):
        return self.formatter
    def get_transporter(self):
        return self.transporter
    def schedule_subscription(self, subscription, timestamp):
        """Group subscriptions by execution timestamp with one scheduler
        entry per group; returns the group's schedule entry."""
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(
                    timestamp, schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry
    def enqueue_work(self, callback, *args):
        self.work_queue.put((callback, args))
    def dequeue_work(self, blocking=True):
        return self.work_queue.get(blocking)
    def is_running(self):
        return self.running.isSet()
    def assert_running(self):
        if not self.is_running():
            raise TypeError('Service must be running.')
        return
    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist, and start a pushed subscription; returns its sid."""
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree,))
        self.debugout('\n '.join(message), 2)
        pushed.start(1)
        return sid
    def cancel(self, sid):
        """Stop and forget a subscription; True when it existed and was
        running."""
        self.assert_running()
        if self.pause(sid):
            # Removed unused local binding of the popped subscription.
            self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False
    def pause(self, sid, delay=None):
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False
    def play(self, sid):
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False
    def reset(self, sid):
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False
    def list_subscriptions(self):
        return self.subscriptions.keys()
    def notify_group_executed(self, group):
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()
    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue, shut down transport
        channels, and close subscription persistence."""
        self.synclock.acquire()
        try:
            # BUG FIX: the original iterated the mapping's KEYS (timestamps)
            # and called timestamp.scheduled.cancel(), which always raised
            # AttributeError (silently swallowed by the bare except), so
            # scheduled entries were never cancelled.  Iterate the group
            # values instead.
            for group in self.execution_groups.values():
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            try:
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()
    def setup_work_threads(self):
        """Spin up the configured number of worker threads; returns count."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)
    def teardown_work_threads(self):
        """Poison-pill and join all worker threads; returns count joined."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        # Explicit loop (was map() for side effects): map is lazy on
        # Python 3, which would silently skip enqueuing the poison pills.
        for _ in range(threadcount):
            self.work_queue.put(None)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount
    def serialize_subscription(self, subscription):
        return repr(subscription.as_dictionary())
    def unserialize_subscription(self, data):
        # SECURITY NOTE: eval() of persisted data is safe only while the
        # persistence store is trusted and local; consider
        # ast.literal_eval for defense in depth.
        return PushedSubscription.from_dictionary(eval(data))
    def debugout(self, dbmessage, dblevel=1):
        """Log a debug message when dblevel is within the global DEBUG level."""
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)
class SessionManager(ServiceNode):
    """Manages authenticated user sessions with a TTL-based expiry sweep."""
    import string
    # Legacy id-generation constants; _random_id() now uses UUIDs, but these
    # remain for backward compatibility with any external users.
    IDCHARS = string.ascii_letters + string.digits
    NCHARS = len(IDCHARS)
    IDLEN = 20
    ETC_DIR = properties.ETC_DIR
    def __init__(self):
        self.ttl = 3600  # session time-to-live in seconds; configurable
        self._lock = Lock()
        self._sessions = None   # PersistentDictionary, created in start()
        self._scheduled = None  # recurring scheduler entry for collect()
        self.user_manager = None
        ServiceNode.__init__(self)
    def _begin_critical_section(self):
        self._lock.acquire()
    def _end_critical_section(self):
        self._lock.release()
    def _random_id(self):
        return str(UUID())
    def _next_session_id(self):
        # Draw random ids until one is not already in use.  Caller must
        # hold the critical-section lock.
        sid = self._random_id()
        # Fixed: was _sessions.has_key(sid); has_key() is deprecated and
        # removed in Python 3 -- 'in' is the equivalent membership test.
        while sid in self._sessions:
            sid = self._random_id()
        return sid
    def start(self):
        """Open the persistent session store and schedule the periodic
        expiry sweep, then resolve the User Manager service."""
        self._begin_critical_section()
        try:
            if self._sessions is None:
                self._sessions = PersistentDictionary(
                    self.name, encode=Session.encode, decode=Session.decode)
            if not self._scheduled:
                self._scheduled = scheduler.every(self.ttl, self.collect)
        finally:
            self._end_critical_section()
        self.user_manager = as_node("/services/User Manager")
        return ServiceNode.start(self)
    def stop(self):
        """Cancel the expiry sweep and drop the session-store reference."""
        self._begin_critical_section()
        try:
            if self._scheduled:
                self._scheduled.cancel()
            self._scheduled = None
            self._sessions = None
        finally:
            self._end_critical_section()
        self.user_manager = None
        return ServiceNode.stop(self)
    def configure(self, cd):
        ServiceNode.configure(self, cd)
        set_attribute(self, 'ttl', self.ttl, cd, float)
        # This service is always enabled regardless of configuration.
        self.enabled = 1
    def configuration(self):
        cd = ServiceNode.configuration(self)
        get_attribute(self, 'ttl', cd, str)
        return cd
    ##
    # @fixme Use mpx.lib.security.User, as soon as it exists.
    # @fixme Cache mediator users...
    # @param nocheck Since we do not use shadow passwords, the check can
    #        fail.  This is used to allow bypassing the check (for testing).
    # @exception ESessionDenied Raised when the SessionManager rejects
    #            creating the session because the request is not valid --
    #            the username or password is incorrect, or some other
    #            aspect of the request is unacceptable.
# SessionManager methods: session creation, destruction, and validation.

def create(self, user, password=None):
    """Create a new session for user (a User object or a username string).

    A username string is authenticated via PAM or cleartext depending on
    the PAM_ENABLE property.  Returns the new session id.
    Raises ESessionDenied when authentication fails.
    """
    if not isinstance(user, User):
        if properties.get_boolean('PAM_ENABLE'):
            authenticator = self.user_manager.user_from_pam
        else:
            authenticator = self.user_manager.user_from_cleartext
        try:
            user = authenticator(user, password)
        except EAuthenticationFailed:
            raise ESessionDenied("User credentials invalid.")
    self._begin_critical_section()
    try:
        sid = self._next_session_id()
        username = None
        # User exposes .name as an attribute; legacy _User exposes it as
        # a method -- handle both.
        if isinstance(user, User):
            username = user.name
        if isinstance(user, _User):
            username = user.name()
        # NOTE(review): the cleartext password is stored in the persisted
        # session -- confirm this is intended.
        self._sessions[sid] = Session(session_id=sid,
                                      ttl=self.ttl,
                                      username=username,
                                      password=password)
    finally:
        self._end_critical_section()
    return sid

def destroy(self, session_id):
    """Immediately invalidate a session.

    Returns the removed Session (truthy) on success, or False when the
    session id was unknown.
    """
    self._begin_critical_section()
    try:
        # BUG FIX: the original popped the session and then also ran
        # 'del self._sessions[session_id]' -- the key was already gone,
        # so the del always raised KeyError and destroy() returned False
        # even when a session had been successfully removed.
        removed = self._sessions.pop(session_id)
    except KeyError:
        removed = False
    finally:
        self._end_critical_section()
    return removed

def validate(self, session_id, touch=0):
    """Return True when session_id refers to a currently valid session.

    When touch is true and the session exists, its last-access time is
    refreshed as a side effect.  Assumes any session still present in
    the store is valid; the periodic collect() sweep removes expired
    sessions in a timely fashion.
    """
    self._begin_critical_section()
    try:
        session = self._sessions.get(session_id)
        if session and session.valid():
            if touch:
                session.touch()
            valid = True
        else:
            valid = False
    finally:
        self._end_critical_section()
    return valid
# SessionManager maintenance and lookup helpers.

def collect(self):
    """Sweep all managed sessions and destroy the expired ones.

    Returns the list of session ids that were invalidated.
    """
    snapshot = self._sessions
    self._begin_critical_section()
    try:
        expired = []
        for sid, session in snapshot.items():
            if not session.valid():
                expired.append(sid)
    finally:
        self._end_critical_section()
    # Destroy outside the critical section; destroy() takes the lock itself.
    for sid in expired:
        self.destroy(sid)
    return expired

def get_user_from_sid(self, sid):
    """Return the user object associated with a session id, or None."""
    session = self._sessions.get(str(sid))
    if not session:
        return None
    try:
        return self.user_manager.get_user(session.username)
    except:
        # Best-effort lookup: log the failure and fall through to None.
        msglog.exception(prefix="handled")
    return None

def is_user_active(self, username):
    """Return True when any managed session belongs to username."""
    sessions = self._sessions
    for sid in sessions:
        if sessions[sid].username == username:
            return True
    return False
class EquipmentMonitor(CompositeNode):
    """Pushes subscribed point data to remote targets via formatter and
    transporter child nodes, persisting subscriptions across restarts."""
    implements(IEquipmentMonitor)
    def __init__(self, *args):
        self.test_machines = []
        self.synclock = RLock()
        self.threadcount = 1
        self.formatter = None      # IFormatter child, discovered in start()
        self.transporter = None    # ITransporter child, discovered in start()
        self.smservice = None
        self.subscriptions = None  # sid -> PushedSubscription (persistent)
        self.running = Flag()
        self.work_threads = []
        self.work_queue = Queue()
        self.scheduling_lock = Lock()
        self.execution_groups = Dictionary()  # timestamp -> SubscriptionGroup
        self.smnodeurl = '/services/Subscription Manager'
        super(EquipmentMonitor, self).__init__(*args)
    def configure(self, config):
        self.smnodeurl = config.get('subscription_manager', self.smnodeurl)
        self.threadcount = int(config.get('threadcount', self.threadcount))
        super(EquipmentMonitor, self).configure(config)
    def configuration(self):
        config = super(EquipmentMonitor, self).configuration()
        config['subscription_manager'] = self.smnodeurl
        config['threadcount'] = str(self.threadcount)
        return config
    def start(self):
        """Validate children, open persistence, and (re)start subscriptions."""
        if self.is_running():
            raise TypeError("Equipment Monitor already running.")
        if TESTING and not self.test_machines:
            self.test_machines = setup_machines()
            machinecount = len(self.test_machines)
            self.debugout("Setup %d test machines" % machinecount)
        self.synclock.acquire()
        try:
            self.running.set()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.formatter = None
            self.transporter = None
            # Exactly one formatter child and one transporter child required.
            children = self.children_nodes()
            for childnode in children:
                if IFormatter.providedBy(childnode):
                    if self.formatter is not None:
                        raise TypeError("Already has formatter child.")
                    self.formatter = childnode
                if ITransporter.providedBy(childnode):
                    if self.transporter is not None:
                        raise TypeError("Already has transporter child.")
                    self.transporter = childnode
            if not self.formatter:
                raise TypeError("Must have one formatter child node.")
            if not self.transporter:
                raise TypeError("Must have one transporter child node.")
            self.smservice = as_node(self.smnodeurl)
            self.subscriptions = PersistentDictionary(
                self.name,
                encode=self.serialize_subscription,
                decode=self.unserialize_subscription)
            # One-time migration of legacy PDO persistence into the
            # PersistentDictionary store.
            pdodata = PersistentDataObject(self)
            if os.path.exists(pdodata.filename()):
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor upgrading persistence.")
                migrate = frompdo(pdodata)
                self.subscriptions.update(migrate)
                message = "Equipment Monitor merged %d subscriptions."
                message = message % len(migrate)
                msglog.log('broadway', msglog.types.INFO, message)
                pdodata.destroy()
                msglog.log('broadway', msglog.types.WARN,
                           "Equipment Monitor destroyed old persistence.")
                msglog.log('broadway', msglog.types.INFO,
                           "Equipment Monitor persistence upgrade complete.")
            del pdodata
            message = 'Equipment Monitor startup: %s %s'
            for subscription in self.subscriptions.values():
                try:
                    subscription.setup_subscription()
                except:
                    msglog.exception(prefix="handled")
                else:
                    self.debugout(message % ('setup', subscription))
            # Stagger subscription start-up: roughly 30 subscriptions per
            # skip-count bucket.  Floor division keeps this correct on
            # Python 3 as well (plain "/" would yield a float there).
            skipcounts = []
            for i in range(0, 1 + len(self.subscriptions) // 30):
                skipcounts.extend([i + 1] * 30)
            self.setup_work_threads()
            for subscription in self.subscriptions.values():
                try:
                    subscription.start(skipcounts.pop())
                except:
                    msglog.exception(prefix="Handled")
                else:
                    self.debugout(message % ('started', subscription))
        except:
            # Failed startup: tear down whatever was set up, report stopped.
            self.cleanup_resources()
            self.running.clear()
            raise
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).start()
    def stop(self):
        """Stop all subscriptions and worker threads, then release resources."""
        if not self.is_running():
            raise TypeError('Equipment Monitor not running.')
        self.synclock.acquire()
        try:
            self.running.clear()
            message = "Equipment Monitor shutdown: %s %s"
            for subscription in self.subscriptions.values():
                try:
                    subscription.stop()
                except:
                    msglog.exception(prefix='Handled')
                else:
                    self.debugout(message % ('stopped', subscription))
            self.teardown_work_threads()
        except:
            # Typo fix in log text: was "Eqiupment".
            message = "Exception caused Equipment Monitor shutdown to fail."
            msglog.log('broadway', msglog.types.ERR, message)
            self.running.set()
            raise
        else:
            self.cleanup_resources()
        finally:
            self.synclock.release()
        super(EquipmentMonitor, self).stop()
    def get_subscription(self, sid, default=None):
        return self.subscriptions.get(sid, default)
    def get_subscription_manager(self):
        return self.smservice
    def get_formatter(self):
        return self.formatter
    def get_transporter(self):
        return self.transporter
    def schedule_subscription(self, subscription, timestamp):
        """Group subscriptions by execution timestamp with one scheduler
        entry per group; returns the group's schedule entry."""
        self.scheduling_lock.acquire()
        try:
            schedulegroup = self.execution_groups.get(timestamp)
            if schedulegroup is None:
                schedulegroup = SubscriptionGroup(self, timestamp)
                self.execution_groups[timestamp] = schedulegroup
                schedulegroup.scheduled = scheduler.at(
                    timestamp, schedulegroup.execute)
            schedentry = schedulegroup.add_subscription(subscription)
        finally:
            self.scheduling_lock.release()
        return schedentry
    def enqueue_work(self, callback, *args):
        self.work_queue.put((callback, args))
    def dequeue_work(self, blocking=True):
        return self.work_queue.get(blocking)
    def is_running(self):
        return self.running.isSet()
    def assert_running(self):
        if not self.is_running():
            raise TypeError('Service must be running.')
        return
    def create_pushed(self, target, node_table, period=2, retries=10):
        """Create, persist, and start a pushed subscription; returns its sid."""
        self.assert_running()
        pushed = PushedSubscription(self, target, node_table, period, retries)
        sid = pushed.setup_subscription()
        self.subscriptions[sid] = pushed
        message = ['Equipment Monitor created subscription: ']
        message.append('Target URL: %s' % target)
        message.append('Period: %d sec' % period)
        message.append('Subscription ID: %s' % sid)
        if isinstance(node_table, str):
            message.append('Subscription for children of: %s' % node_table)
        else:
            firstthree = node_table.items()[0:3]
            message.append('Number of nodes: %d' % len(node_table))
            message.append('First three nodes: %s' % (firstthree,))
        self.debugout('\n '.join(message), 2)
        pushed.start(1)
        return sid
    def cancel(self, sid):
        """Stop and forget a subscription; True when it existed and was
        running."""
        self.assert_running()
        if self.pause(sid):
            # Removed unused local binding of the popped subscription.
            self.subscriptions.pop(sid)
            message = 'Equipment Monitor cancelled subscription: "%s"'
            self.debugout(message % sid, 2)
            return True
        return False
    def pause(self, sid, delay=None):
        subscription = self.subscriptions.get(sid)
        if subscription and subscription.is_running():
            subscription.stop()
            return True
        else:
            return False
    def play(self, sid):
        self.assert_running()
        subscription = self.subscriptions[sid]
        if not subscription.is_running():
            subscription.start()
            return True
        else:
            return False
    def reset(self, sid):
        subscription = self.subscriptions.get(sid)
        if subscription:
            subscription.reset_subscription()
            return True
        else:
            return False
    def list_subscriptions(self):
        return self.subscriptions.keys()
    def notify_group_executed(self, group):
        self.scheduling_lock.acquire()
        try:
            self.execution_groups.pop(group.timestamp)
        finally:
            self.scheduling_lock.release()
    def cleanup_resources(self):
        """Cancel pending groups, drain the work queue, shut down transport
        channels, and close subscription persistence."""
        self.synclock.acquire()
        try:
            # BUG FIX: the original iterated the mapping's KEYS (timestamps)
            # and called timestamp.scheduled.cancel(), which always raised
            # AttributeError (silently swallowed by the bare except), so
            # scheduled entries were never cancelled.  Iterate the group
            # values instead.
            for group in self.execution_groups.values():
                try:
                    group.scheduled.cancel()
                except:
                    msglog.exception(prefix="handled")
            self.execution_groups.clear()
            try:
                while self.work_queue.get_nowait():
                    pass
            except Empty:
                pass
            if self.transporter:
                commonitor = self.transporter.monitor
                transmanager = self.transporter.transaction_manager
                try:
                    commonitor.shutdown_channels()
                except:
                    msglog.exception(prefix="handled")
                transmanager.controllers.clear()
            if self.subscriptions and not self.subscriptions.closed():
                self.subscriptions.close()
            self.subscriptions = None
            self.transporter = None
            self.formatter = None
        finally:
            self.synclock.release()
    def setup_work_threads(self):
        """Spin up the configured number of worker threads; returns count."""
        assert self.is_running()
        assert len(self.work_threads) == 0
        while len(self.work_threads) < self.threadcount:
            monitor = WorkThread(self.is_running, self.dequeue_work)
            monitor.setDaemon(True)
            monitor.start()
            self.work_threads.append(monitor)
        return len(self.work_threads)
    def teardown_work_threads(self):
        """Poison-pill and join all worker threads; returns count joined."""
        assert not self.is_running()
        threadcount = len(self.work_threads)
        # Explicit loop (was map() for side effects): map is lazy on
        # Python 3, which would silently skip enqueuing the poison pills.
        for _ in range(threadcount):
            self.work_queue.put(None)
        while self.work_threads:
            self.work_threads.pop().join()
        return threadcount
    def serialize_subscription(self, subscription):
        return repr(subscription.as_dictionary())
    def unserialize_subscription(self, data):
        # SECURITY NOTE: eval() of persisted data is safe only while the
        # persistence store is trusted and local; consider
        # ast.literal_eval for defense in depth.
        return PushedSubscription.from_dictionary(eval(data))
    def debugout(self, dbmessage, dblevel=1):
        """Log a debug message when dblevel is within the global DEBUG level."""
        if dblevel <= DEBUG:
            msglog.log('broadway', msglog.types.DB, dbmessage)