import time
from threading import Lock  # assumption: the host framework may supply its
                            # own Lock via a threading wrapper instead

# The remaining names used below -- rss, uptime, Alarm, AlarmEvent, Event,
# StateEvent, FormationUpdated, EventQueue, ItemCache, implements, adapts,
# IRSS2Document, IAlarmManager and IRSS2ItemElement -- are assumed to be
# imported from the surrounding framework; their module paths are not shown
# in this excerpt.


class RSS2AlarmManager(object):
    """
    Adapts an alarm manager into an RSS 2.0 document.  A single syndicator
    is cached on the manager itself, so constructing this class repeatedly
    with the same manager always returns the same instance.
    """
    implements(IRSS2Document)
    adapts(IAlarmManager)

    def __new__(klass, manager):
        syndicator_name = '__' + klass.__name__
        syndicator = getattr(manager, syndicator_name, None)
        if syndicator is None:
            print 'Creating new RSS2AlarmManager'
            syndicator = super(RSS2AlarmManager, klass).__new__(klass)
            setattr(manager, syndicator_name, syndicator)
            syndicator.initialize(manager)
        return syndicator

    def initialize(self, manager):
        self.debug = 0
        self.manager = manager
        self.cache_lock = Lock()
        # Per-client caches expire after 30 seconds of inactivity; closed
        # events are kept for three minutes before being trimmed.
        self.cache_ttl = 30
        self.closed_event_ttl = 3 * 60
        self.caches = {}
        self.close_events = {}
        self.event_queue = EventQueue()
        self.hostname = AlarmEvent.LOCALORIGIN
        self.uri_base = 'http://' + self.hostname
        if self.uri_base[-1] == '/':
            self.uri_base = self.uri_base[0:-1]
        self.categories = {
            'RAISED': rss.Category("Raised"),
            'INACTIVE': rss.Category("Inactive"),
            'ACCEPTED': rss.Category("Accepted"),
            'CLEARED': rss.Category("Cleared"),
            'CLOSED': rss.Category("Closed"),
        }
        self.last_pub_time = None
        # Rebuild all caches when the alarm formation changes, and fold in
        # individual alarm state changes as they arrive.
        self.updatesub = self.manager.register_for_type(
            self.initialize_caches, FormationUpdated)
        self.subscription = self.manager.register_for_type(
            self.handle_alarm_update, StateEvent, None, True)
        self.initialize_caches()

    def initialize_caches(self, *args):
        # (Re)build the item caches from every event the manager currently
        # knows about, local and remote, plus anything queued meanwhile.
        if self.debug:
            tstart = time.time()
            print 'RSS2 Syndic initializing caches because: %s' % (args,)
        events = []
        self.cache_lock.acquire()
        try:
            self.event_queue.popqueue()
            for group in map(Alarm.get_events, self.manager.get_alarms()):
                events.extend(group)
            events.extend(self.manager.get_remote_events())
            events.extend(self.event_queue.popqueue())
            self.caches = {None: ItemCache()}
            cache = self.process_events(events)
        finally:
            self.cache_lock.release()
        if self.debug:
            tend = time.time()
            tlapse = tend - tstart
            print 'RSS2 cache init of %s events took %s seconds.' % (
                len(events), tlapse)
        return cache

    def process_events(self, events):
        # Convert events into RSS items and merge them into every live
        # cache.  The caller must already hold cache_lock.
        if not self.cache_lock.locked():
            raise Exception('Process events cannot be called unless locked.')
        cache = {}
        if events:
            guids = map(Event.get_guid, events)
            items = map(self.item_from_event, events)
            cache.update(zip(guids, items))
            for existing in self.caches.values():
                existing.update(cache)
        return cache

    def handle_alarm_update(self, event):
        # StateEvent subscription callback.  Closed events are timestamped
        # so trim_expired_caches() can drop them once closed_event_ttl has
        # elapsed.  The lock is acquired non-blockingly: if another thread
        # holds it, the queued event is folded in by the next caller.
        if self.debug:
            tstart = time.time()
        if isinstance(event, StateEvent):
            event = event.source
        if event.is_state('closed'):
            self.close_events[event.GUID] = uptime.secs()
        self.event_queue.enqueue(event)
        if self.cache_lock.acquire(0):
            try:
                self.trim_expired_caches()
                self.process_events(self.event_queue.popqueue())
            finally:
                self.cache_lock.release()
        else:
            print 'Alarm update not processing queue; locked.'
        if self.debug:
            tend = time.time()
            tlapse = tend - tstart
            print 'Took RSS2 Syndic %s secs to handle alarm event.' % tlapse
        return

    def render(self, request_path=None, cache_id=None):
        # Produce the RSS 2.0 document for one request, first folding any
        # queued events into the caches.
        if request_path is None:
            request_path = '/syndication'
        xmldoc = self.setup_xmldoc()
        channel = self.setup_channel(request_path)
        xmldoc.root_element.channel = channel
        self.cache_lock.acquire()
        try:
            self.trim_expired_caches()
            queue = self.event_queue.popqueue()
            if queue:
                self.process_events(queue)
        finally:
            self.cache_lock.release()
        items = self.get_items(cache_id)
        map(channel.items.append, items)
        return str(xmldoc)

    def setup_xmldoc(self):
        xmldoc = rss.XMLDoc()
        xmldoc.root_element = rss.RSS()
        return xmldoc

    def setup_channel(self, request_path):
        publish_time = time.time()
        channel = rss.Channel()
        channel.title = rss.Title("Network Building Mediator Alarms")
        if request_path[0] != '/':
            request_path = '/' + request_path
        url = self.uri_base + request_path
        channel.link = rss.Link(url)
        channel.description = rss.Description("RSS 2.0 feed of Network "
                                              "Building Mediator alarms.")
        if self.last_pub_time is not None:
            channel.last_build_date = rss.LastBuildDate(self.last_pub_time)
        self.last_pub_time = publish_time
        channel.generator = rss.Generator("Network Building "
                                          "Mediator Alarm Syndication")
        channel.docs = rss.Docs('http://blogs.law.harvard.edu/tech/rss')
        return channel

    def get_items(self, cid):
        """
        Get alarm event items that have not yet been returned to the client
        with ID "cid".  If the client ID is None, return all items.
        """
        count = None
        if cid:
            count = 250
        if cid not in self.caches:
            self.caches[cid] = ItemCache(self.caches[None])
        cache = self.caches[cid]
        return cache.read(count)

    def trim_expired_caches(self):
        # Drop closed events older than closed_event_ttl, then discard any
        # per-client cache that has been idle longer than cache_ttl.  The
        # caller must already hold cache_lock.
        if not self.cache_lock.locked():
            raise Exception('Must be locked to trim caches.')
        removed = []
        now = uptime.secs()
        allitems = self.caches[None]
        for guid, closed in self.close_events.items():
            if (now - closed) > self.closed_event_ttl:
                if guid in allitems:
                    del allitems[guid]
                del self.close_events[guid]
        for cid, cache in self.caches.items():
            if cid and (cache.since_touched() > self.cache_ttl):
                del self.caches[cid]
                removed.append(cid)
        if self.debug and removed:
            print 'Cache trim trimmed the following IDs: %s.' % (removed,)
        return removed

    def item_from_event(self, event):
        # Adapt an alarm event (or the source of a StateEvent) into an
        # RSS 2.0 item element.
        if isinstance(event, StateEvent):
            event = event.source
        return IRSS2ItemElement(event).as_item()
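

# Minimal usage sketch (hypothetical): `alarm_manager` stands in for whatever
# object provides IAlarmManager in the hosting framework; it is not defined
# in this module, so the example is left as a comment rather than live code.
# Because __new__ caches the syndicator on the manager itself, constructing
# RSS2AlarmManager twice with the same manager yields the same instance.
#
#     syndicator = RSS2AlarmManager(alarm_manager)
#     assert RSS2AlarmManager(alarm_manager) is syndicator
#     feed_xml = syndicator.render('/syndication', cache_id='client-42')
#     print feed_xml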