def wrapper(*args, **kw):
    tstart = timefunc()
    result = function(*args, **kw)
    tstop = timefunc()
    if DEBUG:
        msglog.debug(message % (tstop - tstart))
    return result
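# wrapper() above closes over timefunc, function, message, and DEBUG from an
# enclosing scope, i.e. it is the inner function of a timing decorator.  A
# minimal, self-contained sketch of that kind of decorator factory follows;
# the name "timed", the default message, and the use of print instead of
# msglog are assumptions for illustration, not taken from the source.
import time

DEBUG = True

def timed(message="call took %.6f seconds", timefunc=time.time):
    def decorate(function):
        def wrapper(*args, **kw):
            tstart = timefunc()
            result = function(*args, **kw)
            tstop = timefunc()
            if DEBUG:
                print(message % (tstop - tstart))
            return result
        return wrapper
    return decorate

@timed("slow_call took %.3f seconds")
def slow_call():
    time.sleep(0.1)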
def destroy_cleared(self):
    destroy = self.destroy.copy()
    self.destroy.clear()
    if debug:
        msglog.debug("%s destroying batches: \n%r\n" % (self, destroy))
    for batch in destroy:
        NORMAL.queue_noresult(batch.destroy, self.manager)
    return len(destroy)
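# destroy_cleared() above snapshots the shared destroy set with copy() and
# clears it before queueing the asynchronous batch.destroy calls, so a batch
# is only ever handed to the worker queue once.  A standalone sketch of a
# lock-guarded variant of that copy-then-clear drain idiom (PendingSet and
# its names are illustrative, not from the source):
import threading

class PendingSet(object):
    def __init__(self):
        self.pending = set()
        self.lock = threading.Lock()

    def add(self, item):
        with self.lock:
            self.pending.add(item)

    def drain(self):
        # Snapshot and clear under the lock so concurrent add() calls land in
        # the next drain instead of being lost or processed twice.
        with self.lock:
            snapshot = self.pending.copy()
            self.pending.clear()
        return snapshot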
def run(self):
    while not self.cancelled.isSet():
        try:
            packed = compact()
        except:
            msglog.error("Compactor failed to run compact().")
            msglog.exception(prefix="handled")
        else:
            msglog.debug("Compacted %d DB files." % packed)
        self.cancelled.wait(self.period)
    msglog.inform("Compactor thread exiting run-loop.")
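# run() above is a periodic worker loop: cancelled is presumably a
# threading.Event, so cancelled.wait(period) doubles as the sleep between
# compactions and as the early-exit point when the thread is cancelled.  A
# standalone sketch of the same loop shape (PeriodicWorker and its names are
# assumptions, not from the source):
import threading

class PeriodicWorker(threading.Thread):
    def __init__(self, work, period):
        threading.Thread.__init__(self)
        self.work = work
        self.period = period
        self.cancelled = threading.Event()

    def run(self):
        while not self.cancelled.isSet():
            try:
                self.work()
            except Exception:
                pass  # a real worker would log the failure, as run() does above
            self.cancelled.wait(self.period)

    def cancel(self):
        self.cancelled.set()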
def subscribe(self, manager):
    timeout = self.subscription_timeout
    self.subscription_manager = manager
    create = self.subscription_manager.create_polled_and_get
    result = create(self.nodemap, timeout, self.prime_timeout)
    self.sid = result["sid"]
    self.last_subscribed = time.time()
    self.nodes_changed = False
    if debug:
        total = len(self)
        primed = len(result["values"])
        msglog.debug("%s primed %d/%d values." % (self, primed, total))
    return result["values"]
def get_batch_manager(self):
    """
    Override default batch manager creation method to replace NodeFacade
    'manager' reference used by BM with ManagedNodeFacade instance.

    Somewhat of a hack based on knowledge of BM internals.
    """
    bm = super(ManagedNodeFacade, self).get_batch_manager()
    if not isinstance(bm.manager, ManagedNodeFacade):
        host = self.host
        bm.manager = host.as_remote_node("/services/Subscription Manager")
        msglog.debug("Replaced BM %s SM proxy with Managed Proxy." % bm)
    return bm
def fetch(self, params=(), **kwargs):
    response = {}
    params = dict(params)
    params.update(kwargs)
    query = params.get("query")
    start = params.get("start", 0)
    options = params.get("queryOptions")
    count = params.get("count", len(self.items) - start)
    stop = start + count
    if "clientId" in params:
        cid = params.get("clientId")
    else:
        cid = str(UUID())
    if "sort" in params:
        sort = params["sort"]
    else:
        sort = [{"attribute": "createdUTC", "descending": True}]
    if len(sort) != 1:
        raise ValueError("single-attribute sorting only, not: %r" % sort)
    attribute = sort[0]["attribute"]
    descending = sort[0].get("descending", False)
    client = self.client(cid)
    client.set_query(query, options)
    if attribute == "created" or attribute == "createdUTC":
        reverse = not descending
        collection = self.created
        guids = client.filtered(collection, start, stop, reverse=reverse)
    else:
        if DEBUG:
            msglog.debug("Event Store client fetching by: %r" % attribute)
        if attribute not in self.collections:
            raise ValueError("sort attribute unknown: %r" % attribute)
        guids = []
        collection = self.collections[attribute]
        for key in sorted(collection.keys(), reverse=descending):
            remaining = stop - len(guids)
            if remaining <= 0:
                break
            guids.extend(client.filtered(collection[key], 0, remaining))
        guids = guids[start:stop]
    items = client.getitems(guids)
    response["clientId"] = cid
    response["start"] = start
    response["items"] = items
    response["count"] = len(items)
    response["total"] = len(client)
    return response
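# The secondary-sort branch of fetch() above walks a {key: [guid, ...]} index
# in sorted key order, gathering at most stop guids before applying the
# start:stop slice (client.filtered() additionally applies the client's query
# filter, which this sketch omits).  A standalone illustration of that
# gathering pattern with invented sample data:
def gather(collection, start, count, descending=False):
    stop = start + count
    guids = []
    for key in sorted(collection.keys(), reverse=descending):
        remaining = stop - len(guids)
        if remaining <= 0:
            break
        guids.extend(collection[key][0:remaining])
    return guids[start:stop]

index = {"alarm": ["g1", "g2"], "fault": ["g3"], "info": ["g4", "g5"]}
print(gather(index, 1, 3))        # ['g2', 'g3', 'g4']
print(gather(index, 0, 2, True))  # ['g4', 'g5']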
def update(self, result):
    if not isinstance(result, Result):
        if isinstance(result, dict):
            result = Result.from_dict(result)
        else:
            message = "update() expects Result instance, not: %r"
            raise ValueError(message % result)
    self.synclock.acquire()
    try:
        if self.initialized():
            previous = self.get_value()
        else:
            previous = None
        self.result = result
        value = self.get_value()
        self.updated = uptime.secs()
    finally:
        self.synclock.release()
    if self.support_cov:
        self.event_generate(COVEvent(self, previous, value, time.time()))
    if self.debug:
        msglog.debug("%s updated: %r" % (self, result))
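# update() above swaps in the new Result while holding synclock but generates
# the COV event only after the lock has been released, so event consumers can
# never deadlock against the point's own lock.  A stripped-down sketch of that
# acquire/swap/release/notify ordering (Observable and its names are
# illustrative, not from the source):
import threading
import time

class Observable(object):
    def __init__(self):
        self.lock = threading.Lock()
        self.value = None
        self.listeners = []

    def update(self, value):
        self.lock.acquire()
        try:
            previous = self.value
            self.value = value
        finally:
            self.lock.release()
        # Notify outside the lock, mirroring event_generate(COVEvent(...)) above.
        for listener in self.listeners:
            listener(previous, value, time.time())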
def create_batches(self, nodemap):
    # MNB calls create-batches once with only the newly subscribed
    # point-map; for example, those MNRs added because a user
    # visited a single web page.
    #
    # After get-batch has been invoked once on the newly created batches,
    # create-batches is invoked again, this time with a point-map
    # containing all subscribed points.
    #
    # The point-map passed to the second invocation will be a
    # superset of the first's point-map.
    building = None
    self.clear_batches()
    for nid, node in nodemap.items():
        if (building is None) or (len(building) > self.MAXBATCH):
            building = RnaBatch(self)
            self.batches.add(building)
        building[nid] = node
    self.prime.update(self.batches)
    if debug:
        msglog.debug("%s created batches: \n%r\n" % (self, self.batches))
    return list(self.batches)
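# create_batches() above packs (nid, node) pairs into RnaBatch objects,
# starting a new batch once the one being built exceeds MAXBATCH entries
# (note the > comparison: each batch can hold MAXBATCH + 1 nodes before
# rollover).  A standalone sketch of that chunking rule using plain dicts
# and an invented MAXBATCH value:
MAXBATCH = 3

def chunk(nodemap):
    batches = []
    building = None
    for nid, node in nodemap.items():
        if building is None or len(building) > MAXBATCH:
            building = {}
            batches.append(building)
        building[nid] = node
    return batches

nodemap = dict((i, "node%d" % i) for i in range(7))
print([len(batch) for batch in chunk(nodemap)])  # [4, 3] with the > check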
def destroy(self, manager=None):
    if not manager:
        manager = self.subscription_manager
    if manager and self.subscribed():
        if self.timeout_occurred:
            message = ("Batch %s destruction leaving remote"
                       " subscription: timeout had occurred.")
            msglog.warn(message % self)
        else:
            try:
                manager.destroy(self.sid)
            except:
                message = "%s failed to destroy subscription %s on %s."
                msglog.log("broadway", msglog.types.WARN,
                           message % (self, self.sid, manager))
                msglog.exception(prefix="handled")
            else:
                if debug:
                    message = "Batch %s destroyed remote subscription."
                    msglog.debug(message % self)
    self.clear()
    self.sid = None
    self.subscription_manager = None
def handle_request(self, request):
    exceptions = []
    userobject = request.user_object()
    if not userobject:
        username = request.get_cookie("NBMUSER", None)
        if not username:
            raise Unauthorized("User unknown")
        address = request.get_address()
        if not self._cloud_manager.is_host_in_formation(address):
            raise Unauthorized("Address unrecognized: " + address)
        message = "Getting user because address/username: %r, %r"
        msglog.debug(message % (address, username))
        userobject = self.user_manager.get_user(username)
    request_data = request.get_post_data_as_dictionary()
    request_data.update(request.get_query_string_as_dictionary())
    if "command" in request_data and "guid" in request_data:
        exceptions = []
        path = request.get_path()
        username = userobject.name()
        method = request.get_command()
        protocol = request.get_protocol()
        command = request_data["command"][0]
        user = self.user_service.user_from_object(userobject)
        # Group the requested events by origin so local events are notified
        # directly and remote events are replayed to the host they came from.
        origins = {}
        for guid in request_data["guid"]:
            try:
                event = AlarmEvent.get_event(guid)
            except KeyError:
                msglog.warn("Attempt to get event %r failed." % guid)
            else:
                origins.setdefault(event.origin, []).append(event)
        for origin, events in origins.items():
            if origin == AlarmEvent.LOCALORIGIN:
                for event in events:
                    message = "User '%s'" % username
                    if self.secured:
                        event = query_multi_adapter((event, user), ISecure)
                    try:
                        event.notify(command, self, time.time(), message)
                    except Exception, error:
                        msg = "Notify %r of %r failed." % (event, command)
                        msglog.log("broadway", msglog.types.WARN, msg)
                        msglog.exception(prefix="handled")
                        # CSCte94039 - form the proper error msg
                        exceptions.append((command, str(error)))
            else:
                headers = {"Cookie": "NBMUSER=%s" % userobject.name()}
                guids = [event.GUID for event in events]
                parameters = [("command", command)]
                parameters.extend([("guid", guid) for guid in guids])
                data = urllib.urlencode(parameters)
                url = "%s://%s%s" % (protocol, origin, path)
                if method == "GET":
                    url = "?".join([url, data])
                    data = None
                redirect = urllib2.Request(url, data, headers)
                try:
                    urllib2.urlopen(redirect).read()
                except Exception, error:
                    message = "'%s' remote events on '%s' failed."
                    msglog.warn(message % (command, origin))
                    msglog.exception(prefix="handled")
                    exceptions.append((origin, command, error))
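# For events whose origin is another host, handle_request() above replays the
# command over HTTP to that origin, forwarding the NBMUSER cookie and encoding
# the command plus one guid parameter per event.  A standalone sketch of how
# that redirect request is assembled (host, path, and user values are
# invented; nothing is sent unless urlopen() is called):
import urllib
import urllib2

def build_redirect(protocol, origin, path, command, guids, username, method="GET"):
    headers = {"Cookie": "NBMUSER=%s" % username}
    parameters = [("command", command)] + [("guid", guid) for guid in guids]
    data = urllib.urlencode(parameters)
    url = "%s://%s%s" % (protocol, origin, path)
    if method == "GET":
        # GET replay: move the encoded parameters into the query string.
        url = "?".join([url, data])
        data = None
    return urllib2.Request(url, data, headers)

redirect = build_redirect("https", "192.168.1.20", "/alarmhandler",
                          "acknowledge", ["abc-123", "def-456"], "operator")
# urllib2.urlopen(redirect).read() would dispatch it, as the handler does.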
def logdebug(self, message, *args, **kw):
    if kw.get("level", 1) <= DEBUG:
        if args:
            message = message % args
        typename = type(self).__name__
        msglog.debug("%s(%r): %s." % (typename, self.filename, message))
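# logdebug() above gates output on a module-level DEBUG threshold and defers
# the % interpolation until the message will actually be emitted.  A
# self-contained stand-in showing the calling convention; FileWorker is
# invented, and the stdlib logging module replaces msglog so the sketch runs
# on its own:
import logging

DEBUG = 2
logging.basicConfig(level=logging.DEBUG)

class FileWorker(object):
    def __init__(self, filename):
        self.filename = filename

    def logdebug(self, message, *args, **kw):
        if kw.get("level", 1) <= DEBUG:
            if args:
                message = message % args
            typename = type(self).__name__
            logging.debug("%s(%r): %s." % (typename, self.filename, message))

worker = FileWorker("/tmp/example.db")
worker.logdebug("compacted %d records", 42, level=2)  # emitted: 2 <= DEBUG
worker.logdebug("raw page dump: %r", "...", level=5)  # suppressed: 5 > DEBUG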