class TelemetryStore(CollectionState):
    """
    Accepts telemetry messages and exports the accumulated information
    obtained from them.

    Objects built from messages are kept until their expiry deadline
    passes; expiry is enforced by a self-rescheduling reactor call.
    """
    implements(ITelemetryStore)

    def __init__(self, time_source=the_reactor):
        # Exported (dynamic) cell of the objects considered "interesting".
        self.__interesting_objects = CellDict(dynamic=True)
        CollectionState.__init__(self, self.__interesting_objects)
        # All known objects, interesting or not, keyed by object id.
        self.__objects = {}
        # object_id -> absolute expiry time (seconds, time_source clock).
        self.__expiry_times = {}
        self.__time_source = IReactorTime(time_source)
        # Pending IDelayedCall for __flush_expired, if any.
        self.__flush_call = None

    # not exported
    def receive(self, message):
        """Store the supplied telemetry message object."""
        message = ITelemetryMessage(message)
        object_id = unicode(message.get_object_id())

        if object_id in self.__objects:
            obj = self.__objects[object_id]
        else:
            obj = self.__objects[object_id] = ITelemetryObject(
                # TODO: Should probably have a context object supplying last message time and delete_me()
                message.get_object_constructor()(object_id=object_id))
        obj.receive(message)
        expiry = obj.get_object_expiry()
        self.__expiry_times[object_id] = expiry
        if obj.is_interesting():
            self.__interesting_objects[object_id] = obj
        self.__maybe_schedule_flush()

    def __flush_expired(self):
        """Drop every object whose expiry deadline has passed."""
        current_time = self.__time_source.seconds()
        deletes = []
        for object_id, expiry in self.__expiry_times.iteritems():
            if expiry <= current_time:
                deletes.append(object_id)
        for object_id in deletes:
            del self.__objects[object_id]
            del self.__expiry_times[object_id]
            if object_id in self.__interesting_objects:
                del self.__interesting_objects[object_id]
        self.__maybe_schedule_flush()

    def __maybe_schedule_flush(self):
        """Schedule a call to __flush_expired if there is not one already."""
        if self.__flush_call and self.__flush_call.active():
            # Could need to schedule one earlier than already scheduled.
            self.__flush_call.cancel()
        if self.__expiry_times:
            now = self.__time_source.seconds()
            next_expiry = min(self.__expiry_times.itervalues())
            # Clamp to zero: an already-passed expiry would otherwise produce
            # a negative delay, which reactor.callLater rejects.
            self.__flush_call = self.__time_source.callLater(
                max(0, next_expiry - now), self.__flush_expired)
async def _handle_json_response(
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
    response: IResponse,
    start_ms: int,
):
    """
    Reads the JSON body of a response, with a timeout

    Args:
        reactor: twisted reactor, for the timeout
        timeout_sec: number of seconds to wait for response to complete
        request: the request that triggered the response
        response: response to the request
        start_ms: Timestamp when request was made

    Returns:
        dict: parsed JSON response

    Raises:
        RequestSendFailed: if reading the body timed out (retryable)
    """
    # Local import keeps this fix self-contained; the file already depends on
    # twisted (IReactorTime, IResponse).
    from twisted.internet import defer

    try:
        check_content_type_is_json(response.headers)

        d = treq.json_content(response)
        d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)

        body = await make_deferred_yieldable(d)
    except (defer.TimeoutError, TimeoutError) as e:
        # timeout_deferred fails with twisted's defer.TimeoutError, which is
        # NOT the builtin TimeoutError; catching only the builtin (as this
        # code previously did) let timeouts fall into the generic handler
        # below and be re-raised as non-retryable.
        logger.warning(
            "{%s} [%s] Timed out reading response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=True) from e
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response %s %s: %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
            e,
        )
        raise

    # start_ms is in milliseconds; reactor.seconds() is in seconds.
    time_taken_secs = reactor.seconds() - start_ms / 1000

    logger.info(
        "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        time_taken_secs,
        request.method,
        request.uri.decode("ascii"),
    )
    return body
class SOCKSWrapper(object):
    """
    IStreamClientEndpoint wrapper that connects to a SOCKSv4 proxy at
    (host, port) and, once the handshake is done, hands the connection over
    to the wrapped endpoint's protocol factory. Optionally records timing
    events into a caller-supplied mapping.
    """
    implements(IStreamClientEndpoint)
    factory = SOCKSv4ClientFactory

    def __init__(self, reactor, host, port, endpoint, timestamps=None):
        # endpoint: the post-handshake endpoint handed to the SOCKS factory.
        # timestamps: optional mutable mapping; when provided, noteTime()
        # records event name -> reactor time into it.
        self._host = host
        self._port = port
        self._reactor = reactor
        self._endpoint = endpoint
        self._timestamps = None
        self._timer = None
        if timestamps is not None:
            self._timestamps = timestamps
            self._timer = IReactorTime(reactor)

    def noteTime(self, event):
        # Record the current reactor time under `event`, if timing is enabled.
        if self._timer:
            self._timestamps[event] = self._timer.seconds()

    def connect(self, protocolFactory):
        """
        Return a deferred firing when the SOCKS connection is established.
        """
        def createWrappingFactory(f):
            """
            Wrap creation of _WrappingFactory since __init__() doesn't
            take a canceller as of Twisted 12.1 or something.
            """
            # Old _WrappingFactory.__init__ took (self, f, canceller) == 3 args.
            if len(inspect.getargspec(_WrappingFactory.__init__)[0]) == 3:
                def _canceller(deferred):
                    # NOTE(review): `connector` is not bound in any enclosing
                    # scope here (the connectTCP() result below is discarded),
                    # so cancelling on pre-12.1 Twisted would raise NameError —
                    # confirm against the Twisted versions actually supported.
                    connector.stopConnecting()
                    deferred.errback(
                        error.ConnectingCancelledError(
                            connector.getDestination()))
                return _WrappingFactory(f, _canceller)
            else:
                # Twisted >= 12.1.
                return _WrappingFactory(f)

        self.noteTime('START')
        try:
            # Connect with an intermediate SOCKS factory/protocol,
            # which then hands control to the provided protocolFactory
            # once a SOCKS connection has been established.
            f = self.factory()
            f.postHandshakeEndpoint = self._endpoint
            f.postHandshakeFactory = protocolFactory
            f.handshakeDone = defer.Deferred()
            f._timestamps = self._timestamps
            f._timer = self._timer
            wf = createWrappingFactory(f)
            self._reactor.connectTCP(self._host, self._port, wf)
            self.noteTime('SOCKET')
            return f.handshakeDone
        except:
            # Bare except is deliberate: defer.fail() with no argument wraps
            # whatever exception is currently being handled.
            return defer.fail()
class SOCKSWrapper(object):
    """
    IStreamClientEndpoint wrapper that performs a SOCKSv4 handshake with the
    proxy at (host, port) before handing the connection to the wrapped
    endpoint's protocol factory. Optionally records timing events into a
    caller-supplied mapping.
    """
    implements(IStreamClientEndpoint)
    factory = SOCKSv4ClientFactory

    def __init__(self, reactor, host, port, endpoint, timestamps=None):
        # endpoint: the post-handshake endpoint handed to the SOCKS factory.
        # timestamps: optional mutable mapping; when provided, noteTime()
        # records event name -> reactor time into it.
        self._host = host
        self._port = port
        self._reactor = reactor
        self._endpoint = endpoint
        self._timestamps = None
        self._timer = None
        if timestamps is not None:
            self._timestamps = timestamps
            self._timer = IReactorTime(reactor)

    def noteTime(self, event):
        # Record the current reactor time under `event`, if timing is enabled.
        if self._timer:
            self._timestamps[event] = self._timer.seconds()

    def connect(self, protocolFactory):
        """
        Return a deferred firing when the SOCKS connection is established.

        Synchronous failures are converted into a failed deferred rather
        than raised.
        """
        def createWrappingFactory(f):
            """
            Wrap creation of _WrappingFactory since __init__() doesn't
            take a canceller as of Twisted 12.1 or something.
            """
            if len(inspect.getargspec(_WrappingFactory.__init__)[0]) == 3:
                def _canceller(deferred):
                    # `connector` is the connect() local bound below when
                    # connectTCP() returns; previously the connectTCP()
                    # result was discarded and `connector` was unbound, so
                    # cancellation raised NameError instead of aborting the
                    # connection attempt.
                    connector.stopConnecting()
                    deferred.errback(
                        error.ConnectingCancelledError(
                            connector.getDestination()))
                return _WrappingFactory(f, _canceller)
            else:
                # Twisted >= 12.1.
                return _WrappingFactory(f)

        self.noteTime("START")
        try:
            # Connect with an intermediate SOCKS factory/protocol,
            # which then hands control to the provided protocolFactory
            # once a SOCKS connection has been established.
            f = self.factory()
            f.postHandshakeEndpoint = self._endpoint
            f.postHandshakeFactory = protocolFactory
            f.handshakeDone = defer.Deferred()
            f._timestamps = self._timestamps
            f._timer = self._timer
            wf = createWrappingFactory(f)
            # Keep the connector so _canceller (above) can stop it.
            connector = self._reactor.connectTCP(self._host, self._port, wf)
            self.noteTime("SOCKET")
            return f.handshakeDone
        except Exception:
            # defer.fail() with no args wraps the active exception. Narrowed
            # from a bare except so KeyboardInterrupt/SystemExit propagate.
            return defer.fail()
class TelemetryStore(CollectionState):
    """
    Accepts telemetry messages and exports the accumulated information
    obtained from them.
    """
    implements(ITelemetryStore)

    def __init__(self, time_source=the_reactor):
        self.__interesting_objects = {}
        CollectionState.__init__(self, self.__interesting_objects, dynamic=True)
        self.__objects = {}
        self.__expiry_times = {}
        self.__time_source = IReactorTime(time_source)

    # not exported
    def receive(self, message):
        """Store the supplied telemetry message object."""
        message = ITelemetryMessage(message)
        key = unicode(message.get_object_id())

        target = self.__objects.get(key)
        if target is None:
            # TODO: Should probably have a context object supplying last message time and delete_me()
            target = ITelemetryObject(
                message.get_object_constructor()(object_id=key))
            self.__objects[key] = target
        target.receive(message)
        self.__expiry_times[key] = target.get_object_expiry()
        if target.is_interesting():
            self.__interesting_objects[key] = target
        # Flushing here is logically independent of receiving, but it lets us
        # reuse the receive time instead of reading the system clock ourselves.
        # TODO: Doesn't handle expiry while no messages are arriving.
        self.__flush_expired()

    def __flush_expired(self):
        """Discard every stored object whose expiry deadline has passed."""
        now = self.__time_source.seconds()
        expired = [key for key, deadline in self.__expiry_times.iteritems()
                   if deadline <= now]
        for key in expired:
            del self.__objects[key]
            del self.__expiry_times[key]
            self.__interesting_objects.pop(key, None)
class TelemetryStore(CollectionState):
    """
    Accepts telemetry messages and exports the accumulated information
    obtained from them.

    Objects built from messages are kept until their expiry deadline
    passes; expiry is enforced by a self-rescheduling reactor call, so
    objects are dropped even while no messages are arriving.
    """
    implements(ITelemetryStore)

    def __init__(self, time_source=the_reactor):
        # Exported (dynamic) cell of the objects considered "interesting".
        self.__interesting_objects = CellDict(dynamic=True)
        CollectionState.__init__(self, self.__interesting_objects)
        # All known objects, interesting or not, keyed by object id.
        self.__objects = {}
        # object_id -> absolute expiry time (seconds, time_source clock).
        self.__expiry_times = {}
        self.__time_source = IReactorTime(time_source)
        # Pending IDelayedCall for __flush_expired, if any.
        self.__flush_call = None

    # not exported
    def receive(self, message):
        """Store the supplied telemetry message object."""
        message = ITelemetryMessage(message)
        object_id = unicode(message.get_object_id())

        if object_id in self.__objects:
            obj = self.__objects[object_id]
        else:
            obj = self.__objects[object_id] = ITelemetryObject(
                # TODO: Should probably have a context object supplying last message time and delete_me()
                message.get_object_constructor()(object_id=object_id))
        obj.receive(message)
        self.__expiry_times[object_id] = obj.get_object_expiry()
        if obj.is_interesting():
            self.__interesting_objects[object_id] = obj
        # Previously expiry was only checked on receive, so objects lingered
        # forever once messages stopped; scheduling the flush fixes that.
        self.__maybe_schedule_flush()

    def __flush_expired(self):
        """Drop every object whose expiry deadline has passed."""
        current_time = self.__time_source.seconds()
        deletes = []
        for object_id, expiry in self.__expiry_times.iteritems():
            if expiry <= current_time:
                deletes.append(object_id)
        for object_id in deletes:
            del self.__objects[object_id]
            del self.__expiry_times[object_id]
            if object_id in self.__interesting_objects:
                del self.__interesting_objects[object_id]
        self.__maybe_schedule_flush()

    def __maybe_schedule_flush(self):
        """Schedule a call to __flush_expired if there is not one already."""
        if self.__flush_call and self.__flush_call.active():
            # Could need to schedule one earlier than already scheduled.
            self.__flush_call.cancel()
        if self.__expiry_times:
            now = self.__time_source.seconds()
            next_expiry = min(self.__expiry_times.itervalues())
            # Clamp to zero: callLater rejects negative delays.
            self.__flush_call = self.__time_source.callLater(
                max(0, next_expiry - now), self.__flush_expired)
async def _handle_response(
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
    response: IResponse,
    start_ms: int,
    parser: ByteParser[T],
    max_response_size: Optional[int] = None,
) -> T:
    """
    Reads the body of a response with a timeout and sends it to a parser

    Args:
        reactor: twisted reactor, for the timeout
        timeout_sec: number of seconds to wait for response to complete
        request: the request that triggered the response
        response: response to the request
        start_ms: Timestamp when request was made
        parser: The parser for the response
        max_response_size: The maximum size to read from the response, if None
            uses the default.

    Returns:
        The parsed response

    Raises:
        RequestSendFailed: oversize/invalid bodies are non-retryable;
            timeouts and connection failures are retryable.
    """
    if max_response_size is None:
        max_response_size = MAX_RESPONSE_SIZE

    try:
        check_content_type_is(response.headers, parser.CONTENT_TYPE)

        d = read_body_with_max_size(response, parser, max_response_size)
        d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor)

        length = await make_deferred_yieldable(d)

        value = parser.finish()
    # Specific failure modes first; the bare Exception handler below is the
    # catch-all for anything else.
    except BodyExceededMaxSize as e:
        # The response was too big.
        logger.warning(
            "{%s} [%s] JSON response exceeded max size %i - %s %s",
            request.txn_id,
            request.destination,
            # Log the effective limit; previously this logged the constant
            # MAX_RESPONSE_SIZE even when a custom max_response_size applied.
            max_response_size,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=False) from e
    except ValueError as e:
        # The content was invalid.
        logger.warning(
            "{%s} [%s] Failed to parse response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=False) from e
    except defer.TimeoutError as e:
        logger.warning(
            "{%s} [%s] Timed out reading response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=True) from e
    except ResponseFailed as e:
        logger.warning(
            "{%s} [%s] Failed to read response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=True) from e
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response %s %s: %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
            e,
        )
        raise

    # start_ms is in milliseconds; reactor.seconds() is in seconds.
    time_taken_secs = reactor.seconds() - start_ms / 1000

    logger.info(
        "{%s} [%s] Completed request: %d %s in %.2f secs, got %d bytes - %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        time_taken_secs,
        length,
        request.method,
        request.uri.decode("ascii"),
    )

    return value
class TelemetryStore(CollectionState):
    """
    Accepts telemetry messages and exports the accumulated information
    obtained from them.
    """

    def __init__(self, time_source=the_reactor):
        self.__interesting_objects = CellDict(dynamic=True)
        CollectionState.__init__(self, self.__interesting_objects)
        self.__objects = {}
        self.__expiry_times = {}
        self.__time_source = IReactorTime(time_source)
        self.__flush_call = None

    # not exported
    def receive(self, message):
        """Store the supplied telemetry message object."""
        message = ITelemetryMessage(message)
        key = unicode(message.get_object_id())

        try:
            entry = self.__objects[key]
        except KeyError:
            # TODO: Should probably have a context object supplying last message time and delete_me()
            entry = ITelemetryObject(
                message.get_object_constructor()(object_id=key))
            self.__objects[key] = entry
        entry.receive(message)
        self.__expiry_times[key] = entry.get_object_expiry()
        if entry.is_interesting():
            self.__interesting_objects[key] = entry
        self.__maybe_schedule_flush()

    def __flush_expired(self):
        """Discard every stored object whose expiry deadline has passed."""
        now = self.__time_source.seconds()
        expired = [key for key, deadline in self.__expiry_times.iteritems()
                   if deadline <= now]
        for key in expired:
            del self.__objects[key]
            del self.__expiry_times[key]
            if key in self.__interesting_objects:
                del self.__interesting_objects[key]
        self.__maybe_schedule_flush()

    def __maybe_schedule_flush(self):
        """Schedule a call to __flush_expired if there is not one already."""
        pending = self.__flush_call
        if pending and pending.active():
            # An earlier expiry may require rescheduling sooner than the
            # existing call, so drop it and schedule afresh.
            pending.cancel()
        if not self.__expiry_times:
            return
        now = self.__time_source.seconds()
        soonest = min(self.__expiry_times.itervalues())
        # Never hand callLater a negative delay.
        self.__flush_call = self.__time_source.callLater(
            max(0, soonest - now), self.__flush_expired)
async def _handle_json_response(
    reactor: IReactorTime,
    timeout_sec: float,
    request: MatrixFederationRequest,
    response: IResponse,
    start_ms: int,
) -> JsonDict:
    """
    Read and JSON-decode the body of a response, enforcing a timeout.

    Args:
        reactor: twisted reactor, used for the timeout
        timeout_sec: how long to wait for the body to arrive, in seconds
        request: the request which produced this response
        response: the response whose body should be read
        start_ms: timestamp (in milliseconds) at which the request was made

    Returns:
        The parsed JSON response
    """
    try:
        check_content_type_is_json(response.headers)

        json_buf = StringIO()
        deferred = read_body_with_max_size(
            response, BinaryIOWrapper(json_buf), MAX_RESPONSE_SIZE
        )
        deferred = timeout_deferred(deferred, timeout=timeout_sec, reactor=reactor)

        def _decode(_length: int):
            # The body is complete; decode the accumulated text.
            return json_decoder.decode(json_buf.getvalue())

        deferred.addCallback(_decode)

        body = await make_deferred_yieldable(deferred)
    except BodyExceededMaxSize as e:
        # The response was too big.
        logger.warning(
            "{%s} [%s] JSON response exceeded max size %i - %s %s",
            request.txn_id,
            request.destination,
            MAX_RESPONSE_SIZE,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=False) from e
    except ValueError as e:
        # The JSON content was invalid.
        logger.warning(
            "{%s} [%s] Failed to parse JSON response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=False) from e
    except defer.TimeoutError as e:
        logger.warning(
            "{%s} [%s] Timed out reading response - %s %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
        )
        raise RequestSendFailed(e, can_retry=True) from e
    except Exception as e:
        logger.warning(
            "{%s} [%s] Error reading response %s %s: %s",
            request.txn_id,
            request.destination,
            request.method,
            request.uri.decode("ascii"),
            e,
        )
        raise

    time_taken_secs = reactor.seconds() - (start_ms / 1000)

    logger.info(
        "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s",
        request.txn_id,
        request.destination,
        response.code,
        response.phrase.decode("ascii", errors="replace"),
        time_taken_secs,
        request.method,
        request.uri.decode("ascii"),
    )
    return body