def request(self, method, uri, *args, **kwargs):
    """Send an HTTP request via the underlying agent, with metrics and a timeout.

    A small wrapper around self.agent.request() so we can easily attach
    counters to it, log the request/response, and enforce a 60s timeout.

    Args:
        method (str): HTTP method (e.g. "GET").
        uri (str): URI to request.
        *args, **kwargs: passed straight through to ``self.agent.request``.

    Returns:
        Deferred: resolves with the response object on success.
    """
    outgoing_requests_counter.inc(method)

    logger.info("Sending request %s %s", method, uri)

    try:
        request_deferred = self.agent.request(
            method, uri, *args, **kwargs
        )
        # Cancel the request (converted to a timed-out error) if no
        # response arrives within 60 seconds.
        add_timeout_to_deferred(
            request_deferred,
            60, cancelled_to_request_timed_out_error,
        )
        response = yield make_deferred_yieldable(request_deferred)

        incoming_responses_counter.inc(method, response.code)
        logger.info(
            "Received response to %s %s: %s",
            method, uri, response.code
        )
        defer.returnValue(response)
    except Exception as e:
        incoming_responses_counter.inc(method, "ERR")
        # Log the exception object itself rather than the deprecated
        # `e.message` attribute (removed in Python 3).
        logger.info(
            "Error sending request to %s %s: %s %s",
            method, uri, type(e).__name__, e
        )
        # Bare `raise` re-raises with the original traceback intact,
        # unlike `raise e` which would discard it.
        raise
def wait_for_events(self, user_id, timeout, callback, room_ids=None,
                    from_token=StreamToken.START):
    """Wait until the callback returns a non empty response or the
    timeout fires.

    Args:
        user_id: the user to wait for events for.
        timeout: how long to wait, in milliseconds. If falsy, the
            callback is invoked exactly once without waiting.
        callback: called as ``callback(prev_token, current_token)``;
            waiting stops as soon as it returns a truthy value.
        room_ids: rooms to listen on; looked up from the store if None.
        from_token: stream token to start from (defaults to the start
            of the stream).

    Returns:
        Deferred: resolves with the (truthy, or final) callback result.
    """
    # Reuse an existing per-user stream if one is registered, otherwise
    # build one at the current stream position and register it.
    user_stream = self.user_to_user_stream.get(user_id)
    if user_stream is None:
        current_token = yield self.event_sources.get_current_token()
        if room_ids is None:
            room_ids = yield self.store.get_rooms_for_user(user_id)
        user_stream = _NotifierUserStream(
            user_id=user_id,
            rooms=room_ids,
            current_token=current_token,
            time_now_ms=self.clock.time_msec(),
        )
        self._register_with_keys(user_stream)

    result = None
    prev_token = from_token
    if timeout:
        end_time = self.clock.time_msec() + timeout

        while not result:
            try:
                now = self.clock.time_msec()
                if end_time <= now:
                    break

                # Now we wait for the _NotifierUserStream to be told there
                # is a new token.
                listener = user_stream.new_listener(prev_token)
                # Cap the wait at whatever is left of the overall timeout
                # (milliseconds -> seconds).
                add_timeout_to_deferred(
                    listener.deferred,
                    (end_time - now) / 1000.,
                )
                with PreserveLoggingContext():
                    yield listener.deferred

                current_token = user_stream.current_token

                result = yield callback(prev_token, current_token)
                if result:
                    break

                # Update the prev_token to the current_token since nothing
                # has happened between the old prev_token and the current_token
                prev_token = current_token
            except DeferredTimeoutError:
                # Overall timeout expired while waiting for a new token;
                # fall through to the final callback invocation below.
                break
            except defer.CancelledError:
                break

    if result is None:
        # This happened if there was no timeout or if the timeout had
        # already expired.
        current_token = user_stream.current_token
        result = yield callback(prev_token, current_token)

    defer.returnValue(result)
def request(self, method, uri, *args, **kwargs):
    """Send an HTTP request via the underlying agent, with metrics and a timeout.

    A small wrapper around self.agent.request() so we can easily attach
    counters to it, log the (redacted) request/response, and enforce a
    60s timeout.

    Args:
        method (str): HTTP method (e.g. "GET").
        uri (str): URI to request.
        *args, **kwargs: passed straight through to ``self.agent.request``.

    Returns:
        Deferred: resolves with the response object on success.
    """
    outgoing_requests_counter.labels(method).inc()

    # log request but strip `access_token` (AS requests for example include this)
    logger.info("Sending request %s %s", method, redact_uri(uri))

    try:
        request_deferred = self.agent.request(
            method, uri, *args, **kwargs
        )
        # Cancel the request (converted to a timed-out error) if no
        # response arrives within 60 seconds.
        add_timeout_to_deferred(
            request_deferred, 60, self.hs.get_reactor(),
            cancelled_to_request_timed_out_error,
        )
        response = yield make_deferred_yieldable(request_deferred)

        incoming_responses_counter.labels(method, response.code).inc()
        logger.info(
            "Received response to %s %s: %s",
            method, redact_uri(uri), response.code
        )
        defer.returnValue(response)
    except Exception as e:
        incoming_responses_counter.labels(method, "ERR").inc()
        # Log the exception object itself: `e.message` is deprecated and
        # does not exist on Python 3 exceptions.
        logger.info(
            "Error sending request to %s %s: %s %s",
            method, redact_uri(uri), type(e).__name__, e
        )
        raise
def wait_for_replication(self, callback, timeout):
    """Wait for an event to happen.

    Args:
        callback: Gets called whenever an event happens. If this returns a
            truthy value then ``wait_for_replication`` returns, otherwise
            it waits for another event.
        timeout: How many milliseconds to wait for callback return a truthy
            value.

    Returns:
        A deferred that resolves with the value returned by the callback.
    """
    listener = _NotificationListener(None)

    end_time = self.clock.time_msec() + timeout

    while True:
        # Observe the replication deferred *before* invoking the callback,
        # so an event firing during the callback is not missed.
        listener.deferred = self.replication_deferred.observe()
        result = yield callback()
        if result:
            break

        now = self.clock.time_msec()
        if end_time <= now:
            break

        # BUGFIX: pass the deferred itself, not its bound `addTimeout`
        # method — previously the timeout was never armed on the
        # deferred we actually wait on.
        add_timeout_to_deferred(
            listener.deferred, (end_time - now) / 1000.,
            self.hs.get_reactor(),
        )
        try:
            with PreserveLoggingContext():
                yield listener.deferred
        except DeferredTimeoutError:
            break
        except defer.CancelledError:
            break

    defer.returnValue(result)
def wait_for_replication(self, callback, timeout):
    """Wait for an event to happen.

    Args:
        callback: Gets called whenever an event happens. If this returns a
            truthy value then ``wait_for_replication`` returns, otherwise
            it waits for another event.
        timeout: How many milliseconds to wait for callback return a truthy
            value.

    Returns:
        A deferred that resolves with the value returned by the callback.
    """
    listener = _NotificationListener(None)

    end_time = self.clock.time_msec() + timeout

    while True:
        # Observe the replication deferred *before* invoking the callback,
        # so an event firing during the callback is not missed.
        listener.deferred = self.replication_deferred.observe()
        result = yield callback()
        if result:
            break

        now = self.clock.time_msec()
        if end_time <= now:
            break

        # BUGFIX: pass the deferred itself, not its bound `addTimeout`
        # method — previously the timeout was never armed on the
        # deferred we actually wait on.
        add_timeout_to_deferred(
            listener.deferred, (end_time - now) / 1000.,
        )
        try:
            with PreserveLoggingContext():
                yield listener.deferred
        except DeferredTimeoutError:
            break
        except defer.CancelledError:
            break

    defer.returnValue(result)
def _request(self, destination, method, path,
             body_callback, headers_dict=None, param_bytes=b"",
             query_bytes=b"", retry_on_dns_fail=True,
             timeout=None, long_retries=False,
             ignore_backoff=False,
             backoff_on_404=False):
    """ Creates and sends a request to the given server
    Args:
        destination (str): The remote server to send the HTTP request to.
        method (str): HTTP method
        path (str): The HTTP path
        ignore_backoff (bool): true to ignore the historical backoff data and
            try the request anyway.
        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HTTPRequestException``: if we get an HTTP response
        code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.

        Fails with ``FederationDeniedError`` if this destination
        is not on our federation whitelist

        (May also fail with plenty of other Exceptions for things like DNS
        failures, connection failures, SSL failures.)
    """
    # BUGFIX: the default used to be a mutable `{}` shared across all
    # calls; the body mutates it (User-Agent/Host below), so headers
    # leaked between requests. Use a None sentinel instead.
    if headers_dict is None:
        headers_dict = {}

    if (self.hs.config.federation_domain_whitelist and
            destination not in self.hs.config.federation_domain_whitelist):
        raise FederationDeniedError(destination)

    limiter = yield synapse.util.retryutils.get_retry_limiter(
        destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    destination = destination.encode("ascii")
    path_bytes = path.encode("ascii")
    with limiter:
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = self._create_url(
            destination, path_bytes, param_bytes, query_bytes
        )

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (sys.maxint - 1)

        outbound_logger.info(
            "{%s} [%s] Sending request: %s %s",
            txn_id, destination, method, url_bytes
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        http_url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "")
        )

        log_result = None
        try:
            while True:
                producer = None
                if body_callback:
                    producer = body_callback(method, http_url_bytes, headers_dict)

                try:
                    request_deferred = self.agent.request(
                        method,
                        url_bytes,
                        Headers(headers_dict),
                        producer
                    )
                    # Enforce the caller's timeout (ms -> s), or 60s default.
                    add_timeout_to_deferred(
                        request_deferred,
                        timeout / 1000. if timeout else 60,
                        cancelled_to_request_timed_out_error,
                    )
                    response = yield make_deferred_yieldable(
                        request_deferred,
                    )

                    log_result = "%d %s" % (response.code, response.phrase,)
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn(
                            "DNS Lookup failed to %s with %s",
                            destination, e
                        )
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e
                        )
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s",
                        txn_id,
                        destination,
                        method,
                        url_bytes,
                        _flatten_response_never_received(e),
                    )

                    log_result = _flatten_response_never_received(e)

                    if retries_left and not timeout:
                        # Exponential backoff with jitter; long retries
                        # back off up to 60s, short retries up to 2s.
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        yield sleep(delay)
                        retries_left -= 1
                    else:
                        raise
        finally:
            outbound_logger.info(
                "{%s} [%s] Result: %s",
                txn_id,
                destination,
                log_result,
            )

    if 200 <= response.code < 300:
        pass
    else:
        # :'(
        # Update transactions table?
        with logcontext.PreserveLoggingContext():
            body = yield readBody(response)
        raise HttpResponseException(
            response.code, response.phrase, body
        )

    defer.returnValue(response)
def _request(self, destination, method, path,
             body_callback, headers_dict=None, param_bytes=b"",
             query_bytes=b"", retry_on_dns_fail=True,
             timeout=None, long_retries=False,
             ignore_backoff=False,
             backoff_on_404=False):
    """ Creates and sends a request to the given server
    Args:
        destination (str): The remote server to send the HTTP request to.
        method (str): HTTP method
        path (str): The HTTP path
        ignore_backoff (bool): true to ignore the historical backoff data and
            try the request anyway.
        backoff_on_404 (bool): Back off if we get a 404

    Returns:
        Deferred: resolves with the http response object on success.

        Fails with ``HTTPRequestException``: if we get an HTTP response
        code >= 300.

        Fails with ``NotRetryingDestination`` if we are not yet ready
        to retry this server.

        Fails with ``FederationDeniedError`` if this destination
        is not on our federation whitelist

        (May also fail with plenty of other Exceptions for things like DNS
        failures, connection failures, SSL failures.)
    """
    # BUGFIX: the default used to be a mutable `{}` shared across all
    # calls; the body mutates it (User-Agent/Host below), so headers
    # leaked between requests. Use a None sentinel instead.
    if headers_dict is None:
        headers_dict = {}

    if (
        self.hs.config.federation_domain_whitelist and
        destination not in self.hs.config.federation_domain_whitelist
    ):
        raise FederationDeniedError(destination)

    limiter = yield synapse.util.retryutils.get_retry_limiter(
        destination,
        self.clock,
        self._store,
        backoff_on_404=backoff_on_404,
        ignore_backoff=ignore_backoff,
    )

    destination = destination.encode("ascii")
    path_bytes = path.encode("ascii")
    with limiter:
        headers_dict[b"User-Agent"] = [self.version_string]
        headers_dict[b"Host"] = [destination]

        url_bytes = self._create_url(
            destination, path_bytes, param_bytes, query_bytes
        )

        txn_id = "%s-O-%s" % (method, self._next_id)
        self._next_id = (self._next_id + 1) % (sys.maxint - 1)

        outbound_logger.info(
            "{%s} [%s] Sending request: %s %s",
            txn_id, destination, method, url_bytes
        )

        # XXX: Would be much nicer to retry only at the transaction-layer
        # (once we have reliable transactions in place)
        if long_retries:
            retries_left = MAX_LONG_RETRIES
        else:
            retries_left = MAX_SHORT_RETRIES

        http_url_bytes = urlparse.urlunparse(
            ("", "", path_bytes, param_bytes, query_bytes, "")
        )

        log_result = None
        try:
            while True:
                producer = None
                if body_callback:
                    producer = body_callback(method, http_url_bytes, headers_dict)

                try:
                    request_deferred = self.agent.request(
                        method, url_bytes, Headers(headers_dict), producer
                    )
                    # Enforce the caller's timeout (ms -> s), or 60s default.
                    add_timeout_to_deferred(
                        request_deferred,
                        timeout / 1000. if timeout else 60,
                        cancelled_to_request_timed_out_error,
                    )
                    response = yield make_deferred_yieldable(
                        request_deferred,
                    )

                    log_result = "%d %s" % (response.code, response.phrase,)
                    break
                except Exception as e:
                    if not retry_on_dns_fail and isinstance(e, DNSLookupError):
                        logger.warn(
                            "DNS Lookup failed to %s with %s", destination, e
                        )
                        log_result = "DNS Lookup failed to %s with %s" % (
                            destination, e
                        )
                        raise

                    logger.warn(
                        "{%s} Sending request failed to %s: %s %s: %s",
                        txn_id,
                        destination,
                        method,
                        url_bytes,
                        _flatten_response_never_received(e),
                    )

                    log_result = _flatten_response_never_received(e)

                    if retries_left and not timeout:
                        # Exponential backoff with jitter; long retries
                        # back off up to 60s, short retries up to 2s.
                        if long_retries:
                            delay = 4 ** (MAX_LONG_RETRIES + 1 - retries_left)
                            delay = min(delay, 60)
                            delay *= random.uniform(0.8, 1.4)
                        else:
                            delay = 0.5 * 2 ** (MAX_SHORT_RETRIES - retries_left)
                            delay = min(delay, 2)
                            delay *= random.uniform(0.8, 1.4)

                        yield sleep(delay)
                        retries_left -= 1
                    else:
                        raise
        finally:
            outbound_logger.info(
                "{%s} [%s] Result: %s", txn_id, destination, log_result,
            )

    if 200 <= response.code < 300:
        pass
    else:
        # :'(
        # Update transactions table?
        with logcontext.PreserveLoggingContext():
            body = yield readBody(response)
        raise HttpResponseException(
            response.code, response.phrase, body
        )

    defer.returnValue(response)