Example no. 1
0
    def test_sleep(self):
        """The logcontext in force before a clock.sleep should be restored
        afterwards, even when a competing callback sleeps concurrently in
        its own context.
        """
        test_clock = Clock(reactor)

        @defer.inlineCallbacks
        def rival_task():
            # Runs interleaved with the main body below, each side checking
            # that its own context is restored after the sleep.
            with LoggingContext("competing"):
                yield test_clock.sleep(0)
                self._check_test_key("competing")

        reactor.callLater(0, rival_task)

        with LoggingContext("one"):
            yield test_clock.sleep(0)
            self._check_test_key("one")
Example no. 2
0
    def test_sleep(self):
        """The logcontext in force before a clock.sleep should be restored
        afterwards, even when a competing callback sleeps concurrently in
        its own context.
        """
        test_clock = Clock(reactor)

        @defer.inlineCallbacks
        def rival_task():
            # Runs interleaved with the main body below, each side checking
            # that its own context is restored after the sleep.
            with LoggingContext() as rival_ctx:
                rival_ctx.request = "competing"
                yield test_clock.sleep(0)
                self._check_test_key("competing")

        reactor.callLater(0, rival_task)

        with LoggingContext() as main_ctx:
            main_ctx.request = "one"
            yield test_clock.sleep(0)
            self._check_test_key("one")
Example no. 3
0
    def test_verify_json_objects_for_server_awaits_previous_requests(self):
        """A second verification request for a server with an outstanding key
        fetch should wait for that fetch rather than start a second request,
        and logcontexts must be correctly preserved throughout.
        """
        clock = Clock(reactor)
        key1 = signedjson.key.generate_signing_key(1)

        kr = keyring.Keyring(self.hs)
        json1 = {}
        signedjson.sign.sign_json(json1, "server10", key1)

        # Canned perspectives-server response carrying server10's signed key.
        persp_resp = {
            "server_keys": [
                self.mock_perspective_server.get_signed_key(
                    "server10", signedjson.key.get_verify_key(key1)),
            ]
        }
        # Fired manually later on, so the test controls when the "remote"
        # perspectives server replies.
        persp_deferred = defer.Deferred()

        @defer.inlineCallbacks
        def get_perspectives(**kwargs):
            # The key fetch should run in the "11" logcontext (the one that
            # initiated the lookup).
            self.assertEquals(
                LoggingContext.current_context().request,
                "11",
            )
            with logcontext.PreserveLoggingContext():
                yield persp_deferred
            defer.returnValue(persp_resp)

        self.http_client.post_json.side_effect = get_perspectives

        with LoggingContext("11") as context_11:
            context_11.request = "11"

            # start off a first set of lookups
            res_deferreds = kr.verify_json_objects_for_server([
                ("server10", json1), ("server11", {})
            ])

            # the unsigned json should be rejected pretty quickly
            self.assertTrue(res_deferreds[1].called)
            try:
                yield res_deferreds[1]
                self.assertFalse("unsigned json didn't cause a failure")
            except SynapseError:
                pass

            # the signed json's lookup should still be pending
            self.assertFalse(res_deferreds[0].called)
            res_deferreds[0].addBoth(self.check_context, None)

            # wait a tick for it to send the request to the perspectives server
            # (it first tries the datastore)
            yield clock.sleep(1)  # XXX find out why this takes so long!
            self.http_client.post_json.assert_called_once()

            # sleeping must not have leaked us into another context
            self.assertIs(LoggingContext.current_context(), context_11)

            context_12 = LoggingContext("12")
            context_12.request = "12"
            with logcontext.PreserveLoggingContext(context_12):
                # a second request for a server with outstanding requests
                # should block rather than start a second call
                self.http_client.post_json.reset_mock()
                self.http_client.post_json.return_value = defer.Deferred()

                res_deferreds_2 = kr.verify_json_objects_for_server(
                    [("server10", json1)], )
                yield clock.sleep(1)
                self.http_client.post_json.assert_not_called()
                res_deferreds_2[0].addBoth(self.check_context, None)

            # complete the first request
            with logcontext.PreserveLoggingContext():
                persp_deferred.callback(persp_resp)
            self.assertIs(LoggingContext.current_context(), context_11)

            # both the original lookup and the second (blocked) lookup should
            # now complete
            with logcontext.PreserveLoggingContext():
                yield res_deferreds[0]
                yield res_deferreds_2[0]
Example no. 4
0
    def test_verify_json_objects_for_server_awaits_previous_requests(self):
        """A second verification request for a server with an outstanding key
        fetch should wait for that fetch rather than start a second request,
        and logcontexts must be correctly preserved throughout.
        """
        clock = Clock(reactor)
        key1 = signedjson.key.generate_signing_key(1)

        kr = keyring.Keyring(self.hs)
        json1 = {}
        signedjson.sign.sign_json(json1, "server10", key1)

        # Canned perspectives-server response carrying server10's signed key.
        persp_resp = {
            "server_keys": [
                self.mock_perspective_server.get_signed_key(
                    "server10", signedjson.key.get_verify_key(key1)
                )
            ]
        }
        # Fired manually later on, so the test controls when the "remote"
        # perspectives server replies.
        persp_deferred = defer.Deferred()

        @defer.inlineCallbacks
        def get_perspectives(**kwargs):
            # The key fetch should run in the "11" logcontext (the one that
            # initiated the lookup).
            self.assertEquals(LoggingContext.current_context().request, "11")
            with logcontext.PreserveLoggingContext():
                yield persp_deferred
            defer.returnValue(persp_resp)

        self.http_client.post_json.side_effect = get_perspectives

        with LoggingContext("11") as context_11:
            context_11.request = "11"

            # start off a first set of lookups
            res_deferreds = kr.verify_json_objects_for_server(
                [("server10", json1), ("server11", {})]
            )

            # the unsigned json should be rejected pretty quickly
            self.assertTrue(res_deferreds[1].called)
            try:
                yield res_deferreds[1]
                self.assertFalse("unsigned json didn't cause a failure")
            except SynapseError:
                pass

            # the signed json's lookup should still be pending
            self.assertFalse(res_deferreds[0].called)
            res_deferreds[0].addBoth(self.check_context, None)

            # wait a tick for it to send the request to the perspectives server
            # (it first tries the datastore)
            yield clock.sleep(1)  # XXX find out why this takes so long!
            self.http_client.post_json.assert_called_once()

            # sleeping must not have leaked us into another context
            self.assertIs(LoggingContext.current_context(), context_11)

            context_12 = LoggingContext("12")
            context_12.request = "12"
            with logcontext.PreserveLoggingContext(context_12):
                # a second request for a server with outstanding requests
                # should block rather than start a second call
                self.http_client.post_json.reset_mock()
                self.http_client.post_json.return_value = defer.Deferred()

                res_deferreds_2 = kr.verify_json_objects_for_server(
                    [("server10", json1)]
                )
                yield clock.sleep(1)
                self.http_client.post_json.assert_not_called()
                res_deferreds_2[0].addBoth(self.check_context, None)

            # complete the first request
            with logcontext.PreserveLoggingContext():
                persp_deferred.callback(persp_resp)
            self.assertIs(LoggingContext.current_context(), context_11)

            # both the original lookup and the second (blocked) lookup should
            # now complete
            with logcontext.PreserveLoggingContext():
                yield res_deferreds[0]
                yield res_deferreds_2[0]
Example no. 5
0
class WellKnownResolver(object):
    """Handles well-known lookups for matrix servers.
    """
    def __init__(
        self,
        reactor,
        agent,
        user_agent,
        well_known_cache=None,
        had_well_known_cache=None,
    ):
        """
        Args:
            reactor: the Twisted reactor, used for timing.
            agent: HTTP agent used to fetch the .well-known files.
            user_agent (bytes): value for the User-Agent request header.
            well_known_cache: optional cache of lookup results (defaults to
                the module-level shared cache) — mainly for tests.
            had_well_known_cache: optional cache recording which servers
                recently had a valid well-known record (defaults to the
                module-level shared cache) — mainly for tests.
        """
        self._reactor = reactor
        self._clock = Clock(reactor)

        if well_known_cache is None:
            well_known_cache = _well_known_cache

        if had_well_known_cache is None:
            had_well_known_cache = _had_valid_well_known_cache

        # server_name -> delegated server (or None for "no valid well-known")
        self._well_known_cache = well_known_cache
        # whether a server recently had a *valid* well-known record; used to
        # choose a shorter negative-cache period on later failures
        self._had_valid_well_known_cache = had_well_known_cache
        # follow redirects when fetching .well-known
        self._well_known_agent = RedirectAgent(agent)
        self.user_agent = user_agent

    @defer.inlineCallbacks
    def get_well_known(self, server_name):
        """Attempt to fetch and parse a .well-known file for the given server

        Args:
            server_name (bytes): name of the server, from the requested url

        Returns:
            Deferred[WellKnownLookupResult]: The result of the lookup
        """
        try:
            # Serve from the cache while the entry is comfortably within its
            # TTL; once it enters the "grace period" near expiry we re-fetch,
            # keeping the old value as a fallback for temporary failures.
            prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
                server_name)

            now = self._clock.time()
            if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
                return WellKnownLookupResult(delegated_server=prev_result)
        except KeyError:
            # not cached at all
            prev_result = None

        # TODO: should we linearise so that we don't end up doing two .well-known
        # requests for the same server in parallel?
        try:
            with Measure(self._clock, "get_well_known"):
                result, cache_period = yield self._fetch_well_known(
                    server_name)

        except _FetchWellKnownFailure as e:
            if prev_result and e.temporary:
                # This is a temporary failure and we have a still valid cached
                # result, so lets return that. Hopefully the next time we ask
                # the remote will be back up again.
                return WellKnownLookupResult(delegated_server=prev_result)

            result = None

            if self._had_valid_well_known_cache.get(server_name, False):
                # We have recently seen a valid well-known record for this
                # server, so we cache the lack of well-known for a shorter time.
                cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
            else:
                cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD

            # add some randomness to the TTL to avoid a stampeding herd
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )

        if cache_period > 0:
            # note that a failed lookup (result=None) is cached too, so we
            # don't hammer an unresponsive server
            self._well_known_cache.set(server_name, result, cache_period)

        return WellKnownLookupResult(delegated_server=result)

    @defer.inlineCallbacks
    def _fetch_well_known(self, server_name):
        """Actually fetch and parse a .well-known, without checking the cache

        Args:
            server_name (bytes): name of the server, from the requested url

        Raises:
            _FetchWellKnownFailure if we fail to lookup a result

        Returns:
            Deferred[Tuple[bytes,int]]: The lookup result and cache period.
        """

        # If this server recently had a valid record, retry harder: a blip is
        # more likely to be temporary.
        had_valid_well_known = self._had_valid_well_known_cache.get(
            server_name, False)

        # We do this in two steps to differentiate between possibly transient
        # errors (e.g. can't connect to host, 503 response) and more permenant
        # errors (such as getting a 404 response).
        response, body = yield self._make_well_known_request(
            server_name, retry=had_valid_well_known)

        try:
            if response.code != 200:
                raise Exception("Non-200 response %s" % (response.code, ))

            parsed_body = json.loads(body.decode("utf-8"))
            logger.info("Response from .well-known: %s", parsed_body)

            # the delegated server name lives under the "m.server" key
            result = parsed_body["m.server"].encode("ascii")
        except defer.CancelledError:
            # Bail if we've been cancelled
            raise
        except Exception as e:
            # parse failures (bad JSON, missing key, non-ascii, non-200) are
            # treated as permanent: the record exists but is invalid
            logger.info("Error parsing well-known for %s: %s", server_name, e)
            raise _FetchWellKnownFailure(temporary=False)

        # honour the HTTP caching headers, within sane bounds
        cache_period = _cache_period_from_headers(
            response.headers, time_now=self._reactor.seconds)
        if cache_period is None:
            cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
            # add some randomness to the TTL to avoid a stampeding herd every 24 hours
            # after startup
            cache_period *= random.uniform(
                1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
                1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
            )
        else:
            cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
            cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)

        # We got a success, mark as such in the cache
        self._had_valid_well_known_cache.set(
            server_name,
            bool(result),
            cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
        )

        return result, cache_period

    @defer.inlineCallbacks
    def _make_well_known_request(self, server_name, retry):
        """Make the well known request.

        This will retry the request if requested and it fails (with unable
        to connect or receives a 5xx error).

        Args:
            server_name (bytes)
            retry (bool): Whether to retry the request if it fails.

        Returns:
            Deferred[tuple[IResponse, bytes]] Returns the response object and
            body. Response may be a non-200 response.
        """
        uri = b"https://%s/.well-known/matrix/server" % (server_name, )
        uri_str = uri.decode("ascii")

        headers = {
            b"User-Agent": [self.user_agent],
        }

        # attempt counter; capped at WELL_KNOWN_RETRY_ATTEMPTS when retrying
        i = 0
        while True:
            i += 1

            logger.info("Fetching %s", uri_str)
            try:
                response = yield make_deferred_yieldable(
                    self._well_known_agent.request(b"GET",
                                                   uri,
                                                   headers=Headers(headers)))
                body = yield make_deferred_yieldable(readBody(response))

                # 5xx responses are treated like connection failures: worth
                # retrying. Other non-200s are returned to the caller.
                if 500 <= response.code < 600:
                    raise Exception("Non-200 response %s" % (response.code, ))

                return response, body
            except defer.CancelledError:
                # Bail if we've been cancelled
                raise
            except Exception as e:
                if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
                    logger.info("Error fetching %s: %s", uri_str, e)
                    raise _FetchWellKnownFailure(temporary=True)

                logger.info("Error fetching %s: %s. Retrying", uri_str, e)

            # Sleep briefly in the hopes that they come back up
            yield self._clock.sleep(0.5)