Example 1
        async def second_lookup():
            with LoggingContext("context_12") as context_12:
                context_12.request = "context_12"

                res_deferreds_2 = kr.verify_json_objects_for_server([
                    ("server10", json1, 0, "test")
                ])
                res_deferreds_2[0].addBoth(self.check_context, None)
                second_lookup_state[0] = 1
                await make_deferred_yieldable(res_deferreds_2[0])
                second_lookup_state[0] = 2
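The `check_context` callback used above is defined elsewhere in the test class; a minimal sketch of such a helper (an assumption, not necessarily the exact implementation) simply asserts which logcontext the Deferred's result is delivered under and passes the result through unchanged:

    def check_context(self, val, expected):
        # `current_context` is synapse.logging.context.current_context; a
        # Deferred callback should run under the caller's logcontext (or the
        # sentinel, in which case `request` is None).
        self.assertEqual(getattr(current_context(), "request", None), expected)
        return val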
Example 2
    def test_rejected_message_event_state(self):
        """
        Check that we store the state group correctly for rejected non-state events.

        Regression test for #6289.
        """
        OTHER_SERVER = "otherserver"
        OTHER_USER = "******" + OTHER_SERVER

        # create the room
        user_id = self.register_user("kermit", "test")
        tok = self.login("kermit", "test")
        room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
        room_version = self.get_success(self.store.get_room_version(room_id))

        # pretend that another server has joined
        join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER,
                                                     room_id)

        # check the state group
        sg = self.successResultOf(
            self.store._get_state_group_for_event(join_event.event_id))

        # build and send an event which will be rejected
        ev = event_from_pdu_json(
            {
                "type": EventTypes.Message,
                "content": {},
                "room_id": room_id,
                "sender": "@yetanotheruser:"******"depth": join_event["depth"] + 1,
                "prev_events": [join_event.event_id],
                "auth_events": [],
                "origin_server_ts": self.clock.time_msec(),
            },
            room_version,
        )

        with LoggingContext("send_rejected"):
            d = run_in_background(
                self.hs.get_federation_event_handler().on_receive_pdu,
                OTHER_SERVER, ev)
        self.get_success(d)

        # that should have been rejected
        e = self.get_success(
            self.store.get_event(ev.event_id, allow_rejected=True))
        self.assertIsNotNone(e.rejected_reason)

        # ... and the state group should be the same as before
        sg2 = self.successResultOf(
            self.store._get_state_group_for_event(ev.event_id))

        self.assertEqual(sg, sg2)
Example 3
def main() -> None:
    with LoggingContext("main"):
        # check base requirements
        check_requirements()
        hs = setup(sys.argv[1:])

        # redirect stdio to the logs, if configured.
        if not hs.config.logging.no_redirect_stdio:
            redirect_stdio_to_logs()

        run(hs)
Example 4
    def test_make_deferred_yieldable_on_non_deferred(self):
        """Check that make_deferred_yieldable does the right thing when its
        argument isn't actually a deferred"""

        with LoggingContext("one"):
            d1 = make_deferred_yieldable("bum")
            self._check_test_key("one")

            r = yield d1
            self.assertEqual(r, "bum")
            self._check_test_key("one")
Example 5
    def test_cant_hide_direct_ancestors(self):
        """
        If you send a message, you must be able to provide the direct
        prev_events that said event references.
        """

        async def post_json(destination, path, data, headers=None, timeout=0):
            # If it asks us for new missing events, give them NOTHING
            if path.startswith("/_matrix/federation/v1/get_missing_events/"):
                return {"events": []}

        self.http_client.post_json = post_json

        # Figure out what the most recent event is
        most_recent = self.get_success(
            self.store.get_latest_event_ids_in_room(self.room_id)
        )[0]

        # Now lie about an event
        lying_event = make_event_from_dict(
            {
                "room_id": self.room_id,
                "sender": "@baduser:test.serv",
                "event_id": "one:test.serv",
                "depth": 1000,
                "origin_server_ts": 1,
                "type": "m.room.message",
                "origin": "test.serv",
                "content": {"body": "hewwo?"},
                "auth_events": [],
                "prev_events": [("two:test.serv", {}), (most_recent, {})],
            }
        )

        with LoggingContext("test-context"):
            failure = self.get_failure(
                self.handler.on_receive_pdu(
                    "test.serv", lying_event, sent_to_us_directly=True
                ),
                FederationError,
            )

        # on_receive_pdu should throw an error
        self.assertEqual(
            failure.value.args[0],
            (
                "ERROR 403: Your server isn't divulging details about prev_events "
                "referenced in this event."
            ),
        )

        # Make sure the invalid event isn't there
        extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))
        self.assertEqual(extrem[0], "$join:test.serv")
Example 6
    def __enter__(self):
        self.start = self.clock.time()
        self.start_context = LoggingContext.current_context()
        if not self.start_context:
            self.start_context = LoggingContext("Measure")
            self.start_context.__enter__()
            self.created_context = True

        self.start_usage = self.start_context.get_resource_usage()

        in_flight.register((self.name, ), self._update_in_flight)
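For completeness, a simplified sketch of the matching exit path (an assumption: the real `Measure.__exit__` does more, such as folding resource-usage deltas into metrics; `block_timer` here stands for an assumed Prometheus counter used alongside this class) might look like:

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop tracking the block and record how long it ran.
        in_flight.unregister((self.name, ), self._update_in_flight)
        duration = self.clock.time() - self.start
        block_timer.labels(self.name).inc(duration)

        # If __enter__ had to create its own context, close it again so the
        # logcontext stack is left as we found it.
        if getattr(self, "created_context", False):
            self.start_context.__exit__(exc_type, exc_val, exc_tb)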
Example 7
    def test_dedupe(self):
        """Test that if we request the same event multiple times we only pull it
        out once.
        """

        with LoggingContext("test") as ctx:
            d = yieldable_gather_results(self.store.get_event,
                                         [self.event_id, self.event_id])
            self.get_success(d)

            # We should have fetched the event from the DB
            self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)
Example 8
    def test_make_deferred_yieldable_with_chained_deferreds(self):
        sentinel_context = current_context()

        with LoggingContext("one"):
            d1 = make_deferred_yieldable(_chained_deferred_function())
            # make sure that the context was reset by make_deferred_yieldable
            self.assertIs(current_context(), sentinel_context)

            yield d1

            # now it should be restored
            self._check_test_key("one")
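The `_chained_deferred_function` helper referenced above lives elsewhere in the test module; a plausible sketch (an assumption) is a completed deferred whose callback chain pauses on a later reactor tick, which is exactly the "chained deferred" case make_deferred_yieldable has to cope with:

def _chained_deferred_function():
    d = defer.succeed(None)

    def cb(res):
        # Return a deferred that only fires on the next reactor tick.
        d2 = defer.Deferred()
        reactor.callLater(0, d2.callback, res)
        return d2

    d.addCallback(cb)
    return d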
Example 9
        def do_lookup():
            with LoggingContext() as c1:
                c1.name = "c1"
                try:
                    d = obj.fn(1)
                    self.assertEqual(
                        LoggingContext.current_context(), LoggingContext.sentinel
                    )
                    yield d
                    self.fail("No exception thrown")
                except SynapseError:
                    pass

                self.assertEqual(LoggingContext.current_context(), c1)
Example 10
    def test_sleep(self):
        clock = Clock(reactor)

        @defer.inlineCallbacks
        def competing_callback():
            with LoggingContext("competing"):
                yield clock.sleep(0)
                self._check_test_key("competing")

        reactor.callLater(0, competing_callback)

        with LoggingContext("one"):
            yield clock.sleep(0)
            self._check_test_key("one")
Example 11
        def do_request():
            with LoggingContext("one") as context:
                fetch_d = self.cl.get_json("testserv:8008", "foo/bar")

                # Nothing happened yet
                self.assertNoResult(fetch_d)

                # should have reset logcontext to the sentinel
                check_logcontext(SENTINEL_CONTEXT)

                try:
                    fetch_res = yield fetch_d
                    return fetch_res
                finally:
                    check_logcontext(context)
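`check_logcontext` here is a small assertion helper; a minimal sketch (an assumption, not necessarily the module's own helper) just fails loudly if the active logcontext is not the expected one:

def check_logcontext(context):
    if current_context() is not context:
        raise AssertionError(
            "Expected logcontext %s but was %s" % (context, current_context())
        )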
Example 12
        def second_lookup():
            with LoggingContext("12") as context_12:
                context_12.request = "12"
                self.http_client.post_json.reset_mock()
                self.http_client.post_json.return_value = defer.Deferred()

                res_deferreds_2 = kr.verify_json_objects_for_server(
                    [("server10", json1, 0, "test")]
                )
                res_deferreds_2[0].addBoth(self.check_context, None)
                yield make_deferred_yieldable(res_deferreds_2[0])

                # let verify_json_objects_for_server finish its work before we kill the
                # logcontext
                yield self.clock.sleep(0)
Example 13
    def __init__(self, clock, name):
        self.clock = clock
        self.name = name
        curr_context = current_context()
        if not curr_context:
            logger.warning(
                "Starting metrics collection %r from sentinel context: metrics will be lost",
                name,
            )
            parent_context = None
        else:
            assert isinstance(curr_context, LoggingContext)
            parent_context = curr_context
        self._logging_context = LoggingContext("Measure[%s]" % (self.name, ),
                                               parent_context)
        self.start = None
Example 14
        def do_lookup():
            with LoggingContext() as c1:
                c1.name = "c1"
                try:
                    d = obj.fn(1)
                    self.assertEqual(LoggingContext.current_context(),
                                     LoggingContext.sentinel)
                    yield d
                    self.fail("No exception thrown")
                except SynapseError:
                    pass

                self.assertEqual(LoggingContext.current_context(), c1)

            # the cache should now be empty
            self.assertEqual(len(obj.fn.cache.cache), 0)
Example 15
        def do_lookup():

            with LoggingContext("one") as ctx:
                resolve_d = resolver.resolve_service(service_name)

                self.assertNoResult(resolve_d)

                # should have reset to the sentinel context
                self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)

                result = yield resolve_d

                # should have restored our context
                self.assertIs(LoggingContext.current_context(), ctx)

                return result
Example 16
    def test_cache(self):
        class Cls(object):
            def __init__(self):
                self.mock = mock.Mock()

            @descriptors.cached()
            def fn(self, arg1, arg2):
                pass

            @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
            def list_fn(self, args1, arg2):
                assert LoggingContext.current_context().request == "c1"
                # we want this to behave like an asynchronous function
                yield run_on_reactor()
                assert LoggingContext.current_context().request == "c1"
                return self.mock(args1, arg2)

        with LoggingContext() as c1:
            c1.request = "c1"
            obj = Cls()
            obj.mock.return_value = {10: "fish", 20: "chips"}
            d1 = obj.list_fn([10, 20], 2)
            self.assertEqual(LoggingContext.current_context(),
                             LoggingContext.sentinel)
            r = yield d1
            self.assertEqual(LoggingContext.current_context(), c1)
            obj.mock.assert_called_once_with([10, 20], 2)
            self.assertEqual(r, {10: "fish", 20: "chips"})
            obj.mock.reset_mock()

            # a call with different params should call the mock again
            obj.mock.return_value = {30: "peas"}
            r = yield obj.list_fn([20, 30], 2)
            obj.mock.assert_called_once_with([30], 2)
            self.assertEqual(r, {20: "chips", 30: "peas"})
            obj.mock.reset_mock()

            # all the values should now be cached
            r = yield obj.fn(10, 2)
            self.assertEqual(r, "fish")
            r = yield obj.fn(20, 2)
            self.assertEqual(r, "chips")
            r = yield obj.fn(30, 2)
            self.assertEqual(r, "peas")
            r = yield obj.list_fn([10, 20, 30], 2)
            obj.mock.assert_not_called()
            self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})
Example 17
        def do_lookup():
            with LoggingContext("c1") as c1:
                try:
                    d = obj.fn(1)
                    self.assertEqual(
                        current_context(),
                        SENTINEL_CONTEXT,
                    )
                    yield d
                    self.fail("No exception thrown")
                except SynapseError:
                    pass

                self.assertEqual(current_context(), c1)

            # the cache should now be empty
            self.assertEqual(len(obj.fn.cache.cache), 0)
Example 18
    def test_cache(self):
        class Cls:
            def __init__(self):
                self.mock = mock.Mock()

            @descriptors.cached()
            def fn(self, arg1, arg2):
                pass

            @descriptors.cachedList("fn", "args1")
            async def list_fn(self, args1, arg2):
                assert current_context().name == "c1"
                # we want this to behave like an asynchronous function
                await run_on_reactor()
                assert current_context().name == "c1"
                return self.mock(args1, arg2)

        with LoggingContext("c1") as c1:
            obj = Cls()
            obj.mock.return_value = {10: "fish", 20: "chips"}
            d1 = obj.list_fn([10, 20], 2)
            self.assertEqual(current_context(), SENTINEL_CONTEXT)
            r = yield d1
            self.assertEqual(current_context(), c1)
            obj.mock.assert_called_once_with([10, 20], 2)
            self.assertEqual(r, {10: "fish", 20: "chips"})
            obj.mock.reset_mock()

            # a call with different params should call the mock again
            obj.mock.return_value = {30: "peas"}
            r = yield obj.list_fn([20, 30], 2)
            obj.mock.assert_called_once_with([30], 2)
            self.assertEqual(r, {20: "chips", 30: "peas"})
            obj.mock.reset_mock()

            # all the values should now be cached
            r = yield obj.fn(10, 2)
            self.assertEqual(r, "fish")
            r = yield obj.fn(20, 2)
            self.assertEqual(r, "chips")
            r = yield obj.fn(30, 2)
            self.assertEqual(r, "peas")
            r = yield obj.list_fn([10, 20, 30], 2)
            obj.mock.assert_not_called()
            self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})
Example 19
    def _build_and_send_join_event(self, other_server, other_user, room_id):
        join_event = self.get_success(
            self.handler.on_make_join_request(other_server, room_id,
                                              other_user))
        # the auth code requires that a signature exists, but doesn't check that
        # signature... go figure.
        join_event.signatures[other_server] = {"x": "y"}
        with LoggingContext("send_join"):
            d = run_in_background(self.handler.on_send_join_request,
                                  other_server, join_event)
        self.get_success(d)

        # sanity-check: the room should show that the new user is a member
        r = self.get_success(self.store.get_current_state_ids(room_id))
        self.assertEqual(r[(EventTypes.Member, other_user)],
                         join_event.event_id)

        return join_event
Example 20
        async def first_lookup():
            with LoggingContext("context_11",
                                request=FakeRequest("context_11")):
                res_deferreds = kr.verify_json_objects_for_server([
                    ("server10", json1, 0), ("server11", {}, 0)
                ])

                # the unsigned json should be rejected pretty quickly
                self.assertTrue(res_deferreds[1].called)
                try:
                    await res_deferreds[1]
                    self.assertFalse("unsigned json didn't cause a failure")
                except SynapseError:
                    pass

                self.assertFalse(res_deferreds[0].called)
                res_deferreds[0].addBoth(self.check_context, None)

                await make_deferred_yieldable(res_deferreds[0])
Example 21
    def test_make_deferred_yieldable(self):
        # a function which returns an incomplete deferred, but doesn't follow
        # the synapse rules.
        def blocking_function():
            d = defer.Deferred()
            reactor.callLater(0, d.callback, None)
            return d

        sentinel_context = current_context()

        with LoggingContext("one"):
            d1 = make_deferred_yieldable(blocking_function())
            # make sure that the context was reset by make_deferred_yieldable
            self.assertIs(current_context(), sentinel_context)

            yield d1

            # now it should be restored
            self._check_test_key("one")
Example 22
def s3_download_task(bucket, api_kwargs, key, deferred, parent_logcontext):
    """Attempts to download a file from S3.

    Args:
        bucket (str): The S3 bucket which may have the file
        api_kwargs (dict): Keyword arguments to pass when invoking the API.
            Generally `endpoint_url`.
        key (str): The key of the file
        deferred (Deferred[_S3Responder|None]): If the file exists, this is
            resolved with an _S3Responder instance; if it doesn't exist, it
            resolves with None.
        parent_logcontext (LoggingContext): the logcontext to report logs and metrics
            against.
    """
    with LoggingContext(parent_context=parent_logcontext):
        logger.info("Fetching %s from S3", key)

        local_data = threading.local()

        try:
            s3 = local_data.b3_client
        except AttributeError:
            b3_session = boto3.session.Session()
            local_data.b3_client = s3 = b3_session.client("s3", **api_kwargs)

        try:
            resp = s3.get_object(Bucket=bucket, Key=key)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] in (
                    "404",
                    "NoSuchKey",
            ):
                logger.info("Media %s not found in S3", key)
                reactor.callFromThread(deferred.callback, None)
                return

            reactor.callFromThread(deferred.errback, Failure())
            return

        producer = _S3Responder()
        reactor.callFromThread(deferred.callback, producer)
        _stream_to_producer(reactor, producer, resp["Body"], timeout=90.0)
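A hedged sketch of how a caller might drive `s3_download_task` (the wrapper name `fetch_from_s3` and the `threadpool` parameter are illustrative, not part of the provider): the blocking boto3 work runs on a thread, it is handed the caller's logcontext, and the caller waits on the Deferred under the usual logcontext rules.

def fetch_from_s3(threadpool, bucket, api_kwargs, key):
    # The task resolves or errbacks the Deferred via reactor.callFromThread.
    deferred = defer.Deferred()
    threadpool.callInThread(
        s3_download_task, bucket, api_kwargs, key, deferred, current_context()
    )
    return make_deferred_yieldable(deferred)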
Example 23
    def test_nested_spans(self) -> None:
        """Starting two spans off inside each other should work"""

        with LoggingContext("root context"):
            with start_active_span("root span", tracer=self._tracer) as root_scope:
                self.assertEqual(self._tracer.active_span, root_scope.span)

                scope1 = start_active_span(
                    "child1",
                    tracer=self._tracer,
                )
                self.assertEqual(
                    self._tracer.active_span, scope1.span, "child1 was not activated"
                )
                self.assertEqual(
                    scope1.span.context.parent_id, root_scope.span.context.span_id
                )

                scope2 = start_active_span_follows_from(
                    "child2",
                    contexts=(scope1,),
                    tracer=self._tracer,
                )
                self.assertEqual(self._tracer.active_span, scope2.span)
                self.assertEqual(
                    scope2.span.context.parent_id, scope1.span.context.span_id
                )

                with scope1, scope2:
                    pass

                # the root scope should be restored
                self.assertEqual(self._tracer.active_span, root_scope.span)
                self.assertIsNotNone(scope2.span.end_time)
                self.assertIsNotNone(scope1.span.end_time)

            self.assertIsNone(self._tracer.active_span)

        # the spans should be reported in order of their finishing.
        self.assertEqual(
            self._reporter.get_spans(), [scope2.span, scope1.span, root_scope.span]
        )
Example 24
    def render(self, resrc: Resource) -> None:
        # this is called once a Resource has been found to serve the request; in our
        # case the Resource in question will normally be a JsonResource.

        # create a LogContext for this request
        request_id = self.get_request_id()
        self.logcontext = LoggingContext(
            request_id,
            request=ContextRequest(
                request_id=request_id,
                ip_address=self.getClientAddress().host,
                site_tag=self.synapse_site.site_tag,
                # The requester is going to be unknown at this point.
                requester=None,
                authenticated_entity=None,
                method=self.get_method(),
                url=self.get_redacted_uri(),
                protocol=self.clientproto.decode("ascii", errors="replace"),
                user_agent=get_request_user_agent(self),
            ),
        )

        # override the Server header which is set by twisted
        self.setHeader("Server", self.synapse_site.server_version_string)

        with PreserveLoggingContext(self.logcontext):
            # we start the request metrics timer here with an initial stab
            # at the servlet name. For most requests that name will be
            # JsonResource (or a subclass), and JsonResource._async_render
            # will update it once it picks a servlet.
            servlet_name = resrc.__class__.__name__
            self._started_processing(servlet_name)

            Request.render(self, resrc)

            # record the arrival of the request *after*
            # dispatching to the handler, so that the handler
            # can update the servlet name in the request
            # metrics
            requests_counter.labels(self.get_method(),
                                    self.request_metrics.name).inc()
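Roughly speaking, `PreserveLoggingContext` installs the given context for the duration of the block and restores whatever was active before on exit; a simplified sketch (an assumption, not Synapse's implementation, and assuming a `set_current_context` helper that swaps the active context and returns the previous one):

class _PreserveLoggingContextSketch:
    def __init__(self, new_context):
        self._new_context = new_context

    def __enter__(self):
        # Remember the previously-active context so it can be restored.
        self._old_context = set_current_context(self._new_context)

    def __exit__(self, exc_type, exc_val, exc_tb):
        set_current_context(self._old_context)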
Example 25
    def _test_run_in_background(self, function):
        sentinel_context = LoggingContext.current_context()

        callback_completed = [False]

        with LoggingContext() as context_one:
            context_one.request = "one"

            # fire off function, but don't wait on it.
            d2 = run_in_background(function)

            def cb(res):
                callback_completed[0] = True
                return res

            d2.addCallback(cb)

            self._check_test_key("one")

        # now wait for the function under test to have run, and check that
        # the logcontext is left in a sane state.
        d2 = defer.Deferred()

        def check_logcontext():
            if not callback_completed[0]:
                reactor.callLater(0.01, check_logcontext)
                return

            # make sure that the context was reset before it got thrown back
            # into the reactor
            try:
                self.assertIs(LoggingContext.current_context(),
                              sentinel_context)
                d2.callback(None)
            except BaseException:
                d2.errback(twisted.python.failure.Failure())

        reactor.callLater(0.01, check_logcontext)

        # test is done once d2 finishes
        return d2
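A sketch of how the harness above might be exercised (an assumption modelled on the surrounding tests): hand it a function that blocks on the reactor while following the Synapse logcontext rules.

    def test_run_in_background_with_blocking_fn(self):
        @defer.inlineCallbacks
        def blocking_function():
            # Clock.sleep follows the logcontext rules: it restores the
            # calling context before returning control to us.
            yield Clock(reactor).sleep(0)

        return self._test_run_in_background(blocking_function)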
Example 26
    def _make_get_request(self, uri):
        """
        Sends a simple GET request via the agent, and checks its logcontext management
        """
        with LoggingContext("one") as context:
            fetch_d = self.agent.request(b"GET", uri)

            # Nothing happened yet
            self.assertNoResult(fetch_d)

            # should have reset logcontext to the sentinel
            _check_logcontext(SENTINEL_CONTEXT)

            try:
                fetch_res = yield fetch_d
                return fetch_res
            except Exception as e:
                logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e)
                raise
            finally:
                _check_logcontext(context)
Example 27
    def __init__(self, clock: Clock, name: str) -> None:
        """
        Args:
            clock: An object with a "time()" method, which returns the current
                time in seconds.
            name: The name of the metric to report.
        """
        self.clock = clock
        self.name = name
        curr_context = current_context()
        if not curr_context:
            logger.warning(
                "Starting metrics collection %r from sentinel context: metrics will be lost",
                name,
            )
            parent_context = None
        else:
            assert isinstance(curr_context, LoggingContext)
            parent_context = curr_context
        self._logging_context = LoggingContext(str(curr_context), parent_context)
        self.start: Optional[float] = None
Example 28
def swift_download_task(container, api_kwargs, object_name, deferred,
                        parent_logcontext):
    """Attempts to download a file from swift.

    Args:
        container (str): The swift container which may have the file
        api_kwargs (dict): Keyword arguments to pass when invoking the API.
            Generally `os_auth_url`.
        object_name (str): The object_name of the file
        deferred (Deferred[_SwiftResponder|None]): If the file exists, this is
            resolved with a _SwiftResponder instance; if it doesn't exist, it
            resolves with None.
        parent_logcontext (LoggingContext): the logcontext to report logs and metrics
            against.
    """
    with LoggingContext(parent_context=parent_logcontext):
        logger.info("Fetching %s from swift", object_name)

        local_data = threading.local()

        try:
            connection = local_data.connection
        except AttributeError:
            connection = openstack.connection.from_config(**api_kwargs)
            local_data.connection = connection

        try:
            resp = connection.download_object(object_name, container)
        except openstack.exceptions.ResourceNotFound:
            logger.info("Media %s not found in swift", object_name)
            reactor.callFromThread(deferred.callback, None)
            return
        except Exception:
            # Any other failure is reported back to the caller's Deferred.
            reactor.callFromThread(deferred.errback, Failure())
            return

        producer = _SwiftResponder()
        reactor.callFromThread(deferred.callback, producer)
        _stream_to_producer(reactor, producer, resp["Body"], timeout=90.0)
Example 29
    def test_make_deferred_yieldable_with_await(self):
        # an async function which returns an incomplete coroutine, but doesn't
        # follow the synapse rules.

        async def blocking_function():
            d = defer.Deferred()
            reactor.callLater(0, d.callback, None)
            await d

        sentinel_context = current_context()

        with LoggingContext() as context_one:
            context_one.request = "one"

            d1 = make_deferred_yieldable(blocking_function())
            # make sure that the context was reset by make_deferred_yieldable
            self.assertIs(current_context(), sentinel_context)

            yield d1

            # now it should be restored
            self._check_test_key("one")
Example 30
    def test_start_active_span(self) -> None:
        # the scope manager assumes a logging context of some sort.
        with LoggingContext("root context"):
            self.assertIsNone(self._tracer.active_span)

            # start_active_span should start and activate a span.
            scope = start_active_span("span", tracer=self._tracer)
            span = scope.span
            self.assertEqual(self._tracer.active_span, span)
            self.assertIsNotNone(span.start_time)

            # entering the context doesn't actually do a whole lot.
            with scope as ctx:
                self.assertIs(ctx, scope)
                self.assertEqual(self._tracer.active_span, span)

            # ... but leaving it unsets the active span, and finishes the span.
            self.assertIsNone(self._tracer.active_span)
            self.assertIsNotNone(span.end_time)

        # the span should have been reported
        self.assertEqual(self._reporter.get_spans(), [span])