Example #1
    def test_verify_json_for_server(self):
        kr = keyring.Keyring(self.hs)

        key1 = signedjson.key.generate_signing_key(1)
        yield self.hs.datastore.store_server_verify_key(
            "server9", "", time.time() * 1000, signedjson.key.get_verify_key(key1)
        )
        json1 = {}
        signedjson.sign.sign_json(json1, "server9", key1)

        sentinel_context = LoggingContext.current_context()

        with LoggingContext("one") as context_one:
            context_one.request = "one"

            defer = kr.verify_json_for_server("server9", {})
            try:
                yield defer
                self.fail("should fail on unsigned json")
            except SynapseError:
                pass
            self.assertIs(LoggingContext.current_context(), context_one)

            defer = kr.verify_json_for_server("server9", json1)
            self.assertFalse(defer.called)
            self.assertIs(LoggingContext.current_context(), sentinel_context)
            yield defer

            self.assertIs(LoggingContext.current_context(), context_one)
Example #2
        def setUp(orig):
            # enable debugging of delayed calls - this means that we get a
            # traceback when a unit test exits leaving things on the reactor.
            twisted.internet.base.DelayedCall.debug = True

            # if we're not starting in the sentinel logcontext, then to be honest
            # all future bets are off.
            if LoggingContext.current_context() is not LoggingContext.sentinel:
                self.fail(
                    "Test starting with non-sentinel logging context %s"
                    % (LoggingContext.current_context(),)
                )

            old_level = logging.getLogger().level
            if level is not None and old_level != level:

                @around(self)
                def tearDown(orig):
                    ret = orig()
                    logging.getLogger().setLevel(old_level)
                    return ret

                logging.getLogger().setLevel(level)

            return orig()
Example #3
 def assert_sentinel_context(self):
     if LoggingContext.current_context() != LoggingContext.sentinel:
         self.fail(
             "Expected sentinel context but got %s" % (
                 LoggingContext.current_context(),
             )
         )
Example #4
class Measure(object):
    __slots__ = [
        "clock", "name", "start_context", "start", "new_context", "ru_utime",
        "ru_stime", "db_txn_count", "db_txn_duration", "created_context"
    ]

    def __init__(self, clock, name):
        self.clock = clock
        self.name = name
        self.start_context = None
        self.start = None
        self.created_context = False

    def __enter__(self):
        self.start = self.clock.time_msec()
        self.start_context = LoggingContext.current_context()
        if not self.start_context:
            logger.warn("Entered Measure without log context: %s", self.name)
            self.start_context = LoggingContext("Measure")
            self.start_context.__enter__()
            self.created_context = True

        self.ru_utime, self.ru_stime = self.start_context.get_resource_usage()
        self.db_txn_count = self.start_context.db_txn_count
        self.db_txn_duration = self.start_context.db_txn_duration

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None or not self.start_context:
            return

        duration = self.clock.time_msec() - self.start
        block_timer.inc_by(duration, self.name)

        context = LoggingContext.current_context()

        if context != self.start_context:
            logger.warn(
                "Context have unexpectedly changed from '%s' to '%s'. (%r)",
                context, self.start_context, self.name
            )
            return

        if not context:
            logger.warn("Expected context. (%r)", self.name)
            return

        ru_utime, ru_stime = context.get_resource_usage()

        block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name)
        block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name)
        block_db_txn_count.inc_by(
            context.db_txn_count - self.db_txn_count, self.name
        )
        block_db_txn_duration.inc_by(
            context.db_txn_duration - self.db_txn_duration, self.name
        )

        if self.created_context:
            self.start_context.__exit__(exc_type, exc_val, exc_tb)
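
A minimal usage sketch of the Measure block above (hypothetical caller: hs.get_clock() and do_expensive_work() are placeholders, not taken from the example):

    def handle_request(hs, request):
        clock = hs.get_clock()
        # On exit, Measure reports the elapsed time plus the CPU and DB usage
        # accumulated on the current logcontext while the block ran.
        with Measure(clock, "handle_request"):
            do_expensive_work(request)
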
Example #5
        def tearDown(orig):
            ret = orig()
            # force a GC to work around problems with deferreds leaking logcontexts when
            # they are GCed (see the logcontext docs)
            gc.collect()
            LoggingContext.set_current_context(LoggingContext.sentinel)

            return ret
Example #6
 def errback(res, deferred_name):
     self.assertIs(
         LoggingContext.current_context(),
         context_one,
         "errback %s run in unexpected logcontext %s"
         % (deferred_name, LoggingContext.current_context()),
     )
     return res
Example #7
 def check_ctx(r):
     if LoggingContext.current_context() != start_context:
         err = "%s completion of %s changed context from %s to %s" % (
             "Failure" if isinstance(r, Failure) else "Success",
             f, start_context, LoggingContext.current_context(),
         )
         print(err, file=sys.stderr)
         raise Exception(err)
     return r
Example #8
    def __enter__(self):
        self.start = self.clock.time()
        self.start_context = LoggingContext.current_context()
        if not self.start_context:
            self.start_context = LoggingContext("Measure")
            self.start_context.__enter__()
            self.created_context = True

        self.start_usage = self.start_context.get_resource_usage()

        in_flight.register((self.name,), self._update_in_flight)
Example #9
    def __enter__(self):
        self.start = self.clock.time_msec()
        self.start_context = LoggingContext.current_context()
        if not self.start_context:
            self.start_context = LoggingContext("Measure")
            self.start_context.__enter__()
            self.created_context = True

        self.ru_utime, self.ru_stime = self.start_context.get_resource_usage()
        self.db_txn_count = self.start_context.db_txn_count
        self.db_txn_duration = self.start_context.db_txn_duration
Example #10
    def test_make_deferred_yieldable_with_chained_deferreds(self):
        sentinel_context = LoggingContext.current_context()

        with LoggingContext() as context_one:
            context_one.request = "one"

            d1 = logcontext.make_deferred_yieldable(_chained_deferred_function())
            # make sure that the context was reset by make_deferred_yieldable
            self.assertIs(LoggingContext.current_context(), sentinel_context)

            yield d1

            # now it should be restored
            self._check_test_key("one")
Example #11
    def runInteraction(self, desc, func, *args, **kwargs):
        """Wraps the .runInteraction() method on the underlying db_pool."""
        current_context = LoggingContext.current_context()

        start_time = time.time() * 1000

        after_callbacks = []

        def inner_func(conn, *args, **kwargs):
            with LoggingContext("runInteraction") as context:
                sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

                if self.database_engine.is_connection_closed(conn):
                    logger.debug("Reconnecting closed database connection")
                    conn.reconnect()

                current_context.copy_to(context)
                return self._new_transaction(
                    conn, desc, after_callbacks, func, *args, **kwargs
                )

        result = yield preserve_context_over_fn(
            self._db_pool.runWithConnection,
            inner_func, *args, **kwargs
        )

        for after_callback, after_args in after_callbacks:
            after_callback(*after_args)
        defer.returnValue(result)
Example #12
    def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
                               full_state=False):
        """Get the sync for a client if we have new data for it now. Otherwise
        wait for new data to arrive on the server. If the timeout expires, then
        return an empty sync result.
        Returns:
            A Deferred SyncResult.
        """

        context = LoggingContext.current_context()
        if context:
            if since_token is None:
                context.tag = "initial_sync"
            elif full_state:
                context.tag = "full_state_sync"
            else:
                context.tag = "incremental_sync"

        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
            result = yield self.current_sync_for_user(
                sync_config, since_token, full_state=full_state,
            )
            defer.returnValue(result)
        else:
            def current_sync_callback(before_token, after_token):
                return self.current_sync_for_user(sync_config, since_token)

            result = yield self.notifier.wait_for_events(
                sync_config.user.to_string(), timeout, current_sync_callback,
                from_token=since_token,
            )
            defer.returnValue(result)
Example #13
    def finished_processing(self):

        try:
            context = LoggingContext.current_context()
            ru_utime, ru_stime = context.get_resource_usage()
            db_txn_count = context.db_txn_count
            db_txn_duration = context.db_txn_duration
        except:
            ru_utime, ru_stime = (0, 0)
            db_txn_count, db_txn_duration = (0, 0)

        self.site.access_logger.info(
            "%s - %s - {%s}"
            " Processed request: %dms (%dms, %dms) (%dms/%d)"
            " %sB %s \"%s %s %s\" \"%s\"",
            self.getClientIP(),
            self.site.site_tag,
            self.authenticated_entity,
            int(time.time() * 1000) - self.start_time,
            int(ru_utime * 1000),
            int(ru_stime * 1000),
            int(db_txn_duration * 1000),
            int(db_txn_count),
            self.sentLength,
            self.code,
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            self.get_user_agent(),
        )
Example #14
    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None or not self.start_context:
            return

        duration = self.clock.time_msec() - self.start
        block_timer.inc_by(duration, self.name)

        context = LoggingContext.current_context()

        if context != self.start_context:
            logger.warn(
                "Context have unexpectedly changed from '%s' to '%s'. (%r)",
                context, self.start_context, self.name
            )
            return

        if not context:
            logger.warn("Expected context. (%r)", self.name)
            return

        ru_utime, ru_stime = context.get_resource_usage()

        block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name)
        block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name)
        block_db_txn_count.inc_by(
            context.db_txn_count - self.db_txn_count, self.name
        )
        block_db_txn_duration.inc_by(
            context.db_txn_duration - self.db_txn_duration, self.name
        )

        if self.created_context:
            self.start_context.__exit__(exc_type, exc_val, exc_tb)
Example #15
    def runInteraction(self, desc, func, *args, **kwargs):
        """Wraps the .runInteraction() method on the underlying db_pool."""
        current_context = LoggingContext.current_context()

        def inner_func(txn, *args, **kwargs):
            with LoggingContext("runInteraction") as context:
                current_context.copy_to(context)
                start = time.clock() * 1000
                txn_id = SQLBaseStore._TXN_ID

                # We don't really need these to be unique, so let's stop it from
                # growing really large.
                self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)

                name = "%s-%x" % (desc, txn_id, )

                transaction_logger.debug("[TXN START] {%s}", name)
                try:
                    return func(LoggingTransaction(txn, name), *args, **kwargs)
                except:
                    logger.exception("[TXN FAIL] {%s}", name)
                    raise
                finally:
                    end = time.clock() * 1000
                    transaction_logger.debug(
                        "[TXN END] {%s} %f",
                        name, end - start
                    )
        with PreserveLoggingContext():
            result = yield self._db_pool.runInteraction(
                inner_func, *args, **kwargs
            )
        defer.returnValue(result)
Example #16
        def do_lookup():

            with LoggingContext("one") as ctx:
                resolve_d = resolver.resolve_service(service_name)

                self.assertNoResult(resolve_d)

                # should have reset to the sentinel context
                self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)

                result = yield resolve_d

                # should have restored our context
                self.assertIs(LoggingContext.current_context(), ctx)

                defer.returnValue(result)
Example #17
    def stop(self, clock, request):
        context = LoggingContext.current_context()

        tag = ""
        if context:
            tag = context.tag

            if context != self.start_context:
                logger.warn(
                    "Context have unexpectedly changed %r, %r",
                    context, self.start_context
                )
                return

        incoming_requests_counter.inc(request.method, self.name, tag)

        response_timer.inc_by(
            clock.time_msec() - self.start, request.method,
            self.name, tag
        )

        ru_utime, ru_stime = context.get_resource_usage()

        response_ru_utime.inc_by(
            ru_utime, request.method, self.name, tag
        )
        response_ru_stime.inc_by(
            ru_stime, request.method, self.name, tag
        )
        response_db_txn_count.inc_by(
            context.db_txn_count, request.method, self.name, tag
        )
        response_db_txn_duration.inc_by(
            context.db_txn_duration, request.method, self.name, tag
        )
Example #18
    def _wait_for_sync_for_user(self, sync_config, since_token, timeout,
                                full_state):
        context = LoggingContext.current_context()
        if context:
            if since_token is None:
                context.tag = "initial_sync"
            elif full_state:
                context.tag = "full_state_sync"
            else:
                context.tag = "incremental_sync"

        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
            result = yield self.current_sync_for_user(
                sync_config, since_token, full_state=full_state,
            )
            defer.returnValue(result)
        else:
            def current_sync_callback(before_token, after_token):
                return self.current_sync_for_user(sync_config, since_token)

            result = yield self.notifier.wait_for_events(
                sync_config.user.to_string(), timeout, current_sync_callback,
                from_token=since_token,
            )
            defer.returnValue(result)
Example #19
    def call_later(self, delay, callback):
        current_context = LoggingContext.current_context()

        def wrapped_callback():
            LoggingContext.thread_local.current_context = current_context
            callback()
        return reactor.callLater(delay, wrapped_callback)
Example #20
 def get_perspectives(**kwargs):
     self.assertEquals(
         LoggingContext.current_context().request, "11",
     )
     with logcontext.PreserveLoggingContext():
         yield persp_deferred
     defer.returnValue(persp_resp)
Example #21
    def with_logging_context(fn):
        context = LoggingContext.current_context()

        def restore_context_callback(x):
            with PreserveLoggingContext(context):
                return fn(x)

        return restore_context_callback
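
A short usage sketch (hypothetical caller): the wrapper is intended to be attached as a Deferred callback so that the callback later fires under the logcontext captured when with_logging_context() was called. some_deferred and handle_result are placeholders, not part of the example above.

    def handle_result(result):
        # runs with the captured logcontext restored
        logger.info("got result %r", result)
        return result

    some_deferred.addCallback(with_logging_context(handle_result))
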
Example #22
    def test_logcontexts_with_async_result(self):
        @defer.inlineCallbacks
        def cb():
            yield Clock(reactor).sleep(0)
            defer.returnValue("yay")

        @defer.inlineCallbacks
        def test():
            with LoggingContext("c") as c1:
                res = yield self.cache.fetch_or_execute(self.mock_key, cb)
                self.assertIs(LoggingContext.current_context(), c1)
                self.assertEqual(res, "yay")

        # run the test twice in parallel
        d = defer.gatherResults([test(), test()])
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
        yield d
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
Example #23
File: debug.py Project: Xe/synapse
    def with_logging_context(fn):
        context = LoggingContext.current_context()

        def restore_context_callback(x):
            with PreserveLoggingContext():
                LoggingContext.thread_local.current_context = context
                return fn(x)

        return restore_context_callback
Example #24
    def call_later(self, delay, callback, *args, **kwargs):
        current_context = LoggingContext.current_context()

        def wrapped_callback(*args, **kwargs):
            with PreserveLoggingContext():
                LoggingContext.thread_local.current_context = current_context
                callback(*args, **kwargs)

        with PreserveLoggingContext():
            return reactor.callLater(delay, wrapped_callback, *args, **kwargs)
Example #25
    def test_wait_for_previous_lookups(self):
        sentinel_context = LoggingContext.current_context()

        kr = keyring.Keyring(self.hs)

        lookup_1_deferred = defer.Deferred()
        lookup_2_deferred = defer.Deferred()

        with LoggingContext("one") as context_one:
            context_one.request = "one"

            wait_1_deferred = kr.wait_for_previous_lookups(
                ["server1"],
                {"server1": lookup_1_deferred},
            )

            # there were no previous lookups, so the deferred should be ready
            self.assertTrue(wait_1_deferred.called)
            # ... so we should have preserved the LoggingContext.
            self.assertIs(LoggingContext.current_context(), context_one)
            wait_1_deferred.addBoth(self.check_context, "one")

        with LoggingContext("two") as context_two:
            context_two.request = "two"

            # set off another wait. It should block because the first lookup
            # hasn't yet completed.
            wait_2_deferred = kr.wait_for_previous_lookups(
                ["server1"],
                {"server1": lookup_2_deferred},
            )
            self.assertFalse(wait_2_deferred.called)
            # ... so we should have reset the LoggingContext.
            self.assertIs(LoggingContext.current_context(), sentinel_context)
            wait_2_deferred.addBoth(self.check_context, "two")

            # let the first lookup complete (in the sentinel context)
            lookup_1_deferred.callback(None)

            # now the second wait should complete and restore our
            # loggingcontext.
            yield wait_2_deferred
Example #26
    def test_wait_for_previous_lookups(self):
        sentinel_context = LoggingContext.current_context()

        kr = keyring.Keyring(self.hs)

        lookup_1_deferred = defer.Deferred()
        lookup_2_deferred = defer.Deferred()

        with LoggingContext("one") as context_one:
            context_one.request = "one"

            wait_1_deferred = kr.wait_for_previous_lookups(
                ["server1"],
                {"server1": lookup_1_deferred},
            )

            # there were no previous lookups, so the deferred should be ready
            self.assertTrue(wait_1_deferred.called)
            # ... so we should have preserved the LoggingContext.
            self.assertIs(LoggingContext.current_context(), context_one)
            wait_1_deferred.addBoth(self.check_context, "one")

        with LoggingContext("two") as context_two:
            context_two.request = "two"

            # set off another wait. It should block because the first lookup
            # hasn't yet completed.
            wait_2_deferred = kr.wait_for_previous_lookups(
                ["server1"],
                {"server1": lookup_2_deferred},
            )
            self.assertFalse(wait_2_deferred.called)
            # ... so we should have reset the LoggingContext.
            self.assertIs(LoggingContext.current_context(), sentinel_context)
            wait_2_deferred.addBoth(self.check_context, "two")

            # let the first lookup complete (in the sentinel context)
            lookup_1_deferred.callback(None)

            # now the second wait should complete and restore our
            # loggingcontext.
            yield wait_2_deferred
Example #27
    def start(self, time_sec, name, method):
        self.start = time_sec
        self.start_context = LoggingContext.current_context()
        self.name = name
        self.method = method

        # _request_stats records resource usage that we have already added
        # to the "in flight" metrics.
        self._request_stats = self.start_context.get_resource_usage()

        _in_flight_requests.add(self)
Example #28
        def inner_func(conn, *args, **kwargs):
            with LoggingContext("runInteraction") as context:
                sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

                if self.database_engine.is_connection_closed(conn):
                    logger.debug("Reconnecting closed database connection")
                    conn.reconnect()

                current_context.copy_to(context)
                return self._new_transaction(conn, desc, after_callbacks, func,
                                             *args, **kwargs)
Example #29
        def inner_func(conn, *args, **kwargs):
            with LoggingContext("runWithConnection") as context:
                sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

                if self.database_engine.is_connection_closed(conn):
                    logger.debug("Reconnecting closed database connection")
                    conn.reconnect()

                current_context.copy_to(context)

                return func(conn, *args, **kwargs)
Example #30
    def wrapped_request_handler(self, request):
        global _next_request_id
        request_id = "%s-%s" % (request.method, _next_request_id)
        _next_request_id += 1

        with LoggingContext(request_id) as request_context:
            if report_metrics:
                request_metrics = RequestMetrics()
                request_metrics.start(self.clock)

            request_context.request = request_id
            with request.processing():
                try:
                    with PreserveLoggingContext(request_context):
                        yield request_handler(self, request)
                except CodeMessageException as e:
                    code = e.code
                    if isinstance(e, SynapseError):
                        logger.info(
                            "%s SynapseError: %s - %s", request, code, e.msg
                        )
                    else:
                        logger.exception(e)
                    outgoing_responses_counter.inc(request.method, str(code))
                    respond_with_json(
                        request, code, cs_exception(e), send_cors=True,
                        pretty_print=_request_user_agent_is_curl(request),
                        version_string=self.version_string,
                    )
                except:
                    logger.exception(
                        "Failed handle request %s.%s on %r: %r",
                        request_handler.__module__,
                        request_handler.__name__,
                        self,
                        request
                    )
                    respond_with_json(
                        request,
                        500,
                        {
                            "error": "Internal server error",
                            "errcode": Codes.UNKNOWN,
                        },
                        send_cors=True
                    )
                finally:
                    try:
                        if report_metrics:
                            request_metrics.stop(
                                self.clock, request, self.__class__.__name__
                            )
                    except:
                        pass
Example #31
    def _get_events(self, event_ids, check_redacted=True,
                    get_prev_content=False, allow_rejected=False):
        if not event_ids:
            defer.returnValue([])

        event_id_list = event_ids
        event_ids = set(event_ids)

        event_entry_map = self._get_events_from_cache(
            event_ids,
            allow_rejected=allow_rejected,
        )

        missing_events_ids = [e for e in event_ids if e not in event_entry_map]

        if missing_events_ids:
            log_ctx = LoggingContext.current_context()
            log_ctx.record_event_fetch(len(missing_events_ids))

            missing_events = yield self._enqueue_events(
                missing_events_ids,
                check_redacted=check_redacted,
                allow_rejected=allow_rejected,
            )

            event_entry_map.update(missing_events)

        events = []
        for event_id in event_id_list:
            entry = event_entry_map.get(event_id, None)
            if not entry:
                continue

            if allow_rejected or not entry.event.rejected_reason:
                if check_redacted and entry.redacted_event:
                    event = entry.redacted_event
                else:
                    event = entry.event

                events.append(event)

                if get_prev_content:
                    if "replaces_state" in event.unsigned:
                        prev = yield self.get_event(
                            event.unsigned["replaces_state"],
                            get_prev_content=False,
                            allow_none=True,
                        )
                        if prev:
                            event.unsigned = dict(event.unsigned)
                            event.unsigned["prev_content"] = prev.content
                            event.unsigned["prev_sender"] = prev.sender

        defer.returnValue(events)
Example #32
    def call_later(self, delay, callback, *args, **kwargs):
        current_context = LoggingContext.current_context()

        def wrapped_callback():
            LoggingContext.thread_local.current_context = current_context
            callback(*args, **kwargs)

        t = [self.now + delay, wrapped_callback, False]
        self.timers.append(t)

        return t
Example #33
    def call_later(self, delay, callback, *args, **kwargs):
        current_context = LoggingContext.current_context()

        def wrapped_callback():
            LoggingContext.thread_local.current_context = current_context
            callback(*args, **kwargs)

        t = [self.now + delay, wrapped_callback, False]
        self.timers.append(t)

        return t
Example #34
    def on_new_receipts(self, min_stream_id, max_stream_id):
        # Note that the min here shouldn't be relied upon to be accurate.

        # We could check the receipts are actually m.read receipts here,
        # but currently that's the only type of receipt anyway...
        with LoggingContext("push.on_new_receipts"):
            with Measure(self.clock, "push.on_new_receipts"):
                badge = yield push_tools.get_badge_count(
                    self.hs.get_datastore(), self.user_id
                )
            yield self._send_badge(badge)
Example #35
    def _get_events(self, event_ids, check_redacted=True,
                    get_prev_content=False, allow_rejected=False):
        if not event_ids:
            defer.returnValue([])

        event_id_list = event_ids
        event_ids = set(event_ids)

        event_entry_map = self._get_events_from_cache(
            event_ids,
            allow_rejected=allow_rejected,
        )

        missing_events_ids = [e for e in event_ids if e not in event_entry_map]

        if missing_events_ids:
            log_ctx = LoggingContext.current_context()
            log_ctx.record_event_fetch(len(missing_events_ids))

            missing_events = yield self._enqueue_events(
                missing_events_ids,
                check_redacted=check_redacted,
                allow_rejected=allow_rejected,
            )

            event_entry_map.update(missing_events)

        events = []
        for event_id in event_id_list:
            entry = event_entry_map.get(event_id, None)
            if not entry:
                continue

            if allow_rejected or not entry.event.rejected_reason:
                if check_redacted and entry.redacted_event:
                    event = entry.redacted_event
                else:
                    event = entry.event

                events.append(event)

                if get_prev_content:
                    if "replaces_state" in event.unsigned:
                        prev = yield self.get_event(
                            event.unsigned["replaces_state"],
                            get_prev_content=False,
                            allow_none=True,
                        )
                        if prev:
                            event.unsigned = dict(event.unsigned)
                            event.unsigned["prev_content"] = prev.content
                            event.unsigned["prev_sender"] = prev.sender

        defer.returnValue(events)
Example #36
    def test_logcontexts_with_async_result(self):
        @defer.inlineCallbacks
        def cb():
            yield Clock(reactor).sleep(0)
            defer.returnValue("yay")

        @defer.inlineCallbacks
        def test():
            with LoggingContext("c") as c1:
                res = yield self.cache.fetch_or_execute(self.mock_key, cb)
                self.assertIs(LoggingContext.current_context(), c1)
                self.assertEqual(res, "yay")

        # run the test twice in parallel
        d = defer.gatherResults([test(), test()])
        self.assertIs(LoggingContext.current_context(),
                      LoggingContext.sentinel)
        yield d
        self.assertIs(LoggingContext.current_context(),
                      LoggingContext.sentinel)
Example #37
        def inner_func(conn, *args, **kwargs):
            with LoggingContext("runWithConnection", parent_context) as context:
                sched_duration_sec = time.time() - start_time
                sql_scheduling_timer.observe(sched_duration_sec)
                context.add_database_scheduled(sched_duration_sec)

                if self.database_engine.is_connection_closed(conn):
                    logger.debug("Reconnecting closed database connection")
                    conn.reconnect()

                return func(conn, *args, **kwargs)
Example #38
    def _test_run_in_background(self, function):
        sentinel_context = LoggingContext.current_context()

        callback_completed = [False]

        with LoggingContext() as context_one:
            context_one.request = "one"

            # fire off function, but don't wait on it.
            d2 = logcontext.run_in_background(function)

            def cb(res):
                callback_completed[0] = True
                return res

            d2.addCallback(cb)

            self._check_test_key("one")

        # now wait for the function under test to have run, and check that
        # the logcontext is left in a sane state.
        d2 = defer.Deferred()

        def check_logcontext():
            if not callback_completed[0]:
                reactor.callLater(0.01, check_logcontext)
                return

            # make sure that the context was reset before it got thrown back
            # into the reactor
            try:
                self.assertIs(LoggingContext.current_context(),
                              sentinel_context)
                d2.callback(None)
            except BaseException:
                d2.errback(twisted.python.failure.Failure())

        reactor.callLater(0.01, check_logcontext)

        # test is done once d2 finishes
        return d2
Example #39
    def test_make_deferred_yieldable(self):
        # a function which returns an incomplete deferred, but doesn't follow
        # the synapse rules.
        def blocking_function():
            d = defer.Deferred()
            reactor.callLater(0, d.callback, None)
            return d

        sentinel_context = LoggingContext.current_context()

        with LoggingContext() as context_one:
            context_one.request = "one"

            d1 = logcontext.make_deferred_yieldable(blocking_function())
            # make sure that the context was reset by make_deferred_yieldable
            self.assertIs(LoggingContext.current_context(), sentinel_context)

            yield d1

            # now it should be restored
            self._check_test_key("one")
Example #40
    def test_make_deferred_yieldable(self):
        # a function which returns an incomplete deferred, but doesn't follow
        # the synapse rules.
        def blocking_function():
            d = defer.Deferred()
            reactor.callLater(0, d.callback, None)
            return d

        sentinel_context = LoggingContext.current_context()

        with LoggingContext() as context_one:
            context_one.request = "one"

            d1 = logcontext.make_deferred_yieldable(blocking_function())
            # make sure that the context was reset by make_deferred_yieldable
            self.assertIs(LoggingContext.current_context(), sentinel_context)

            yield d1

            # now it should be restored
            self._check_test_key("one")
Example #41
        def check_logcontext():
            if not callback_completed[0]:
                reactor.callLater(0.01, check_logcontext)
                return

            # make sure that the context was reset before it got thrown back
            # into the reactor
            try:
                self.assertIs(LoggingContext.current_context(), sentinel_context)
                d2.callback(None)
            except BaseException:
                d2.errback(twisted.python.failure.Failure())
Example #42
    def test_make_deferred_yieldable_on_non_deferred(self):
        """Check that make_deferred_yieldable does the right thing when its
        argument isn't actually a deferred"""

        with LoggingContext() as context_one:
            context_one.request = "one"

            d1 = logcontext.make_deferred_yieldable("bum")
            self._check_test_key("one")

            r = yield d1
            self.assertEqual(r, "bum")
            self._check_test_key("one")
Example #43
    def test_logcontext_is_preserved_on_cancellation(self):
        blocking_was_cancelled = [False]

        @defer.inlineCallbacks
        def blocking():
            non_completing_d = Deferred()
            with logcontext.PreserveLoggingContext():
                try:
                    yield non_completing_d
                except CancelledError:
                    blocking_was_cancelled[0] = True
                    raise

        with logcontext.LoggingContext("one") as context_one:
            # the errbacks should be run in the test logcontext
            def errback(res, deferred_name):
                self.assertIs(
                    LoggingContext.current_context(), context_one,
                    "errback %s run in unexpected logcontext %s" % (
                        deferred_name, LoggingContext.current_context(),
                    )
                )
                return res

            original_deferred = blocking()
            original_deferred.addErrback(errback, "orig")
            timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
            self.assertNoResult(timing_out_d)
            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
            timing_out_d.addErrback(errback, "timingout")

            self.clock.pump((1.0, ))

            self.assertTrue(
                blocking_was_cancelled[0],
                "non-completing deferred was not cancelled",
            )
            self.failureResultOf(timing_out_d, defer.TimeoutError, )
            self.assertIs(LoggingContext.current_context(), context_one)
Example #44
 def wrapped_request_handler(self, request):
     global _next_request_id
     request_id = "%s-%s" % (request.method, _next_request_id)
     _next_request_id += 1
     with LoggingContext(request_id) as request_context:
         request_context.request = request_id
         code = None
         start = self.clock.time_msec()
         try:
             logger.info(
                 "Received request: %s %s",
                 request.method, request.path
             )
             yield request_handler(self, request)
             code = request.code
         except CodeMessageException as e:
             code = e.code
             if isinstance(e, SynapseError):
                 logger.info(
                     "%s SynapseError: %s - %s", request, code, e.msg
                 )
             else:
                 logger.exception(e)
             outgoing_responses_counter.inc(request.method, str(code))
             respond_with_json(
                 request, code, cs_exception(e), send_cors=True,
                 pretty_print=_request_user_agent_is_curl(request),
                 version_string=self.version_string,
             )
         except:
             code = 500
             logger.exception(
                 "Failed handle request %s.%s on %r: %r",
                 request_handler.__module__,
                 request_handler.__name__,
                 self,
                 request
             )
             respond_with_json(
                 request,
                 500,
                 {"error": "Internal server error"},
                 send_cors=True
             )
         finally:
             code = str(code) if code else "-"
             end = self.clock.time_msec()
             logger.info(
                 "Processed request: %dms %s %s %s",
                 end-start, code, request.method, request.path
             )
Example #45
    def _finished_processing(self):
        try:
            context = LoggingContext.current_context()
            usage = context.get_resource_usage()
        except Exception:
            usage = ContextResourceUsage()

        end_time = time.time()

        # need to decode as it could be raw utf-8 bytes
        # from an IDN servname in an auth header
        authenticated_entity = self.authenticated_entity
        if authenticated_entity is not None:
            authenticated_entity = authenticated_entity.decode(
                "utf-8", "replace")

        # ...or could be raw utf-8 bytes in the User-Agent header.
        # N.B. if you don't do this, the logger explodes cryptically
        # with maximum recursion trying to log errors about
        # the charset problem.
        # c.f. https://github.com/matrix-org/synapse/issues/3471
        user_agent = self.get_user_agent()
        if user_agent is not None:
            user_agent = user_agent.decode("utf-8", "replace")

        self.site.access_logger.info(
            "%s - %s - {%s}"
            " Processed request: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
            " %sB %s \"%s %s %s\" \"%s\" [%d dbevts]",
            self.getClientIP(),
            self.site.site_tag,
            authenticated_entity,
            end_time - self.start_time,
            usage.ru_utime,
            usage.ru_stime,
            usage.db_sched_duration_sec,
            usage.db_txn_duration_sec,
            int(usage.db_txn_count),
            self.sentLength,
            self.code,
            self.method,
            self.get_redacted_uri(),
            self.clientproto,
            user_agent,
            usage.evt_db_fetch_count,
        )

        try:
            self.request_metrics.stop(end_time, self)
        except Exception as e:
            logger.warn("Failed to stop metrics: %r", e)
Example #46
    def test_logcontext_is_preserved_on_cancellation(self):
        blocking_was_cancelled = [False]

        @defer.inlineCallbacks
        def blocking():
            non_completing_d = Deferred()
            with logcontext.PreserveLoggingContext():
                try:
                    yield non_completing_d
                except CancelledError:
                    blocking_was_cancelled[0] = True
                    raise

        with logcontext.LoggingContext("one") as context_one:
            # the errbacks should be run in the test logcontext
            def errback(res, deferred_name):
                self.assertIs(
                    LoggingContext.current_context(),
                    context_one,
                    "errback %s run in unexpected logcontext %s"
                    % (deferred_name, LoggingContext.current_context()),
                )
                return res

            original_deferred = blocking()
            original_deferred.addErrback(errback, "orig")
            timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
            self.assertNoResult(timing_out_d)
            self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
            timing_out_d.addErrback(errback, "timingout")

            self.clock.pump((1.0,))

            self.assertTrue(
                blocking_was_cancelled[0], "non-completing deferred was not cancelled"
            )
            self.failureResultOf(timing_out_d, defer.TimeoutError)
            self.assertIs(LoggingContext.current_context(), context_one)
Example #47
    def run():
        count = _background_process_counts.get(desc, 0)
        _background_process_counts[desc] = count + 1
        _background_process_start_count.labels(desc).inc()

        with LoggingContext(desc) as context:
            context.request = "%s-%i" % (desc, count)
            proc = _BackgroundProcess(desc, context)
            _background_processes.setdefault(desc, set()).add(proc)
            try:
                yield func(*args, **kwargs)
            finally:
                proc.update_metrics()
                _background_processes[desc].remove(proc)
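
The run() coroutine above is the body of a background-process wrapper; callers normally hand their own function to the wrapper rather than build the LoggingContext themselves. A hypothetical sketch, assuming a run_as_background_process(desc, func, *args) helper that wraps func in run() as above (the datastore method is likewise a placeholder):

    def start_cleanup_loop(hs):
        # the description becomes the logcontext name and the metrics label
        run_as_background_process(
            "cleanup_old_events", hs.get_datastore().cleanup_old_events
        )
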
Example #48
        def setUp(orig):
            # enable debugging of delayed calls - this means that we get a
            # traceback when a unit test exits leaving things on the reactor.
            twisted.internet.base.DelayedCall.debug = True

            # if we're not starting in the sentinel logcontext, then to be honest
            # all future bets are off.
            if LoggingContext.current_context() is not LoggingContext.sentinel:
                self.fail(
                    "Test starting with non-sentinel logging context %s" %
                    (LoggingContext.current_context(), ))

            old_level = logging.getLogger().level
            if old_level != level:

                @around(self)
                def tearDown(orig):
                    ret = orig()
                    logging.getLogger().setLevel(old_level)
                    return ret

            logging.getLogger().setLevel(level)
            return orig()
Example #49
    def test_sleep(self):
        @defer.inlineCallbacks
        def competing_callback():
            with LoggingContext() as competing_context:
                competing_context.test_key = "competing"
                yield sleep(0)
                self._check_test_key("competing")

        reactor.callLater(0, competing_callback)

        with LoggingContext() as context_one:
            context_one.test_key = "one"
            yield sleep(0)
            self._check_test_key("one")
Example #50
        def wrapped(*args, **kwargs):
            start_context = LoggingContext.current_context()

            try:
                res = orig(*args, **kwargs)
            except Exception:
                if LoggingContext.current_context() != start_context:
                    err = "%s changed context from %s to %s on exception" % (
                        f,
                        start_context,
                        LoggingContext.current_context(),
                    )
                    print(err, file=sys.stderr)
                    raise Exception(err)
                raise

            if not isinstance(res, Deferred) or res.called:
                if LoggingContext.current_context() != start_context:
                    err = "%s changed context from %s to %s" % (
                        f,
                        start_context,
                        LoggingContext.current_context(),
                    )
                    # print the error to stderr because otherwise all we
                    # see in travis-ci is the 500 error
                    print(err, file=sys.stderr)
                    raise Exception(err)
                return res

            if LoggingContext.current_context() != LoggingContext.sentinel:
                err = (
                    "%s returned incomplete deferred in non-sentinel context "
                    "%s (start was %s)") % (
                        f, LoggingContext.current_context(), start_context)
                print(err, file=sys.stderr)
                raise Exception(err)

            def check_ctx(r):
                if LoggingContext.current_context() != start_context:
                    err = "%s completion of %s changed context from %s to %s" % (
                        "Failure" if isinstance(r, Failure) else "Success",
                        f,
                        start_context,
                        LoggingContext.current_context(),
                    )
                    print(err, file=sys.stderr)
                    raise Exception(err)
                return r

            res.addBoth(check_ctx)
            return res
Example #51
    def test_does_not_cache_failures(self):
        """Checks that, if the callback returns a failure, it is called again
        for the next request.
        """
        called = [False]

        def cb():
            if called[0]:
                # return a valid result the second time
                return defer.succeed(self.mock_http_response)

            called[0] = True
            return defer.fail(Exception("boo"))

        with LoggingContext("test") as test_context:
            try:
                yield self.cache.fetch_or_execute(self.mock_key, cb)
            except Exception as e:
                self.assertEqual(e.message, "boo")
            self.assertIs(LoggingContext.current_context(), test_context)

            res = yield self.cache.fetch_or_execute(self.mock_key, cb)
            self.assertEqual(res, self.mock_http_response)
            self.assertIs(LoggingContext.current_context(), test_context)
Example #52
        def second_lookup():
            with LoggingContext("12") as context_12:
                context_12.request = "12"
                self.http_client.post_json.reset_mock()
                self.http_client.post_json.return_value = defer.Deferred()

                res_deferreds_2 = kr.verify_json_objects_for_server([
                    ("server10", json1)
                ])
                res_deferreds_2[0].addBoth(self.check_context, None)
                yield logcontext.make_deferred_yieldable(res_deferreds_2[0])

                # let verify_json_objects_for_server finish its work before we kill the
                # logcontext
                yield self.clock.sleep(0)
Example #53
        def do_request():
            with LoggingContext("one") as context:
                fetch_d = self.cl.get_json("testserv:8008", "foo/bar")

                # Nothing happened yet
                self.assertNoResult(fetch_d)

                # should have reset logcontext to the sentinel
                check_logcontext(LoggingContext.sentinel)

                try:
                    fetch_res = yield fetch_d
                    defer.returnValue(fetch_res)
                finally:
                    check_logcontext(context)
Example #54
    def stop(self, time_sec, request):
        _in_flight_requests.discard(self)

        context = LoggingContext.current_context()

        tag = ""
        if context:
            tag = context.tag

            if context != self.start_context:
                logger.warn(
                    "Context have unexpectedly changed %r, %r",
                    context, self.start_context
                )
                return

        outgoing_responses_counter.labels(request.method, str(request.code)).inc()

        response_count.labels(request.method, self.name, tag).inc()

        response_timer.labels(request.method, self.name, tag).observe(
            time_sec - self.start
        )

        resource_usage = context.get_resource_usage()

        response_ru_utime.labels(request.method, self.name, tag).inc(
            resource_usage.ru_utime,
        )
        response_ru_stime.labels(request.method, self.name, tag).inc(
            resource_usage.ru_stime,
        )
        response_db_txn_count.labels(request.method, self.name, tag).inc(
            resource_usage.db_txn_count
        )
        response_db_txn_duration.labels(request.method, self.name, tag).inc(
            resource_usage.db_txn_duration_sec
        )
        response_db_sched_duration.labels(request.method, self.name, tag).inc(
            resource_usage.db_sched_duration_sec
        )

        response_size.labels(request.method, self.name, tag).inc(request.sentLength)

        # We always call this at the end to ensure that we update the metrics
        # regardless of whether a call to /metrics was made while the request
        # was in flight.
        self.update_metrics()
Example #55
    def call_later(self, delay, callback, *args, **kwargs):
        """Call something later

        Args:
            delay(float): How long to wait in seconds.
            callback(function): Function to call
            *args: Positional arguments to pass to function.
            **kwargs: Keyword arguments to pass to function.
        """
        current_context = LoggingContext.current_context()

        def wrapped_callback(*args, **kwargs):
            with PreserveLoggingContext(current_context):
                callback(*args, **kwargs)

        with PreserveLoggingContext():
            return reactor.callLater(delay, wrapped_callback, *args, **kwargs)
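
A minimal usage sketch, assuming clock is an instance of the class above; the callback is a placeholder. The wrapping means the callback later fires under the logcontext that was current when it was scheduled, while the reactor.callLater call itself is made from the sentinel context.

    def remind(msg):
        logger.info("reminder: %s", msg)

    clock.call_later(30, remind, "30 seconds have passed")
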
Example #56
    def wait_for_previous_lookups(self, server_names, server_to_deferred):
        """Waits for any previous key lookups for the given servers to finish.

        Args:
            server_names (list): list of server_names we want to lookup
            server_to_deferred (dict): server_name to deferred which gets
                resolved once we've finished looking up keys for that server.
                The Deferreds should be regular twisted ones which call their
                callbacks with no logcontext.

        Returns: a Deferred which resolves once all key lookups for the given
            servers have completed. Follows the synapse rules of logcontext
            preservation.
        """
        loop_count = 1
        while True:
            wait_on = [
                (server_name, self.key_downloads[server_name])
                for server_name in server_names
                if server_name in self.key_downloads
            ]
            if not wait_on:
                break
            logger.info(
                "Waiting for existing lookups for %s to complete [loop %i]",
                [w[0] for w in wait_on], loop_count,
            )
            with PreserveLoggingContext():
                yield defer.DeferredList((w[1] for w in wait_on))

            loop_count += 1

        ctx = LoggingContext.current_context()

        def rm(r, server_name_):
            with PreserveLoggingContext(ctx):
                logger.debug("Releasing key lookup lock on %s", server_name_)
                self.key_downloads.pop(server_name_, None)
            return r

        for server_name, deferred in server_to_deferred.items():
            logger.debug("Got key lookup lock on %s", server_name)
            self.key_downloads[server_name] = deferred
            deferred.addBoth(rm, server_name)
Example #57
    def stop(self, time_msec, request):
        context = LoggingContext.current_context()

        tag = ""
        if context:
            tag = context.tag

            if context != self.start_context:
                logger.warn(
                    "Context have unexpectedly changed %r, %r",
                    context, self.start_context
                )
                return

        outgoing_responses_counter.inc(request.method, str(request.code))

        response_count.inc(request.method, self.name, tag)

        response_timer.inc_by(
            time_msec - self.start, request.method,
            self.name, tag
        )

        ru_utime, ru_stime = context.get_resource_usage()

        response_ru_utime.inc_by(
            ru_utime, request.method, self.name, tag
        )
        response_ru_stime.inc_by(
            ru_stime, request.method, self.name, tag
        )
        response_db_txn_count.inc_by(
            context.db_txn_count, request.method, self.name, tag
        )
        response_db_txn_duration.inc_by(
            context.db_txn_duration_ms / 1000., request.method, self.name, tag
        )
        response_db_sched_duration.inc_by(
            context.db_sched_duration_ms / 1000., request.method, self.name, tag
        )

        response_size.inc_by(request.sentLength, request.method, self.name, tag)
Example #58
    def __exit__(self, exc_type, exc_val, exc_tb):
        if isinstance(exc_type, Exception) or not self.start_context:
            return

        in_flight.unregister((self.name, ), self._update_in_flight)

        duration = self.clock.time() - self.start

        block_counter.labels(self.name).inc()
        block_timer.labels(self.name).inc(duration)

        context = LoggingContext.current_context()

        if context != self.start_context:
            logger.warn(
                "Context has unexpectedly changed from '%s' to '%s'. (%r)",
                self.start_context,
                context,
                self.name,
            )
            return

        if not context:
            logger.warn("Expected context. (%r)", self.name)
            return

        current = context.get_resource_usage()
        usage = current - self.start_usage
        try:
            block_ru_utime.labels(self.name).inc(usage.ru_utime)
            block_ru_stime.labels(self.name).inc(usage.ru_stime)
            block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
            block_db_txn_duration.labels(self.name).inc(
                usage.db_txn_duration_sec)
            block_db_sched_duration.labels(self.name).inc(
                usage.db_sched_duration_sec)
        except ValueError:
            logger.warn("Failed to save metrics! OLD: %r, NEW: %r",
                        self.start_usage, current)

        if self.created_context:
            self.start_context.__exit__(exc_type, exc_val, exc_tb)
Example #59
    def wait_for_sync_for_user(self,
                               sync_config,
                               since_token=None,
                               timeout=0,
                               full_state=False):
        """Get the sync for a client if we have new data for it now. Otherwise
        wait for new data to arrive on the server. If the timeout expires, then
        return an empty sync result.
        Returns:
            A Deferred SyncResult.
        """

        context = LoggingContext.current_context()
        if context:
            if since_token is None:
                context.tag = "initial_sync"
            elif full_state:
                context.tag = "full_state_sync"
            else:
                context.tag = "incremental_sync"

        if timeout == 0 or since_token is None or full_state:
            # we are going to return immediately, so don't bother calling
            # notifier.wait_for_events.
            result = yield self.current_sync_for_user(
                sync_config,
                since_token,
                full_state=full_state,
            )
            defer.returnValue(result)
        else:

            def current_sync_callback(before_token, after_token):
                return self.current_sync_for_user(sync_config, since_token)

            result = yield self.notifier.wait_for_events(
                sync_config.user.to_string(),
                timeout,
                current_sync_callback,
                from_token=since_token,
            )
            defer.returnValue(result)