Example #1
def inner_fn():
    with PreserveLoggingContext():
        yield complete_lookup
    defer.returnValue(1)
Example #2
def inner_fn():
    with PreserveLoggingContext():
        yield complete_lookup
    return 1
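
Examples #1 and #2 show the same generator in two eras of Twisted style: both are meant to run under the @defer.inlineCallbacks decorator, with #1 using the legacy defer.returnValue(1) (required before Python 3.3 allowed a return with a value inside a generator) and #2 the plain return 1 that newer Python permits. As a rough illustration of the pattern itself, not Synapse's actual implementation, the sketch below captures the core idea of PreserveLoggingContext: swap a target context in on entry and restore the previous one on exit, so the caller's logcontext does not leak to the reactor while the deferred is pending.

from contextlib import contextmanager

_current_context = None  # stand-in for Synapse's per-thread current logcontext

@contextmanager
def preserve_logging_context(new_context=None):
    # Swap in `new_context` (a sentinel, by default) for the duration of the
    # block, then restore whatever context was current before.
    global _current_context
    previous, _current_context = _current_context, new_context
    try:
        yield
    finally:
        _current_context = previous
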
Example #3
def lineReceived(self, line: bytes) -> None:
    """Called when we've received a line"""
    with PreserveLoggingContext(self._logging_context):
        self._parse_and_dispatch_line(line)
Example #4
    async def wait_for_events(
        self,
        user_id: str,
        timeout: int,
        callback: Callable[[StreamToken, StreamToken], Awaitable[T]],
        room_ids=None,
        from_token=StreamToken.START,
    ) -> T:
        """Wait until the callback returns a non empty response or the
        timeout fires.
        """
        user_stream = self.user_to_user_stream.get(user_id)
        if user_stream is None:
            current_token = self.event_sources.get_current_token()
            if room_ids is None:
                room_ids = await self.store.get_rooms_for_user(user_id)
            user_stream = _NotifierUserStream(
                user_id=user_id,
                rooms=room_ids,
                current_token=current_token,
                time_now_ms=self.clock.time_msec(),
            )
            self._register_with_keys(user_stream)

        result = None
        prev_token = from_token
        if timeout:
            end_time = self.clock.time_msec() + timeout

            while not result:
                try:
                    now = self.clock.time_msec()
                    if end_time <= now:
                        break

                    # Now we wait for the _NotifierUserStream to be told there
                    # is a new token.
                    listener = user_stream.new_listener(prev_token)
                    listener.deferred = timeout_deferred(
                        listener.deferred,
                        (end_time - now) / 1000.0,
                        self.hs.get_reactor(),
                    )
                    with PreserveLoggingContext():
                        await listener.deferred

                    current_token = user_stream.current_token

                    result = await callback(prev_token, current_token)
                    if result:
                        break

                    # Update the prev_token to the current_token since nothing
                    # has happened between the old prev_token and the current_token
                    prev_token = current_token
                except defer.TimeoutError:
                    break
                except defer.CancelledError:
                    break

        if result is None:
            # This happens if there was no timeout or if the timeout had
            # already expired.
            current_token = user_stream.current_token
            result = await callback(prev_token, current_token)

        return result
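
A hypothetical caller of wait_for_events might look like the sketch below; check_for_updates and store.get_events_between are illustrative names rather than part of the original API. The callback receives the previous and current stream tokens and should return a falsy value to keep waiting.

async def poll_for_events(notifier, store, user_id: str):
    async def check_for_updates(from_token, to_token):
        # Assumed store helper: fetch whatever changed between the two tokens.
        events = await store.get_events_between(from_token, to_token)
        return events or None  # falsy result => keep blocking until timeout

    # Block for up to 30 seconds, or until the callback returns something truthy.
    return await notifier.wait_for_events(
        user_id, timeout=30_000, callback=check_for_updates
    )
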
Example #5
    async def on_rdata(
        self, stream_name: str, instance_name: str, token: int, rows: list
    ):
        """Called to handle a batch of replication data with a given stream token.

        By default this just pokes the slave store. Can be overridden in subclasses to
        handle more.

        Args:
            stream_name: name of the replication stream for this batch of rows
            instance_name: the instance that wrote the rows.
            token: stream token for this batch of rows
            rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row.
        """
        self.store.process_replication_rows(stream_name, instance_name, token, rows)

        if self.send_handler:
            await self.send_handler.process_replication_rows(stream_name, token, rows)

        if stream_name == TypingStream.NAME:
            self._typing_handler.process_replication_rows(token, rows)
            self.notifier.on_new_event(
                "typing_key", token, rooms=[row.room_id for row in rows]
            )
        elif stream_name == PushRulesStream.NAME:
            self.notifier.on_new_event(
                "push_rules_key", token, users=[row.user_id for row in rows]
            )
        elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME):
            self.notifier.on_new_event(
                "account_data_key", token, users=[row.user_id for row in rows]
            )
        elif stream_name == ReceiptsStream.NAME:
            self.notifier.on_new_event(
                "receipt_key", token, rooms=[row.room_id for row in rows]
            )
            await self._pusher_pool.on_new_receipts(
                token, token, {row.room_id for row in rows}
            )
        elif stream_name == ToDeviceStream.NAME:
            entities = [row.entity for row in rows if row.entity.startswith("@")]
            if entities:
                self.notifier.on_new_event("to_device_key", token, users=entities)
        elif stream_name == DeviceListsStream.NAME:
            all_room_ids = set()  # type: Set[str]
            for row in rows:
                if row.entity.startswith("@"):
                    room_ids = await self.store.get_rooms_for_user(row.entity)
                    all_room_ids.update(room_ids)
            self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids)
        elif stream_name == GroupServerStream.NAME:
            self.notifier.on_new_event(
                "groups_key", token, users=[row.user_id for row in rows]
            )
        elif stream_name == PushersStream.NAME:
            for row in rows:
                if row.deleted:
                    self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                else:
                    await self.start_pusher(row.user_id, row.app_id, row.pushkey)
        elif stream_name == EventsStream.NAME:
            # We shouldn't get multiple rows per token for events stream, so
            # we don't need to optimise this for multiple rows.
            for row in rows:
                if row.type != EventsStreamEventRow.TypeId:
                    continue
                assert isinstance(row, EventsStreamRow)
                assert isinstance(row.data, EventsStreamEventRow)

                if row.data.rejected:
                    continue

                extra_users = ()  # type: Tuple[UserID, ...]
                if row.data.type == EventTypes.Member and row.data.state_key:
                    extra_users = (UserID.from_string(row.data.state_key),)

                max_token = self.store.get_room_max_token()
                event_pos = PersistedEventPosition(instance_name, token)
                self.notifier.on_new_room_event_args(
                    event_pos=event_pos,
                    max_room_stream_token=max_token,
                    extra_users=extra_users,
                    room_id=row.data.room_id,
                    event_type=row.data.type,
                    state_key=row.data.state_key,
                    membership=row.data.membership,
                )

        await self._presence_handler.process_replication_rows(
            stream_name, instance_name, token, rows
        )

        # Notify any waiting deferreds. The list is ordered by position so we
        # just iterate through the list until we reach a position that is
        # greater than the received row position.
        waiting_list = self._streams_to_waiters.get(stream_name, [])

        # Index of the first item with a position after the current token, i.e.
        # we have called all deferreds before this index. If the loop below
        # does not overwrite it, then either a) the list is empty, so this is a
        # no-op, or b) every item in the list was called and the list should be
        # cleared. Setting it to `len(list)` works for both cases.
        index_of_first_deferred_not_called = len(waiting_list)

        for idx, (position, deferred) in enumerate(waiting_list):
            if position <= token:
                try:
                    with PreserveLoggingContext():
                        deferred.callback(None)
                except Exception:
                    # The deferred has been cancelled or timed out.
                    pass
            else:
                # The list is sorted by position so we don't need to continue
                # checking any further entries in the list.
                index_of_first_deferred_not_called = idx
                break

        # Drop all entries in the waiting list that were called in the above
        # loop. (This maintains the order, so there is no need to re-sort.)
        waiting_list[:] = waiting_list[index_of_first_deferred_not_called:]
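
The waiter-pruning logic at the end of on_rdata is worth seeing in isolation. Here is a minimal, self-contained sketch of the same pattern using plain deferreds, with the logcontext handling omitted: waiters are (position, deferred) pairs sorted by position, and everything at or below the new token is fired and dropped in one pass.

from twisted.internet import defer

def fire_waiters(waiting_list, token):
    first_not_called = len(waiting_list)
    for idx, (position, d) in enumerate(waiting_list):
        if position <= token:
            d.callback(None)
        else:
            first_not_called = idx
            break
    waiting_list[:] = waiting_list[first_not_called:]

waiters = [(1, defer.Deferred()), (3, defer.Deferred()), (5, defer.Deferred())]
fire_waiters(waiters, token=3)
assert [pos for pos, _ in waiters] == [5]  # only the position-5 waiter is left
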
Example #6
def run_as_background_process(
    desc: str,
    func: Callable[..., Awaitable[Optional[R]]],
    *args: Any,
    bg_start_span: bool = True,
    **kwargs: Any,
) -> "defer.Deferred[Optional[R]]":
    """Run the given function in its own logcontext, with resource metrics

    This should be used to wrap processes which are fired off to run in the
    background, instead of being associated with a particular request.

    It returns a Deferred which completes when the function completes, but it doesn't
    follow the synapse logcontext rules, which makes it appropriate for passing to
    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
    normal synapse async function).

    Args:
        desc: a description for this background process type
        func: a function, which may return a Deferred or a coroutine
        bg_start_span: Whether to start an opentracing span. Defaults to True.
            Should only be disabled for processes that will not log to or tag
            a span.
        args: positional args for func
        kwargs: keyword args for func

    Returns:
        Deferred which returns the result of func, or `None` if func raises.
        Note that the returned Deferred does not follow the synapse logcontext
        rules.
    """

    async def run() -> Optional[R]:
        with _bg_metrics_lock:
            count = _background_process_counts.get(desc, 0)
            _background_process_counts[desc] = count + 1

        _background_process_start_count.labels(desc).inc()
        _background_process_in_flight_count.labels(desc).inc()

        with BackgroundProcessLoggingContext(desc, count) as context:
            try:
                if bg_start_span:
                    ctx = start_active_span(
                        f"bgproc.{desc}", tags={SynapseTags.REQUEST_ID: str(context)}
                    )
                else:
                    ctx = nullcontext()
                with ctx:
                    return await func(*args, **kwargs)
            except Exception:
                logger.exception(
                    "Background process '%s' threw an exception",
                    desc,
                )
                return None
            finally:
                _background_process_in_flight_count.labels(desc).dec()

    with PreserveLoggingContext():
        # Note that we return a Deferred here so that it can be used in a
        # looping_call and other places that expect a Deferred.
        return defer.ensureDeferred(run())
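
Typical fire-and-forget usage, as the docstring describes, might look like the following; prune_expired_entries is an assumed coroutine used only for illustration.

async def prune_expired_entries() -> None:
    ...  # periodic maintenance work, run in its own logcontext

def start_pruner() -> None:
    # The returned Deferred is deliberately ignored: failures are already
    # logged inside run_as_background_process, and the result is None on error.
    run_as_background_process("prune_expired_entries", prune_expired_entries)
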
Example #7
def messageReceived(self, pattern: str, channel: str, message: str):
    """Received a message from redis."""
    with PreserveLoggingContext(self._logging_context):
        self._parse_and_dispatch_message(message)
Example #8
def nonblocking_function():
    with PreserveLoggingContext():
        yield defer.succeed(None)
Example #9
def fire(evs, exc):
    for _, d in evs:
        if not d.called:
            with PreserveLoggingContext():
                d.errback(exc)
Example #10
    async def on_rdata(self, stream_name: str, instance_name: str, token: int,
                       rows: list):
        """Called to handle a batch of replication data with a given stream token.

        By default this just pokes the slave store. Can be overridden in subclasses to
        handle more.

        Args:
            stream_name: name of the replication stream for this batch of rows
            instance_name: the instance that wrote the rows.
            token: stream token for this batch of rows
            rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row.
        """
        self.store.process_replication_rows(stream_name, instance_name, token,
                                            rows)

        if stream_name == EventsStream.NAME:
            # We shouldn't get multiple rows per token for events stream, so
            # we don't need to optimise this for multiple rows.
            for row in rows:
                if row.type != EventsStreamEventRow.TypeId:
                    continue
                assert isinstance(row, EventsStreamRow)

                event = await self.store.get_event(row.data.event_id,
                                                   allow_rejected=True)
                if event.rejected_reason:
                    continue

                extra_users = ()  # type: Tuple[str, ...]
                if event.type == EventTypes.Member:
                    extra_users = (event.state_key, )
                max_token = self.store.get_room_max_stream_ordering()
                self.notifier.on_new_room_event(event, token, max_token,
                                                extra_users)

            await self.pusher_pool.on_new_notifications(token, token)

        # Notify any waiting deferreds. The list is ordered by position so we
        # just iterate through the list until we reach a position that is
        # greater than the received row position.
        waiting_list = self._streams_to_waiters.get(stream_name, [])

        # Index of the first item with a position after the current token, i.e.
        # we have called all deferreds before this index. If the loop below
        # does not overwrite it, then either a) the list is empty, so this is a
        # no-op, or b) every item in the list was called and the list should be
        # cleared. Setting it to `len(list)` works for both cases.
        index_of_first_deferred_not_called = len(waiting_list)

        for idx, (position, deferred) in enumerate(waiting_list):
            if position <= token:
                try:
                    with PreserveLoggingContext():
                        deferred.callback(None)
                except Exception:
                    # The deferred has been cancelled or timed out.
                    pass
            else:
                # The list is sorted by position so we don't need to continue
                # checking any further entries in the list.
                index_of_first_deferred_not_called = idx
                break

        # Drop all entries in the waiting list that were called in the above
        # loop. (This maintains the order, so there is no need to re-sort.)
        waiting_list[:] = waiting_list[index_of_first_deferred_not_called:]
Example #11
async def get_perspectives(**kwargs):
    self.assertEquals(current_context().request, "11")
    with PreserveLoggingContext():
        await persp_deferred
    return persp_resp
Example #12
def run_as_background_process(desc: str,
                              func,
                              *args,
                              bg_start_span=True,
                              **kwargs):
    """Run the given function in its own logcontext, with resource metrics

    This should be used to wrap processes which are fired off to run in the
    background, instead of being associated with a particular request.

    It returns a Deferred which completes when the function completes, but it doesn't
    follow the synapse logcontext rules, which makes it appropriate for passing to
    clock.looping_call and friends (or for firing-and-forgetting in the middle of a
    normal synapse async function).

    Args:
        desc: a description for this background process type
        func: a function, which may return a Deferred or a coroutine
        bg_start_span: Whether to start an opentracing span. Defaults to True.
            Should only be disabled for processes that will not log to or tag
            a span.
        args: positional args for func
        kwargs: keyword args for func

    Returns: Deferred which returns the result of func, but note that it does not
        follow the synapse logcontext rules.
    """
    async def run():
        with _bg_metrics_lock:
            count = _background_process_counts.get(desc, 0)
            _background_process_counts[desc] = count + 1

        _background_process_start_count.labels(desc).inc()
        _background_process_in_flight_count.labels(desc).inc()

        with BackgroundProcessLoggingContext(desc) as context:
            context.request = "%s-%i" % (desc, count)
            try:
                ctx = noop_context_manager()
                if bg_start_span:
                    ctx = start_active_span(
                        desc, tags={"request_id": context.request})
                with ctx:
                    result = func(*args, **kwargs)

                    if inspect.isawaitable(result):
                        result = await result

                    return result
            except Exception:
                logger.exception(
                    "Background process '%s' threw an exception",
                    desc,
                )
            finally:
                _background_process_in_flight_count.labels(desc).dec()

    with PreserveLoggingContext():
        # Note that we return a Deferred here so that it can be used in a
        # looping_call and other places that expect a Deferred.
        return defer.ensureDeferred(run())
Example #13
async def _send():
    with PreserveLoggingContext():
        # Note that we use the other connection as we can't send
        # commands using the subscription connection.
        await self.outbound_redis_connection.publish(
            self.stream_name, encoded_string)