Example #1
    async def consume_rpcs(self, apis: Sequence[Api]) -> Sequence[RpcMessage]:
        # Get the name of each stream
        streams = ['{}:stream'.format(api.meta.name) for api in apis]
        # Get where we last left off in each stream
        latest_ids = [self._latest_ids.get(stream, '$') for stream in streams]

        logger.debug(LBullets(
            'Consuming RPCs from', items=[
                '{} ({})'.format(s, self._latest_ids.get(s, '$')) for s in streams
            ]
        ))

        pool = await self.get_redis_pool()
        with await pool as redis:
            # TODO: Count/timeout configurable
            stream_messages = await redis.xread(streams, latest_ids=latest_ids, count=10)

        rpc_messages = []
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # See comment on events transport re updating message_id
            self._latest_ids[stream] = message_id
            rpc_messages.append(
                RpcMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received message {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return rpc_messages
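A minimal sketch of the decode_message_fields helper used above (it is not shown in the example). This assumes XREAD returns each message's fields as a bytes-to-bytes mapping; the real helper may also deserialize values.

def decode_message_fields(fields):
    # Assumed behaviour: decode the raw bytes keys/values returned by XREAD into strings
    return {decode(key, 'utf8'): decode(value, 'utf8') for key, value in fields.items()}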
Example #2
    async def _reclaim_lost_messages(self, stream_names: List[str],
                                     consumer_group: str,
                                     expected_events: set):
        """Reclaim messages that other consumers in the group failed to acknowledge"""
        with await self.connection_manager() as redis:
            for stream in stream_names:
                old_messages = await redis.xpending(
                    stream,
                    consumer_group,
                    "-",
                    "+",
                    count=self.reclaim_batch_size)
                timeout = self.acknowledgement_timeout * 1000
                for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                ) in old_messages:
                    message_id = decode(message_id, "utf8")
                    consumer_name = decode(consumer_name, "utf8")

                    if ms_since_last_delivery > timeout:
                        logger.info(
                            L(
                                "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                Bold(message_id),
                                Bold(stream),
                                Bold(consumer_name),
                            ))

                    result = await redis.xclaim(stream, consumer_group,
                                                self.consumer_name,
                                                int(timeout), message_id)
                    for claimed_message_id, fields in result:
                        claimed_message_id = decode(claimed_message_id, "utf8")
                        event_message = self._fields_to_message(
                            fields,
                            expected_events,
                            native_id=claimed_message_id)
                        if not event_message:
                            # Noop message, or a message for an event we don't care about
                            continue
                        logger.debug(
                            LBullets(
                                L(
                                    "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                ),
                                items=dict(
                                    **event_message.get_metadata(),
                                    kwargs=event_message.get_kwargs(),
                                ),
                            ))
                        yield event_message, stream
Example #3
    async def _execute_node(self, pool, command, *args, **kwargs):
        """Execute redis command and returns Future waiting for the answer.

        :param command str
        :param pool obj
        Raises:
        * TypeError if any of args can not be encoded as bytes.
        * ReplyError on redis '-ERR' responses.
        * ProtocolError when response can not be decoded meaning connection
          is broken.
        """
        cmd = decode(command, 'utf-8').lower()
        try:
            with await pool as conn:
                return await getattr(conn, cmd)(*args, **kwargs)
        except ReplyError as err:
            address = parse_moved_response_error(err)
            if address is None:
                raise

            logger.debug('Got MOVED command: {}'.format(err))
            self._moved_count += 1
            if self._moved_count >= self.MAX_MOVED_COUNT:
                await self.initialize()
                pool = self.get_node(command, *args, **kwargs)
                with await pool as conn:
                    return await getattr(conn, cmd)(*args, **kwargs)
            else:
                conn = await self.create_connection(address)
                res = await getattr(conn, cmd)(*args, **kwargs)
                conn.close()
                await conn.wait_closed()
                return res
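The parse_moved_response_error helper used above is not shown. The sketch below is an assumption based on the standard Redis Cluster reply format ("MOVED <slot> <host>:<port>"); the project's actual implementation may differ.

def parse_moved_response_error(err):
    # Hypothetical sketch: extract the target address from a MOVED reply,
    # e.g. "MOVED 3999 127.0.0.1:6381". Return None for any other error.
    parts = str(err).split()
    if len(parts) != 3 or parts[0] != 'MOVED':
        return None
    host, port = parts[2].rsplit(':', 1)
    return host, int(port)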
Example #4
def redis_steam_id_to_datetime(message_id):
    message_id = decode(message_id, "utf8")
    milliseconds, seq = map(int, message_id.split("-"))
    # Treat the sequence value as additional microseconds to ensure correct sequencing
    microseconds = (milliseconds % 1000 * 1000) + seq
    dt = datetime.utcfromtimestamp(milliseconds // 1000).replace(microsecond=microseconds)
    return dt
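For reference, Redis stream IDs have the form '<milliseconds>-<sequence>', so redis_steam_id_to_datetime(b"1514841837000-5") evaluates to datetime(2018, 1, 1, 21, 23, 57, 5). The companion helpers redis_stream_id_add_one and redis_stream_id_subtract_one used in later examples are not shown; the implementations below are plausible sketches only, not the project's actual code.

def redis_stream_id_add_one(message_id):
    # Smallest stream ID strictly greater than message_id: bump the sequence number
    milliseconds, seq = map(int, message_id.split("-"))
    return "{}-{}".format(milliseconds, seq + 1)


def redis_stream_id_subtract_one(message_id):
    # A stream ID strictly smaller than message_id: step the sequence number back,
    # or fall back to the previous millisecond with a large sequence number (assumption)
    milliseconds, seq = map(int, message_id.split("-"))
    if seq > 0:
        return "{}-{}".format(milliseconds, seq - 1)
    return "{}-{}".format(milliseconds - 1, 9999999)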
Example #5
    async def history(
        self,
        api_name,
        event_name,
        start: datetime = None,
        stop: datetime = None,
        start_inclusive: bool = True,
        batch_size: int = 100,
    ) -> AsyncGenerator[EventMessage, None]:
        """Retrieve historical events for the given API

        Will not have any impact on existing consumer groups.
        """
        redis_start = datetime_to_redis_steam_id(start) if start else "-"
        redis_stop = datetime_to_redis_steam_id(stop) if stop else "+"

        if start and not start_inclusive:
            redis_start = redis_stream_id_add_one(redis_start)

        stream_name = self._get_stream_names([(api_name, event_name)])[0]

        logger.debug(
            f"Getting history for stream {stream_name} from {redis_start} ({start}) "
            f"to {redis_stop} ({stop}) in batches of {batch_size}"
        )

        with await self.connection_manager() as redis:
            messages = True
            while messages:
                messages = await redis.xrevrange(
                    stream_name, redis_stop, redis_start, count=batch_size
                )
                if not messages:
                    return
                for message_id, fields in messages:
                    message_id = decode(message_id, "utf8")
                    redis_stop = redis_stream_id_subtract_one(message_id)
                    try:
                        event_message = self._fields_to_message(
                            fields,
                            expected_event_names={event_name},
                            stream=stream_name,
                            native_id=message_id,
                            consumer_group=None,
                        )
                    except (NoopMessage, IgnoreMessage):
                        logger.debug(
                            f"Ignoring NOOP event with ID {message_id} discovered during fetching"
                            " of event history"
                        )
                    else:
                        yield event_message
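A hedged usage sketch of the history() generator above; the API name, event name, and transport variable are illustrative only, not taken from the source. Note that because this variant pages backwards with XREVRANGE, messages are yielded newest-first.

from datetime import datetime, timedelta

async def print_recent_events(transport):
    # Hypothetical usage: walk the last 24 hours of one API/event pair
    since = datetime.utcnow() - timedelta(days=1)
    async for event_message in transport.history("auth", "user_registered", start=since):
        print(event_message.get_metadata(), event_message.get_kwargs())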
Example #6
    async def _consume_rpcs(self, apis: Sequence[Api]) -> Sequence[RpcMessage]:
        # Get the name of each list queue
        queue_keys = ["{}:rpc_queue".format(api.meta.name) for api in apis]

        logger.debug(
            LBullets(
                "Consuming RPCs from",
                items=[
                    "{} ({})".format(s, self._latest_ids.get(s, "$"))
                    for s in queue_keys
                ],
            ))

        with await self.connection_manager() as redis:
            try:
                try:
                    stream, data = await redis.blpop(*queue_keys)
                except RuntimeError:
                    # For some reason aioredis likes to eat the CancelledError and
                    # turn it into a RuntimeError:
                    # https://github.com/aio-libs/aioredis/blob/9f5964/aioredis/connection.py#L184
                    raise asyncio.CancelledError(
                        "aio-redis task was cancelled and decided it should be a RuntimeError"
                    )
            except asyncio.CancelledError:
                # We need to manually close the connection here otherwise the aioredis
                # pool will emit warnings saying that this connection still has pending
                # commands (i.e. the above blocking pop)
                redis.close()
                raise

            stream = decode(stream, "utf8")
            rpc_message = self.deserializer(data)
            expiry_key = f"rpc_expiry_key:{rpc_message.id}"
            key_deleted = await redis.delete(expiry_key)

            if not key_deleted:
                return []

            logger.debug(
                LBullets(
                    L("⬅ Received RPC message on stream {}", Bold(stream)),
                    items=dict(**rpc_message.get_metadata(),
                               kwargs=rpc_message.get_kwargs()),
                ))

            return [rpc_message]
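For context on the rpc_expiry_key handling above: the sending side presumably creates that key with a TTL before pushing the RPC, so a consumer that pops an already-expired message detects this when DELETE returns 0 and drops it. The producer-side sketch below is an assumption, not the project's actual code.

async def send_rpc_sketch(redis, queue_key, rpc_message, serializer, rpc_timeout=5):
    # Hypothetical: mark the RPC as claimable for rpc_timeout seconds...
    await redis.set(f"rpc_expiry_key:{rpc_message.id}", 1, expire=rpc_timeout)
    # ...then push the serialized message onto the list queue that _consume_rpcs() pops from
    await redis.rpush(queue_key, serializer(rpc_message))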
Example #7
    async def history(
        self,
        api_name,
        event_name,
        start: datetime = None,
        stop: datetime = None,
        start_inclusive: bool = True,
    ) -> AsyncGenerator[EventMessage, None]:
        # TODO: Test
        redis_start = datetime_to_redis_steam_id(start) if start else "-"
        redis_stop = datetime_to_redis_steam_id(stop) if stop else "+"

        if start and not start_inclusive:
            redis_start = redis_stream_id_add_one(redis_start)

        stream_name = self._get_stream_names([(api_name, event_name)])[0]
        batch_size = 1000

        logger.debug(
            f"Getting history for stream {stream_name} from {redis_start} ({start}) "
            f"to {redis_stop} ({stop}) in batches of {batch_size}")

        with await self.connection_manager() as redis:
            messages = await redis.xrange(stream_name,
                                          redis_start,
                                          redis_stop,
                                          count=batch_size)
            if not messages:
                return
            for message_id, fields in messages:
                message_id = decode(message_id, "utf8")
                event_message = self._fields_to_message(
                    fields,
                    expected_event_names={event_name},
                    stream=stream_name,
                    native_id=message_id,
                    consumer_group=None,
                )
                if event_message:
                    yield event_message
Example #8
    async def fetch_events(self) -> Tuple[Sequence[EventMessage], Any]:
        pool = await self.get_redis_pool()
        with await pool as redis:
            if not self._streams:
                logger.debug('Event backend has been given no events to consume. Event backend will sleep.')
                self._task = asyncio.ensure_future(asyncio.sleep(3600 * 24 * 365))
            else:
                logger.info(LBullets(
                    'Consuming events from', items={
                        '{} ({})'.format(*v) for v in self._streams.items()
                    }
                ))
                # TODO: Count/timeout
                self._task = asyncio.ensure_future(
                    redis.xread(
                        streams=list(self._streams.keys()),
                        latest_ids=list(self._streams.values()),
                        count=10,  # TODO: Make configurable, add timeout too
                    )
                )

            try:
                stream_messages = await self._task or []
            except asyncio.CancelledError as e:
                if self._reload:
                    # Streams to listen on have changed.
                    # Bail out and let this method get called again,
                    # at which point we'll pickup the new streams.
                    logger.debug('Event transport reloading.')
                    stream_messages = []
                    self._reload = False
                else:
                    raise

        event_messages = []
        latest_ids = {}
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # Keep track of which event ID we are up to. We will store these
            # in consumption_complete(), once we know the events have definitely
            # been consumed.
            latest_ids[stream] = message_id

            # Unfortunately, there is an edge case when BOTH:
            #  1. We are consuming events from 'now' (i.e. event ID '$'), the default
            #  2. There is an unhandled error when processing the FIRST batch of events
            # In that case the next iteration would start again from '$' and we would
            # lose events. We therefore 'subtract one' from the message ID and store
            # that immediately. Subtracting one is imprecise, as there is a SLIM
            # chance we could grab another event in the process. However, if events are
            # being consumed from 'now' then the developer presumably doesn't care about
            # a high level of precision.
            if self._streams[stream] == '$':
                self._streams[stream] = redis_stream_id_subtract_one(message_id)

            event_messages.append(
                EventMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received event {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return event_messages, latest_ids
Example #9
def _decode(s, encoding):
    if encoding:
        return decode(s, encoding)
    return s
Example #10
    async def _reclaim_lost_messages(
            self, stream_names: List[str], consumer_group: str,
            expected_events: set) -> AsyncGenerator[List[EventMessage], None]:
        """Reclaim batches of messages that other consumers in the group failed to acknowledge within a timeout.

        The timeout period is specified by the `acknowledgement_timeout` option.
        """
        with await self.connection_manager() as redis:
            for stream in stream_names:

                old_messages = True
                reclaim_from = None

                # Keep pulling reclaimable messages from Redis until there are none left
                while old_messages:
                    # reclaim_from keeps track of where we are up to in our fetching
                    # of messages
                    if not reclaim_from:
                        # This is our first iteration, so fetch from the start of time
                        reclaim_from = "-"
                    else:
                        # This is a subsequent iteration. XPENDING's 'start' parameter is inclusive,
                        # so we need to add one to the reclaim_from value to ensure we don't get a message
                        # we've already seen
                        reclaim_from = redis_stream_id_add_one(reclaim_from)

                    # Fetch the next batch of messages
                    try:
                        old_messages = await redis.xpending(
                            stream,
                            consumer_group,
                            reclaim_from,
                            "+",
                            count=self.reclaim_batch_size)
                    except ReplyError as e:
                        if "NOGROUP" in str(e):
                            # Group or consumer doesn't exist yet, so stop processing for this loop.
                            break
                        else:
                            raise

                    timeout = self.acknowledgement_timeout * 1000
                    event_messages = []

                    # Try to claim each message
                    for (
                            message_id,
                            consumer_name,
                            ms_since_last_delivery,
                            num_deliveries,
                    ) in old_messages:
                        message_id = decode(message_id, "utf8")
                        consumer_name = decode(consumer_name, "utf8")
                        reclaim_from = message_id

                        # This 'if' is not strictly required as the subsequent call to xclaim
                        # will honor the timeout parameter. However, using this if here allows
                        # for more sane logging from the point of view of the user. Without it
                        # we would report that we were trying to claim messages which were
                        # clearly not timed out yet.
                        if ms_since_last_delivery > timeout:
                            logger.info(
                                L(
                                    "Found timed out event {} in stream {}. Abandoned by {}."
                                    " Attempting to reclaim...",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                ))

                            # *Try* to claim the message...
                            result = await redis.xclaim(
                                stream, consumer_group, self.consumer_name,
                                int(timeout), message_id)

                            # Parse each message we managed to claim
                            for _, fields in result:
                                # Note that sometimes we will claim a message and it will be 'nil'.
                                # In this case the result will be (None, {}). We therefore do not
                                # rely on the values we get back from XCLAIM.
                                # I suspect this may happen if a stream has been trimmed, thereby causing
                                # un-acknowledged messages to be deleted from Redis.
                                try:
                                    event_message = self._fields_to_message(
                                        fields,
                                        expected_events,
                                        stream=stream,
                                        native_id=message_id,
                                        consumer_group=consumer_group,
                                    )
                                except (NoopMessage, IgnoreMessage):
                                    # This listener doesn't need to care about this message, so acknowledge
                                    # it and move on with our lives
                                    logger.debug(
                                        f"Ignoring NOOP event with ID {message_id} discovered"
                                        " during event reclaiming")
                                    await redis.xack(stream, consumer_group,
                                                     message_id)
                                    continue

                                logger.debug(
                                    LBullets(
                                        L(
                                            "⬅ Reclaimed timed out event {} on stream {}. Abandoned"
                                            " by {}.",
                                            Bold(message_id),
                                            Bold(stream),
                                            Bold(consumer_name),
                                        ),
                                        items=dict(
                                            **event_message.get_metadata(),
                                            kwargs=event_message.get_kwargs(),
                                        ),
                                    ))
                                event_messages.append(event_message)

                    # And yield our batch of messages
                    if event_messages:
                        yield event_messages
Example #11
    async def _fetch_new_messages(
            self, streams, consumer_group, expected_events,
            forever) -> AsyncGenerator[List[EventMessage], None]:
        """Coroutine to consume new messages

        The consumption has two stages:

          1. Fetch and yield any messages this consumer is responsible for processing but has yet
             to successfully process. This can happen in cases where a message was
             previously consumed but not acknowledged (i.e. due to an error).
             This is a one-off startup stage.
          2. Wait for new messages to arrive. Yield these messages when they arrive, then
             resume waiting for messages

        See Also:

            _reclaim_lost_messages() - Another coroutine which reclaims messages which timed out
                                       while being processed by other consumers in this group

        """
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)

            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the processes died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )

            event_messages = []
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                stream = decode(stream, "utf8")
                try:
                    event_message = self._fields_to_message(
                        fields,
                        expected_events,
                        stream=stream,
                        native_id=message_id,
                        consumer_group=consumer_group,
                    )
                except (NoopMessage, IgnoreMessage):
                    # This listener doesn't need to care about this message, so acknowledge
                    # it and move on with our lives
                    await redis.xack(stream, consumer_group, message_id)
                    continue

                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(**event_message.get_metadata(),
                                   kwargs=event_message.get_kwargs()),
                    ))
                event_messages.append(event_message)

            if event_messages:
                yield event_messages

            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages

            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                try:
                    stream_messages = await redis.xread_group(
                        group_name=consumer_group,
                        consumer_name=self.consumer_name,
                        streams=list(streams.keys()),
                        # Using ID '>' indicates we only want new messages which have not
                        # been passed to other consumers in this group
                        latest_ids=[">"] * len(streams),
                        count=self.batch_size,
                    )
                except asyncio.CancelledError:
                    # We need to manually close the connection here otherwise the aioredis
                    # pool will emit warnings saying that this connection still has pending
                    # commands (i.e. the above blocking pop)
                    redis.close()
                    raise

                # Handle the messages we have received
                event_messages = []
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    stream = decode(stream, "utf8")
                    try:
                        event_message = self._fields_to_message(
                            fields,
                            expected_events,
                            stream=stream,
                            native_id=message_id,
                            consumer_group=consumer_group,
                        )
                    except (NoopMessage, IgnoreMessage):
                        # This listener doesn't need to care about this message, so acknowledge
                        # it and move on with our lives
                        logger.debug(
                            f"Ignoring NOOP event with ID {message_id} discovered during streaming"
                            " of messages")
                        await redis.xack(stream, consumer_group, message_id)
                        continue

                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(**event_message.get_metadata(),
                                       kwargs=event_message.get_kwargs()),
                        ))

                    event_messages.append(event_message)

                if event_messages:
                    yield event_messages

                if not forever:
                    return
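The _create_consumer_groups() method called by these _fetch_new_messages examples is not included here. This is a minimal sketch, assuming aioredis' xgroup_create and treating a BUSYGROUP reply (group already exists) as success; the real method may differ, for example in how it creates missing streams.

    async def _create_consumer_groups(self, streams, redis, consumer_group):
        for stream, since_id in streams.items():
            if not await redis.exists(stream):
                # Assumption: create the stream first, since XGROUP CREATE fails on
                # missing streams unless MKSTREAM is used
                await redis.xadd(stream, fields={"": ""})
            try:
                await redis.xgroup_create(stream, consumer_group, latest_id=since_id)
            except ReplyError as e:
                # A BUSYGROUP error means the group already exists, which is fine
                if "BUSYGROUP" not in str(e):
                    raise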
Example #12
    async def _fetch_new_messages(
            self, streams, consumer_group, expected_events,
            forever) -> AsyncGenerator[List[EventMessage], None]:
        """Coroutine to consume new messages

        The consumption has two stages:

          1. Fetch and yield any messages this consumer is responsible for processing but has yet
             to successfully process. This can happen in cases where a message was
             previously consumed but not acknowledged (i.e. due to an error).
             This is a one-off startup stage.
          2. Wait for new messages to arrive. Yield these messages when they arrive, then
             resume waiting for messages

        See Also:

            _reclaim_lost_messages() - Another coroutine which reclaims messages which timed out
                                       while being processed by other consumers in this group

        """
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)

            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the processes died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )

            event_messages = []
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                stream = decode(stream, "utf8")
                event_message = self._fields_to_message(
                    fields,
                    expected_events,
                    stream=stream,
                    native_id=message_id,
                    consumer_group=consumer_group,
                )
                if not event_message:
                    # Noop message, or a message for an event we don't care about
                    continue
                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(**event_message.get_metadata(),
                                   kwargs=event_message.get_kwargs()),
                    ))
                event_messages.append(event_message)

            if event_messages:
                yield event_messages

            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages

            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                stream_messages = await redis.xread_group(
                    group_name=consumer_group,
                    consumer_name=self.consumer_name,
                    streams=list(streams.keys()),
                    # Using ID '>' indicates we only want new messages which have not
                    # been passed to other consumers in this group
                    latest_ids=[">"] * len(streams),
                    count=self.batch_size,
                )

                # Handle the messages we have received
                event_messages = []
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    stream = decode(stream, "utf8")
                    event_message = self._fields_to_message(
                        fields,
                        expected_events,
                        stream=stream,
                        native_id=message_id,
                        consumer_group=consumer_group,
                    )
                    if not event_message:
                        # Noop message, or a message for an event we don't care about
                        continue
                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(**event_message.get_metadata(),
                                       kwargs=event_message.get_kwargs()),
                        ))
                    # NOTE: YIELD ALL MESSAGES, NOT JUST ONE
                    event_messages.append(event_message)

                if event_messages:
                    yield event_messages

                if not forever:
                    return
Example #13
    async def _fetch_new_messages(self, streams, consumer_group,
                                  expected_events, forever):
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)

            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the processes died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                event_message = self._fields_to_message(fields,
                                                        expected_events,
                                                        native_id=message_id)
                if not event_message:
                    # Noop message, or a message for an event we don't care about
                    continue
                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(**event_message.get_metadata(),
                                   kwargs=event_message.get_kwargs()),
                    ))
                yield event_message, stream

            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages

            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                stream_messages = await redis.xread_group(
                    group_name=consumer_group,
                    consumer_name=self.consumer_name,
                    streams=list(streams.keys()),
                    # Using ID '>' indicates we only want new messages which have not
                    # been passed to other consumers in this group
                    latest_ids=[">"] * len(streams),
                    count=self.batch_size,
                )

                # Handle the messages we have received
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    event_message = self._fields_to_message(
                        fields, expected_events, native_id=message_id)
                    if not event_message:
                        # Noop message, or a message for an event we don't care about
                        continue
                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(**event_message.get_metadata(),
                                       kwargs=event_message.get_kwargs()),
                        ))
                    yield event_message, stream

                if not forever:
                    return
Example #14
    async def _reclaim_lost_messages(
        self, stream_names: List[str], consumer_group: str, expected_events: set
    ) -> AsyncGenerator[List[EventMessage], None]:
        """Reclaim batches of messages that other consumers in the group failed to acknowledge within a timeout.

        The timeout period is specified by the `acknowledgement_timeout` option.
        """
        with await self.connection_manager() as redis:
            for stream in stream_names:

                old_messages = True
                reclaim_from = None

                # Keep pulling reclaimable messages from Redis until there are none left
                while old_messages:
                    # reclaim_from keeps track of where we are up to in our fetching
                    # of messages
                    if not reclaim_from:
                        # This is our first iteration, so fetch from the start of time
                        reclaim_from = "-"
                    else:
                        # This is a subsequent iteration. XPENDING's 'start' parameter is inclusive,
                        # so we need to add one to the reclaim_from value to ensure we don't get a message
                        # we've already seen
                        reclaim_from = redis_stream_id_add_one(reclaim_from)

                    # Fetch the next batch of messages
                    old_messages = await redis.xpending(
                        stream, consumer_group, reclaim_from, "+", count=self.reclaim_batch_size
                    )

                    timeout = self.acknowledgement_timeout * 1000
                    event_messages = []

                    # Try to claim each message
                    for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                    ) in old_messages:
                        message_id = decode(message_id, "utf8")
                        consumer_name = decode(consumer_name, "utf8")
                        reclaim_from = message_id

                        # This 'if' is not strictly required as the subsequent call to xclaim
                        # will honor the timeout parameter. However, using this if here allows
                        # for more sane logging from the point of view of the user. Without it
                        # we would report that we were trying to claim messages which were
                        # clearly not timed out yet.
                        if ms_since_last_delivery > timeout:
                            logger.info(
                                L(
                                    "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                )
                            )

                            # *Try* to claim the messages...
                            result = await redis.xclaim(
                                stream, consumer_group, self.consumer_name, int(timeout), message_id
                            )

                            # Parse each message we managed to claim
                            for claimed_message_id, fields in result:
                                claimed_message_id = decode(claimed_message_id, "utf8")
                                event_message = self._fields_to_message(
                                    fields,
                                    expected_events,
                                    stream=stream,
                                    native_id=claimed_message_id,
                                    consumer_group=consumer_group,
                                )
                                if not event_message:
                                    # Noop message, or a message for an event we don't care about
                                    continue
                                logger.debug(
                                    LBullets(
                                        L(
                                            "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                            Bold(message_id),
                                            Bold(stream),
                                            Bold(consumer_name),
                                        ),
                                        items=dict(
                                            **event_message.get_metadata(),
                                            kwargs=event_message.get_kwargs(),
                                        ),
                                    )
                                )
                                event_messages.append(event_message)

                    # And yield our batch of messages once the whole pending batch has
                    # been processed, rather than re-yielding the growing list per message
                    if event_messages:
                        yield event_messages