Example #1
    async def _create_consumer_groups(self, streams, redis, consumer_group):
        """Ensure the consumer groups exist

        This means we have to ensure the streams exist too
        """
        for stream, since in streams.items():
            if not await redis.exists(stream):
                # Add a noop to ensure the stream exists
                # TODO: We can now use MKSTREAM, change this logic
                #       Documented here: https://redis.io/topics/streams-intro
                await redis.xadd(stream, fields={"": ""})

            try:
                # Create the group (it may already exist)
                await redis.xgroup_create(stream, consumer_group, latest_id=since)
            except ReplyError as e:
                if "BUSYGROUP" in str(e):
                    # Already exists
                    pass
                else:
                    raise
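
The TODO above notes that Redis now supports the MKSTREAM option to XGROUP CREATE (documented at https://redis.io/topics/streams-intro), which creates the stream as a side effect of creating the group and makes the placeholder XADD unnecessary. A minimal sketch of that simplification, assuming the Redis client in use exposes an `mkstream` flag on `xgroup_create`:

    async def _create_consumer_groups(self, streams, redis, consumer_group):
        """Ensure the consumer groups (and their underlying streams) exist"""
        for stream, since in streams.items():
            try:
                # MKSTREAM creates the stream if it does not already exist,
                # so no noop XADD is needed beforehand
                await redis.xgroup_create(
                    stream, consumer_group, latest_id=since, mkstream=True
                )
            except ReplyError as e:
                # The group already existing is fine; re-raise anything else
                if "BUSYGROUP" not in str(e):
                    raise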
Example #2
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        error_queue: ErrorQueueType,
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        """Consume events for the given APIs"""
        self._sanity_check_listen_for(listen_for)

        consumer_group = f"{self.service_name}-{listener_name}"

        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Cleanup any old groups & consumers
        await self._cleanup(stream_names)

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
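        # A maxsize of 1, combined with the put()/join() handshake in the
        # loops below, provides backpressure: another batch is only fetched
        # once the previous batch has been fully processed.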
        queue = InternalQueue(maxsize=1)
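        # The consume loop waits on this event, so messages reclaimed from
        # failed consumers are delivered before any new messages.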
        initial_reclaiming_complete = asyncio.Event()

        async def consume_loop():
            """Regular event consuming. See _fetch_new_messages()"""
            logger.debug(
                "Will begin consuming events once the initial event reclaiming is complete"
            )
            await initial_reclaiming_complete.wait()
            logger.debug(
                "Event reclaiming is complete, beginning to consume events")

            async for messages in self._fetch_new_messages(
                    streams, consumer_group, expected_events, forever):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        retry_consume_loop = retry_on_redis_connection_failure(
            fn=consume_loop,
            retry_delay=self.consumption_restart_delay,
            action="consuming events")

        async def reclaim_loop():
            """
            Reclaim messages which other consumers have failed to
            processes in reasonable time. See _reclaim_lost_messages()
            """
            while True:
                logger.debug("Checking for any events which need reclaiming")
                async for messages in self._reclaim_lost_messages(
                        stream_names, consumer_group, expected_events):
                    await queue.put(messages)
                    # Wait for the queue to empty before trying to get another message
                    await queue.join()

                initial_reclaiming_complete.set()
                await asyncio.sleep(self.reclaim_interval)

        consume_task = None
        reclaim_task = None

        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(
                queue_exception_checker(retry_consume_loop, error_queue))
            reclaim_task = asyncio.ensure_future(
                queue_exception_checker(reclaim_loop(), error_queue))

            while True:
                try:
                    messages = await queue.get()
                    logger.debug(
                        f"Got batch of {len(messages)} message(s). Yielding messages to Lightbus"
                        " client")
                    yield messages
                    logger.debug(
                        f"Batch of {len(messages)} message(s) was processed by Lightbus client."
                        " Marking as done.")
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we cleanup the tasks we created
            await cancel(consume_task, reclaim_task)
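
For illustration, a hypothetical caller of `consume`. The transport construction, API/event names, and listener name below are placeholder assumptions, not taken from the code above:

    async def listen(transport, error_queue):
        # `consume` is an async generator: each iteration yields a batch of
        # RedisEventMessage objects, and resuming the generator marks the
        # batch as done (the queue.task_done() call above)
        async for messages in transport.consume(
            listen_for=[("my_company.auth", "user_registered")],
            listener_name="welcome_email",
            error_queue=error_queue,
        ):
            for message in messages:
                print("Handling", message)  # stand-in for real processing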