Example #1
    async def setup_async(self, plugins: dict = None):
        """Setup lightbus and get it ready to consume events and/or RPCs

        You should call this manually if you are calling `consume_rpcs()`
        directly. This will be handled for you if you are
        calling `run_forever()`.
        """
        logger.info(
            LBullets(
                "Lightbus is setting up",
                items={
                    "service_name (set with -s or LIGHTBUS_SERVICE_NAME)": Bold(
                        self.config.service_name
                    ),
                    "process_name (with with -p or LIGHTBUS_PROCESS_NAME)": Bold(
                        self.config.process_name
                    ),
                },
            )
        )

        # Log the transport information
        rpc_transport = self.transport_registry.get_rpc_transport("default", default=None)
        result_transport = self.transport_registry.get_result_transport("default", default=None)
        event_transport = self.transport_registry.get_event_transport("default", default=None)
        log_transport_information(
            rpc_transport, result_transport, event_transport, self.schema.schema_transport, logger
        )

        # Load the plugins
        if plugins is None:
            logger.debug("Auto-loading any installed Lightbus plugins...")
            self.plugin_registry.autoload_plugins(self.config)
        else:
            logger.debug("Loading explicitly specified Lightbus plugins....")
            self.plugin_registry.set_plugins(plugins)

        if self.plugin_registry._plugins:
            logger.info(
                LBullets(
                    "Loaded the following plugins ({})".format(len(self.plugin_registry._plugins)),
                    items=self.plugin_registry._plugins,
                )
            )
        else:
            logger.info("No plugins loaded")

        # Load schema
        logger.debug("Loading schema...")
        await self.schema.load_from_bus()

        logger.info(
            LBullets(
                "Loaded the following remote schemas ({})".format(len(self.schema.remote_schemas)),
                items=self.schema.remote_schemas.keys(),
            )
        )

        for transport in self.transport_registry.get_all_transports():
            await transport.open()
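
The docstring above implies a calling order: when driving the client manually rather than via run_forever(), setup must finish before consuming. A minimal sketch of that flow, assuming a hypothetical bus_client object (the driver code below is illustrative, not part of the Lightbus API):

import asyncio

async def main(bus_client):
    # Illustrative only: open transports, load plugins and schema first...
    await bus_client.setup_async()
    # ...then it is safe to call consume_rpcs() directly
    await bus_client.consume_rpcs()

# asyncio.run(main(bus_client))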
Example #2
    async def consume_rpcs(self, apis: Sequence[Api]) -> Sequence[RpcMessage]:
        # Get the name of each stream
        streams = ['{}:stream'.format(api.meta.name) for api in apis]
        # Get where we last left off in each stream
        latest_ids = [self._latest_ids.get(stream, '$') for stream in streams]

        logger.debug(LBullets(
            'Consuming RPCs from', items=[
                '{} ({})'.format(s, self._latest_ids.get(s, '$')) for s in streams
            ]
        ))

        pool = await self.get_redis_pool()
        with await pool as redis:
            # TODO: Count/timeout configurable
            stream_messages = await redis.xread(streams, latest_ids=latest_ids, count=10)

        rpc_messages = []
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # See comment on events transport re updating message_id
            self._latest_ids[stream] = message_id
            rpc_messages.append(
                RpcMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received message {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return rpc_messages
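
The self._latest_ids.get(stream, '$') lookups above act as a per-stream cursor: '$' (only new entries) is used until a real ID has been recorded. A standalone sketch of that bookkeeping, with an invented stream name:

latest_ids = {}

def read_position(stream: str) -> str:
    # "$" asks Redis XREAD for entries newer than the moment of the call
    return latest_ids.get(stream, "$")

def record_progress(stream: str, message_id: str) -> None:
    latest_ids[stream] = message_id

record_progress("support.case:stream", "1526919030474-55")
assert read_position("support.case:stream") == "1526919030474-55"
assert read_position("never.seen:stream") == "$"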
Example #3
    def run_forever(self, *, loop=None, consume_rpcs=True, consume_events=True, plugins=None):
        logger.info(LBullets(
            "Lightbus getting ready to run. Brokers in use",
            items={
                "RPC transport": L(
                    '{}.{}',
                    self.rpc_transport.__module__, Bold(self.rpc_transport.__class__.__name__)
                ),
                "Result transport": L(
                    '{}.{}', self.result_transport.__module__,
                    Bold(self.result_transport.__class__.__name__)
                ),
                "Event transport": L(
                    '{}.{}', self.event_transport.__module__,
                    Bold(self.event_transport.__class__.__name__)
                ),
            }
        ))

        self.setup(plugins=plugins)
        registry.add(LightbusStateApi())
        registry.add(LightbusMetricsApi())

        if consume_rpcs:
            if registry.public():
                logger.info(LBullets(
                    "APIs in registry ({})".format(len(registry.all())),
                    items=registry.all()
                ))
            else:
                if consume_events:
                    logger.warning(
                        "No APIs have been registered, lightbus may still receive events "
                        "but Lightbus will not handle any incoming RPCs"
                    )
                else:
                    logger.error(
                        "No APIs have been registered, yet Lightbus has been configured to only "
                        "handle RPCs. There is therefore nothing for lightbus to do. Exiting."
                    )
                    return

        block(handle_aio_exceptions(
            plugin_hook('before_server_start', bus_client=self, loop=loop)
        ), timeout=5)

        loop = loop or asyncio.get_event_loop()
        self._run_forever(loop, consume_rpcs, consume_events)

        loop.run_until_complete(handle_aio_exceptions(
            plugin_hook('after_server_stopped', bus_client=self, loop=loop)
        ))
Example #4
    async def call_rpc(self, rpc_message: RpcMessage, options: dict):
        queue_key = f"{rpc_message.api_name}:rpc_queue"
        expiry_key = f"rpc_expiry_key:{rpc_message.id}"
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis stream {}", Bold(rpc_message),
                  Bold(queue_key)),
                items=dict(**rpc_message.get_metadata(),
                           kwargs=rpc_message.get_kwargs()),
            ))

        with await self.connection_manager() as redis:
            start_time = time.time()
            print("setting " + expiry_key)
            p = redis.pipeline()
            p.rpush(key=queue_key, value=self.serializer(rpc_message))
            p.set(expiry_key, 1)
            p.expire(expiry_key, timeout=self.rpc_timeout)
            await p.execute()

        logger.debug(
            L(
                "Enqueued message {} in Redis in {} stream {}",
                Bold(rpc_message),
                human_time(time.time() - start_time),
                Bold(queue_key),
            ))
Example #5
    async def send_event(self, event_message: EventMessage, options: dict):
        """Publish an event"""
        stream = self._get_stream_names(
            listen_for=[(event_message.api_name, event_message.event_name)])[0]

        logger.debug(
            LBullets(
                L(
                    "Enqueuing event message {} in Redis stream {}",
                    Bold(event_message),
                    Bold(stream),
                ),
                items=dict(**event_message.get_metadata(),
                           kwargs=event_message.get_kwargs()),
            ))

        # Performance: I suspect getting a connection from the connection manager each time is causing
        # performance issues. Need to confirm.
        with await self.connection_manager() as redis:
            start_time = time.time()
            await redis.xadd(
                stream=stream,
                fields=self.serializer(event_message),
                max_len=self.max_stream_length or None,
                exact_len=False,
            )

        logger.debug(
            L(
                "Enqueued event message {} in Redis in {} stream {}",
                Bold(event_message.canonical_name),
                human_time(time.time() - start_time),
                Bold(stream),
            ))
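
The xadd() call above caps the stream via max_len with exact_len=False, i.e. Redis' approximate MAXLEN trimming. A rough equivalent using redis-py's asyncio client (an assumption; it may not be the client library used here), with an invented stream name and fields:

import redis.asyncio as aioredis  # assumes redis-py >= 4.2

async def publish_event():
    r = aioredis.Redis()
    await r.xadd(
        "auth.user_registered:stream",  # invented name
        {"api_name": "auth", "event_name": "user_registered", "id": "abc123"},
        maxlen=100_000,     # keep roughly the most recent 100k entries
        approximate=True,   # the "~" form of MAXLEN, like exact_len=False above
    )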
Example #6
def log_transport_information(
    rpc_transport: "RpcTransport",
    result_transport: "ResultTransport",
    event_transport: "EventTransport",
    schema_transport: "SchemaTransport",
    logger: Logger,
):

    def stringify_transport(transport):
        if not transport:
            return "None"

        # Rely on the transport to display itself sensibly as a string
        return L("{} @ {}", transport.__class__.__name__, Bold(transport))

    logger.info(
        LBullets(
            "Default transports are setup as follows:",
            items={
                "RPC transport": stringify_transport(rpc_transport),
                "Result transport": stringify_transport(result_transport),
                "Event transport": stringify_transport(event_transport),
                "Schema transport": stringify_transport(schema_transport),
            },
        )
    )
Example #7
    async def _start_server_inner(self, consume_rpcs=True):
        self.api_registry.add(LightbusStateApi())
        self.api_registry.add(LightbusMetricsApi())

        if consume_rpcs:
            logger.info(
                LBullets(
                    "APIs in registry ({})".format(len(self.api_registry.all())),
                    items=self.api_registry.names(),
                )
            )

        # Setup RPC consumption
        consume_rpc_task = None
        if consume_rpcs and self.api_registry.all():
            consume_rpc_task = asyncio.ensure_future(self.consume_rpcs())
            consume_rpc_task.add_done_callback(make_exception_checker(self, die=True))

        # Setup schema monitoring
        monitor_task = asyncio.ensure_future(self.schema.monitor())
        monitor_task.add_done_callback(make_exception_checker(self, die=True))

        logger.info("Executing before_server_start & on_start hooks...")
        await self._execute_hook("before_server_start")
        logger.info("Execution of before_server_start & on_start hooks was successful")

        self._server_tasks = [consume_rpc_task, monitor_task]
Example #8
    async def call_rpc(self, rpc_message: RpcMessage, options: dict,
                       bus_client: "BusClient"):
        queue_key = f"{rpc_message.api_name}:rpc_queue"
        expiry_key = f"rpc_expiry_key:{rpc_message.id}"
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis list {}", Bold(rpc_message),
                  Bold(queue_key)),
                items=dict(**rpc_message.get_metadata(),
                           kwargs=rpc_message.get_kwargs()),
            ))

        start_time = time.time()
        for try_number in range(3, 0, -1):
            last_try = try_number == 1
            try:
                await self._call_rpc(rpc_message, queue_key, expiry_key)
                break
            except (PipelineError, ConnectionClosedError,
                    ConnectionResetError):
                if not last_try:
                    await asyncio.sleep(self.rpc_retry_delay)
                else:
                    raise

        logger.debug(
            L(
                "Enqueued message {} in Redis in {} stream {}",
                Bold(rpc_message),
                human_time(time.time() - start_time),
                Bold(queue_key),
            ))
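
The countdown loop above is a bounded-retry pattern: try a fixed number of times, sleep between attempts, and re-raise on the final failure. The same idea in isolation, with placeholder names and exception types:

import asyncio

async def send_with_retries(send, attempts: int = 3, retry_delay: float = 1.0):
    for try_number in range(attempts, 0, -1):
        last_try = try_number == 1
        try:
            await send()
            break
        except ConnectionError:
            if last_try:
                raise
            await asyncio.sleep(retry_delay)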
Example #9
    async def _consume_rpcs(self, apis: Sequence[Api]) -> Sequence[RpcMessage]:
        # Get the name of each list queue
        queue_keys = ["{}:rpc_queue".format(api.meta.name) for api in apis]

        logger.debug(
            LBullets(
                "Consuming RPCs from",
                items=[
                    "{} ({})".format(s, self._latest_ids.get(s, "$"))
                    for s in queue_keys
                ],
            ))

        with await self.connection_manager() as redis:
            try:
                try:
                    stream, data = await redis.blpop(*queue_keys)
                except RuntimeError:
                    # For some reason aio-redis likes to eat the CancelledError and
                    # turn it into a Runtime error:
                    # https://github.com/aio-libs/aioredis/blob/9f5964/aioredis/connection.py#L184
                    raise asyncio.CancelledError(
                        "aio-redis task was cancelled and decided it should be a RuntimeError"
                    )
            except asyncio.CancelledError:
                # We need to manually close the connection here otherwise the aioredis
                # pool will emit warnings saying that this connection still has pending
                # commands (i.e. the above blocking pop)
                redis.close()
                raise

            stream = decode(stream, "utf8")
            rpc_message = self.deserializer(data)
            expiry_key = f"rpc_expiry_key:{rpc_message.id}"
            key_deleted = await redis.delete(expiry_key)

            if not key_deleted:
                return []

            logger.debug(
                LBullets(
                    L("⬅ Received RPC message on stream {}", Bold(stream)),
                    items=dict(**rpc_message.get_metadata(),
                               kwargs=rpc_message.get_kwargs()),
                ))

            return [rpc_message]
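
Examples #4 and #9 form a handshake around the expiry key: the caller RPUSHes the RPC and SETs rpc_expiry_key:<id> with a TTL; the consumer BLPOPs the RPC and DELETEs that key, and if the delete returns 0 the key has already expired, so the RPC is dropped as stale. A condensed sketch of both halves, assuming redis-py's asyncio client rather than whichever client this code uses, with invented queue names:

import redis.asyncio as aioredis  # assumes redis-py >= 4.2

async def enqueue_rpc(r: aioredis.Redis, payload: bytes, rpc_id: str, timeout: int):
    pipe = r.pipeline()
    pipe.rpush("my_api:rpc_queue", payload)
    pipe.set(f"rpc_expiry_key:{rpc_id}", 1, ex=timeout)
    await pipe.execute()

async def pop_rpc(r: aioredis.Redis, extract_rpc_id):
    _, payload = await r.blpop(["my_api:rpc_queue"])
    rpc_id = extract_rpc_id(payload)  # stand-in for deserialisation
    if not await r.delete(f"rpc_expiry_key:{rpc_id}"):
        return None  # key already expired: the caller gave up, drop the RPC
    return payload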
Example #10
    async def _reclaim_lost_messages(self, stream_names: List[str],
                                     consumer_group: str,
                                     expected_events: set):
        """Reclaim messages that other consumers in the group failed to acknowledge"""
        with await self.connection_manager() as redis:
            for stream in stream_names:
                old_messages = await redis.xpending(
                    stream,
                    consumer_group,
                    "-",
                    "+",
                    count=self.reclaim_batch_size)
                timeout = self.acknowledgement_timeout * 1000
                for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                ) in old_messages:
                    message_id = decode(message_id, "utf8")
                    consumer_name = decode(consumer_name, "utf8")

                    if ms_since_last_delivery > timeout:
                        logger.info(
                            L(
                                "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                Bold(message_id),
                                Bold(stream),
                                Bold(consumer_name),
                            ))

                    result = await redis.xclaim(stream, consumer_group,
                                                self.consumer_name,
                                                int(timeout), message_id)
                    for claimed_message_id, fields in result:
                        claimed_message_id = decode(claimed_message_id, "utf8")
                        event_message = self._fields_to_message(
                            fields,
                            expected_events,
                            native_id=claimed_message_id)
                        if not event_message:
                            # noop message, or a message for an event we don't care about
                            continue
                        logger.debug(
                            LBullets(
                                L(
                                    "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                ),
                                items=dict(
                                    **event_message.get_metadata(),
                                    kwargs=event_message.get_kwargs(),
                                ),
                            ))
                        yield event_message, stream
Example #11
    async def lazy_load_now(self):
        """Perform lazy tasks immediately

        When lightbus is used as a client it performs network tasks
        lazily. This speeds up import of your bus module, and prevents
        getting surprising errors at import time.

        However, in some cases you may wish to hurry up these lazy tasks
        (or perform them at a known point), in which case you can call this
        method to execute them immediately.
        """
        if self._lazy_load_complete:
            return

        # 1. Load the schema
        logger.debug("Loading schema...")
        await self.schema.ensure_loaded_from_bus()

        logger.info(
            LBullets(
                "Loaded the following remote schemas ({})".format(
                    len(self.schema.remote_schemas)),
                items=self.schema.remote_schemas.keys(),
            ))

        # 2. Ensure the schema transport is loaded (other transports will be
        #    loaded as the need arises, but we need schema information from the get-go)
        await self.schema.ensure_loaded_from_bus()

        # 3. Add any local APIs to the schema
        for api in self.api_registry.all():
            await self.schema.add_api(api)

        logger.info(
            LBullets(
                "Loaded the following local schemas ({})".format(
                    len(self.schema.local_schemas)),
                items=self.schema.local_schemas.keys(),
            ))

        # 4. Done
        self._lazy_load_complete = True
Example #12
def log_welcome_message(
    logger: Logger,
    transport_registry: TransportRegistry,
    schema: "Schema",
    plugin_registry: PluginRegistry,
    config: Config,
):
    """Show the worker-startup welcome message
    """
    logger.info(
        LBullets(
            "Lightbus is setting up",
            items={
                "service_name (set with -s or LIGHTBUS_SERVICE_NAME)":
                Bold(config.service_name),
                "process_name (with with -p or LIGHTBUS_PROCESS_NAME)":
                Bold(config.process_name),
            },
        ))

    # Log the transport information
    rpc_transport = transport_registry.get_rpc_transport("default",
                                                         default=None)
    result_transport = transport_registry.get_result_transport("default",
                                                               default=None)
    event_transport = transport_registry.get_event_transport("default",
                                                             default=None)
    log_transport_information(rpc_transport, result_transport, event_transport,
                              schema.schema_transport, logger)

    if plugin_registry._plugins:
        logger.info(
            LBullets(
                "Loaded the following plugins ({})".format(
                    len(plugin_registry._plugins)),
                items=plugin_registry._plugins,
            ))
    else:
        logger.info("No plugins loaded")
Example #13
    def _register_listener(self, events: List[Tuple[str, str]]):
        """A context manager to help keep track of what the bus is listening for"""
        logger.info(
            LBullets(f"Registering listener for",
                     items=[Bold(f"{a}.{e}") for a, e in events]))

        for api_name, event_name in events:
            key = (api_name, event_name)
            self._listeners.setdefault(key, 0)
            self._listeners[key] += 1

        yield

        for api_name, event_name in events:
            key = (api_name, event_name)
            self._listeners[key] -= 1
            if not self._listeners[key]:
                self._listeners.pop(key)
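
The method above yields exactly once in the middle, so it is presumably wrapped by a context-manager decorator elsewhere. A self-contained sketch of the same reference-counting idea, using contextlib directly and a module-level dict in place of self._listeners:

from contextlib import contextmanager
from typing import Dict, List, Tuple

listeners: Dict[Tuple[str, str], int] = {}

@contextmanager
def register_listener(events: List[Tuple[str, str]]):
    for key in events:
        listeners[key] = listeners.get(key, 0) + 1
    try:
        yield
    finally:
        for key in events:
            listeners[key] -= 1
            if not listeners[key]:
                listeners.pop(key)

with register_listener([("auth", "user_registered")]):
    assert listeners[("auth", "user_registered")] == 1
assert ("auth", "user_registered") not in listeners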
Example #14
    def setup(self, plugins: dict=None):
        """Setup lightbus and get it ready to consume events and/or RPCs

        You should call this manually if you are calling `consume_rpcs()` or
        `consume_events()` directly. This will be handled for you if you are
        calling `run_forever()`.
        """
        if plugins is None:
            logger.debug("Auto-loading any installed Lightbus plugins...")
            plugins = autoload_plugins()
        else:
            logger.debug("Loading explicitly specified Lightbus plugins....")
            manually_set_plugins(plugins)

        if plugins:
            logger.info(LBullets("Loaded the following plugins ({})".format(len(plugins)), items=plugins))
        else:
            logger.info("No plugins loaded")
Example #15
    async def call_rpc(self, rpc_message: RpcMessage, options: dict):
        stream = '{}:stream'.format(rpc_message.api_name)
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis stream {}", Bold(rpc_message), Bold(stream)),
                items=rpc_message.to_dict()
            )
        )

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            # TODO: MAXLEN
            await redis.xadd(stream=stream, fields=encode_message_fields(rpc_message.to_dict()))

        logger.info(L(
            "Enqueued message {} in Redis in {} stream {}",
            Bold(rpc_message), human_time(time.time() - start_time), Bold(stream)
        ))
Example #16
    async def send_event(self, event_message: EventMessage, options: dict):
        """Publish an event"""
        stream = '{}.{}:stream'.format(event_message.api_name, event_message.event_name)
        logger.debug(
            LBullets(
                L("Enqueuing event message {} in Redis stream {}", Bold(event_message), Bold(stream)),
                items=event_message.to_dict()
            )
        )

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            # TODO: MAXLEN
            await redis.xadd(stream=stream, fields=encode_message_fields(event_message.to_dict()))

        logger.info(L(
            "Enqueued event message {} in Redis in {} stream {}",
            Bold(event_message), human_time(time.time() - start_time), Bold(stream)
        ))
Example #17
    def run_forever(self, *, consume_rpcs=True):
        registry.add(LightbusStateApi())
        registry.add(LightbusMetricsApi())

        if consume_rpcs:
            logger.info(
                LBullets("APIs in registry ({})".format(len(registry.all())),
                         items=registry.names()))

        block(self._plugin_hook("before_server_start"), timeout=5)

        self._run_forever(consume_rpcs)

        self.loop.run_until_complete(self._plugin_hook("after_server_stopped"))

        # Close the bus (which will in turn close the transports)
        self.close()

        # See if we've set the exit code on the event loop
        if hasattr(self.loop, "lightbus_exit_code"):
            raise SystemExit(self.loop.lightbus_exit_code)
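
The final lines rely on a convention: during shutdown an exit code may be stashed on the event loop, and run_forever() converts it into SystemExit once everything is torn down. A minimal sketch of that convention with an invented attribute name:

import asyncio

def run_and_exit(main_coro):
    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(main_coro)
    finally:
        loop.close()
    exit_code = getattr(loop, "exit_code", None)  # attribute name is invented
    if exit_code is not None:
        raise SystemExit(exit_code)

async def main():
    # e.g. set by error handling somewhere deep in the running code
    asyncio.get_running_loop().exit_code = 1

# run_and_exit(main())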
Example #18
    async def call_rpc(self, rpc_message: RpcMessage, options: dict):
        """Emit a call to a remote procedure

        This only sends the request, it does not await any result (see RedisResultTransport)
        """
        queue_key = f"{rpc_message.api_name}:rpc_queue"
        expiry_key = f"rpc_expiry_key:{rpc_message.id}"
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis list {}", Bold(rpc_message),
                  Bold(queue_key)),
                items=dict(**rpc_message.get_metadata(),
                           kwargs=rpc_message.get_kwargs()),
            ))

        start_time = time.time()
        for try_number in range(3, 0, -1):
            last_try = try_number == 1
            try:
                await self._call_rpc(rpc_message, queue_key, expiry_key)
                break
            except (PipelineError, ConnectionClosedError,
                    ConnectionResetError) as e:
                if not last_try:
                    logger.debug(
                        f"Retrying sending of message. Will retry {try_number} more times. "
                        f"Error was {type(e).__name__}: {e}")
                    await asyncio.sleep(self.rpc_retry_delay)
                else:
                    raise

        logger.debug(
            L(
                "Enqueued message {} in Redis in {} stream {}",
                Bold(rpc_message),
                human_time(time.time() - start_time),
                Bold(queue_key),
            ))
Example #19
    async def _reclaim_lost_messages(
        self, stream_names: List[str], consumer_group: str, expected_events: set
    ) -> AsyncGenerator[List[EventMessage], None]:
        """Reclaim batches of messages that other consumers in the group failed to acknowledge within a timeout.

        The timeout period is specified by the `acknowledgement_timeout` option.
        """
        with await self.connection_manager() as redis:
            for stream in stream_names:

                old_messages = True
                reclaim_from = None

                # Keep pulling reclaimable messages from Redis until there are none left
                while old_messages:
                    # reclaim_from keeps track of where we are up to in our fetching
                    # of messages
                    if not reclaim_from:
                        # This is our first iteration, so fetch from the start of time
                        reclaim_from = "-"
                    else:
                        # This is a subsequent iteration. XPENDING's 'start' parameter is inclusive,
                        # so we need to add one to the reclaim_from value to ensure we don't get a message
                        # we've already seen
                        reclaim_from = redis_stream_id_add_one(reclaim_from)

                    # Fetch the next batch of messages
                    old_messages = await redis.xpending(
                        stream, consumer_group, reclaim_from, "+", count=self.reclaim_batch_size
                    )

                    timeout = self.acknowledgement_timeout * 1000
                    event_messages = []

                    # Try to claim each message
                    for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                    ) in old_messages:
                        message_id = decode(message_id, "utf8")
                        consumer_name = decode(consumer_name, "utf8")
                        reclaim_from = message_id

                        # This 'if' is not strictly required as the subsequent call to xclaim
                        # will honor the timeout parameter. However, using this if here allows
                        # for more sane logging from the point of view of the user. Without it
                        # we would report that we were trying to claim messages which were
                        # clearly not timed out yet.
                        if ms_since_last_delivery > timeout:
                            logger.info(
                                L(
                                    "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                )
                            )

                            # *Try* to claim the message...
                            result = await redis.xclaim(
                                stream, consumer_group, self.consumer_name, int(timeout), message_id
                            )

                            # Parse each message we managed to claim
                            for claimed_message_id, fields in result:
                                claimed_message_id = decode(claimed_message_id, "utf8")
                                event_message = self._fields_to_message(
                                    fields,
                                    expected_events,
                                    stream=stream,
                                    native_id=claimed_message_id,
                                    consumer_group=consumer_group,
                                )
                                if not event_message:
                                    # noop message, or a message for an event we don't care about
                                    continue
                                logger.debug(
                                    LBullets(
                                        L(
                                            "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                            Bold(message_id),
                                            Bold(stream),
                                            Bold(consumer_name),
                                        ),
                                        items=dict(
                                            **event_message.get_metadata(),
                                            kwargs=event_message.get_kwargs(),
                                        ),
                                    )
                                )
                                event_messages.append(event_message)

                            # And yield our batch of messages
                            if event_messages:
                                yield event_messages
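
redis_stream_id_add_one() is relied upon above but not shown. Since Redis stream IDs take the form "<milliseconds>-<sequence>" and XPENDING's start argument is inclusive, a helper along these lines (a guess at its behaviour, not the actual implementation) is enough to resume just after the last ID seen:

def redis_stream_id_add_one(message_id: str) -> str:
    # "1526919030474-55" -> "1526919030474-56"
    milliseconds, _, sequence = message_id.partition("-")
    return f"{milliseconds}-{int(sequence) + 1}"

assert redis_stream_id_add_one("1526919030474-55") == "1526919030474-56"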
Example #20
    async def _reclaim_lost_messages(
            self, stream_names: List[str], consumer_group: str,
            expected_events: set) -> AsyncGenerator[List[EventMessage], None]:
        """Reclaim batches of messages that other consumers in the group failed to acknowledge within a timeout.

        The timeout period is specified by the `acknowledgement_timeout` option.
        """
        with await self.connection_manager() as redis:
            for stream in stream_names:

                old_messages = True
                reclaim_from = None

                # Keep pulling reclaimable messages from Redis until there are none left
                while old_messages:
                    # reclaim_from keeps track of where we are up to in our fetching
                    # of messages
                    if not reclaim_from:
                        # This is our first iteration, so fetch from the start of time
                        reclaim_from = "-"
                    else:
                        # This is a subsequent iteration. XPENDING's 'start' parameter is inclusive,
                        # so we need to add one to the reclaim_from value to ensure we don't get a message
                        # we've already seen
                        reclaim_from = redis_stream_id_add_one(reclaim_from)

                    # Fetch the next batch of messages
                    try:
                        old_messages = await redis.xpending(
                            stream,
                            consumer_group,
                            reclaim_from,
                            "+",
                            count=self.reclaim_batch_size)
                    except ReplyError as e:
                        if "NOGROUP" in str(e):
                            # Group or consumer doesn't exist yet, so stop processing for this loop.
                            break
                        else:
                            raise

                    timeout = self.acknowledgement_timeout * 1000
                    event_messages = []

                    # Try to claim each message
                    for (
                            message_id,
                            consumer_name,
                            ms_since_last_delivery,
                            num_deliveries,
                    ) in old_messages:
                        message_id = decode(message_id, "utf8")
                        consumer_name = decode(consumer_name, "utf8")
                        reclaim_from = message_id

                        # This 'if' is not strictly required as the subsequent call to xclaim
                        # will honor the timeout parameter. However, using this if here allows
                        # for more sane logging from the point of view of the user. Without it
                        # we would report that we were trying to claim messages which were
                        # clearly not timed out yet.
                        if ms_since_last_delivery > timeout:
                            logger.info(
                                L(
                                    "Found timed out event {} in stream {}. Abandoned by {}."
                                    " Attempting to reclaim...",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                ))

                            # *Try* to claim the message...
                            result = await redis.xclaim(
                                stream, consumer_group, self.consumer_name,
                                int(timeout), message_id)

                            # Parse each message we managed to claim
                            for _, fields in result:
                                # Note that sometimes we will claim a message and it will be 'nil'.
                                # In this case the result will be (None, {}). We therefore do not
                                # rely on the values we get back from XCLAIM.
                                # I suspect this may happen if a stream has been trimmed, thereby causing
                                # un-acknowledged messages to be deleted from Redis.
                                try:
                                    event_message = self._fields_to_message(
                                        fields,
                                        expected_events,
                                        stream=stream,
                                        native_id=message_id,
                                        consumer_group=consumer_group,
                                    )
                                except (NoopMessage, IgnoreMessage):
                                    # This listener doesn't need to care about this message, so acknowledge
                                    # it and move on with our lives
                                    logger.debug(
                                        f"Ignoring NOOP event with ID {message_id} discovered"
                                        " during event reclaiming")
                                    await redis.xack(stream, consumer_group,
                                                     message_id)
                                    continue

                                logger.debug(
                                    LBullets(
                                        L(
                                            "⬅ Reclaimed timed out event {} on stream {}. Abandoned"
                                            " by {}.",
                                            Bold(message_id),
                                            Bold(stream),
                                            Bold(consumer_name),
                                        ),
                                        items=dict(
                                            **event_message.get_metadata(),
                                            kwargs=event_message.get_kwargs(),
                                        ),
                                    ))
                                event_messages.append(event_message)

                    # And yield our batch of messages
                    if event_messages:
                        yield event_messages
Example #21
    async def _fetch_new_messages(
            self, streams, consumer_group, expected_events,
            forever) -> AsyncGenerator[List[EventMessage], None]:
        """Coroutine to consume new messages

        The consumption has two stages:

          1. Fetch and yield any messages this consumer is responsible for processing but has yet
             to successfully process. This can happen in cases where a message was
             previously consumed but not acknowledged (i.e. due to an error).
             This is a one-off startup stage.
          2. Wait for new messages to arrive. Yield these messages when they arrive, then
             resume waiting for messages

        See Also:

            _reclaim_lost_messages() - Another coroutine which reclaims messages which timed out
                                       while being processed by other consumers in this group

        """
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)

            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the process died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )

            event_messages = []
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                stream = decode(stream, "utf8")
                try:
                    event_message = self._fields_to_message(
                        fields,
                        expected_events,
                        stream=stream,
                        native_id=message_id,
                        consumer_group=consumer_group,
                    )
                except (NoopMessage, IgnoreMessage):
                    # This listener doesn't need to care about this message, so acknowledge
                    # it and move on with our lives
                    await redis.xack(stream, consumer_group, message_id)
                    continue

                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(**event_message.get_metadata(),
                                   kwargs=event_message.get_kwargs()),
                    ))
                event_messages.append(event_message)

            if event_messages:
                yield event_messages

            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages

            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                try:
                    stream_messages = await redis.xread_group(
                        group_name=consumer_group,
                        consumer_name=self.consumer_name,
                        streams=list(streams.keys()),
                        # Using ID '>' indicates we only want new messages which have not
                        # been passed to other consumers in this group
                        latest_ids=[">"] * len(streams),
                        count=self.batch_size,
                    )
                except asyncio.CancelledError:
                    # We need to manually close the connection here otherwise the aioredis
                    # pool will emit warnings saying that this connection still has pending
                    # commands (i.e. the above blocking pop)
                    redis.close()
                    raise

                # Handle the messages we have received
                event_messages = []
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    stream = decode(stream, "utf8")
                    try:
                        event_message = self._fields_to_message(
                            fields,
                            expected_events,
                            stream=stream,
                            native_id=message_id,
                            consumer_group=consumer_group,
                        )
                    except (NoopMessage, IgnoreMessage):
                        # This listener doesn't need to care about this message, so acknowledge
                        # it and move on with our lives
                        logger.debug(
                            f"Ignoring NOOP event with ID {message_id} discovered during streaming"
                            " of messages")
                        await redis.xack(stream, consumer_group, message_id)
                        continue

                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(**event_message.get_metadata(),
                                       kwargs=event_message.get_kwargs()),
                        ))

                    event_messages.append(event_message)

                if event_messages:
                    yield event_messages

                if not forever:
                    return
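
The docstring's two stages (drain our own unacknowledged backlog once, then block for new deliveries) can be expressed independently of Redis. A schematic version with stand-in fetch callables:

from typing import AsyncGenerator, Awaitable, Callable, List

async def two_stage_consume(
    fetch_pending: Callable[[], Awaitable[List[str]]],
    fetch_new: Callable[[], Awaitable[List[str]]],
    forever: bool = True,
) -> AsyncGenerator[List[str], None]:
    # Stage 1: messages previously delivered to this consumer but never acknowledged
    backlog = await fetch_pending()
    if backlog:
        yield backlog
    # Stage 2: wait for new deliveries and yield them batch by batch
    while True:
        batch = await fetch_new()
        if batch:
            yield batch
        if not forever:
            return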
Example #22
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        error_queue: ErrorQueueType,
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        """Consume events for the given APIs"""
        self._sanity_check_listen_for(listen_for)

        consumer_group = f"{self.service_name}-{listener_name}"

        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Cleanup any old groups & consumers
        await self._cleanup(stream_names)

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = InternalQueue(maxsize=1)
        initial_reclaiming_complete = asyncio.Event()

        async def consume_loop():
            """Regular event consuming. See _fetch_new_messages()"""
            logger.debug(
                "Will begin consuming events once the initial event reclaiming is complete"
            )
            await initial_reclaiming_complete.wait()
            logger.debug(
                "Event reclaiming is complete, beginning to consume events")

            async for messages in self._fetch_new_messages(
                    streams, consumer_group, expected_events, forever):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        retry_consume_loop = retry_on_redis_connection_failure(
            fn=consume_loop,
            retry_delay=self.consumption_restart_delay,
            action="consuming events")

        async def reclaim_loop():
            """
            Reclaim messages which other consumers have failed to
            process in a reasonable time. See _reclaim_lost_messages()
            """
            while True:
                logger.debug("Checking for any events which need reclaiming")
                async for messages in self._reclaim_lost_messages(
                        stream_names, consumer_group, expected_events):
                    await queue.put(messages)
                    # Wait for the queue to empty before trying to get another message
                    await queue.join()

                initial_reclaiming_complete.set()
                await asyncio.sleep(self.reclaim_interval)

        consume_task = None
        reclaim_task = None

        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(
                queue_exception_checker(retry_consume_loop, error_queue))
            reclaim_task = asyncio.ensure_future(
                queue_exception_checker(reclaim_loop(), error_queue))

            while True:
                try:
                    messages = await queue.get()
                    logger.debug(
                        f"Got batch of {len(messages)} message(s). Yielding messages to Lightbus"
                        " client")
                    yield messages
                    logger.debug(
                        f"Batch of {len(messages)} message(s) was processed by Lightbus client."
                        " Marking as done.")
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we cleanup the tasks we created
            await cancel(consume_task, reclaim_task)
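
The InternalQueue(maxsize=1) plus queue.join() combination above gives both producer loops backpressure: each waits for its batch to be fully processed before fetching more. The same mechanics with a plain asyncio.Queue and two trivial producers:

import asyncio

async def producer(queue: asyncio.Queue, name: str) -> None:
    for i in range(3):
        await queue.put([f"{name}-{i}"])  # hand over one batch...
        await queue.join()                # ...and wait until it has been handled

async def main() -> None:
    queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    producers = [
        asyncio.ensure_future(producer(queue, "fetch")),
        asyncio.ensure_future(producer(queue, "reclaim")),
    ]
    for _ in range(6):
        batch = await queue.get()
        print("handled", batch)
        queue.task_done()  # releases the producer's join()
    await asyncio.gather(*producers)

asyncio.run(main())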
Example #23
    async def _fetch_new_messages(
            self, streams, consumer_group, expected_events,
            forever) -> AsyncGenerator[List[EventMessage], None]:
        """Coroutine to consume new messages

        The consumption has two stages:

          1. Fetch and yield any messages this consumer is responsible for processing but has yet
             to successfully process. This can happen in cases where a message was
             previously consumed but not acknowledged (i.e. due to an error).
             This is a one-off startup stage.
          2. Wait for new messages to arrive. Yield these messages when they arrive, then
             resume waiting for messages

        See Also:

            _reclaim_lost_messages() - Another coroutine which reclaims messages which timed out
                                       while being processed by other consumers in this group

        """
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)

            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the process died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )

            event_messages = []
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                stream = decode(stream, "utf8")
                event_message = self._fields_to_message(
                    fields,
                    expected_events,
                    stream=stream,
                    native_id=message_id,
                    consumer_group=consumer_group,
                )
                if not event_message:
                    # noop message, or a message for an event we don't care about
                    continue
                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(**event_message.get_metadata(),
                                   kwargs=event_message.get_kwargs()),
                    ))
                event_messages.append(event_message)

            if event_messages:
                yield event_messages

            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages

            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                stream_messages = await redis.xread_group(
                    group_name=consumer_group,
                    consumer_name=self.consumer_name,
                    streams=list(streams.keys()),
                    # Using ID '>' indicates we only want new messages which have not
                    # been passed to other consumers in this group
                    latest_ids=[">"] * len(streams),
                    count=self.batch_size,
                )

                # Handle the messages we have received
                event_messages = []
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    stream = decode(stream, "utf8")
                    event_message = self._fields_to_message(
                        fields,
                        expected_events,
                        stream=stream,
                        native_id=message_id,
                        consumer_group=consumer_group,
                    )
                    if not event_message:
                        # noop message, or a message for an event we don't care about
                        continue
                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(**event_message.get_metadata(),
                                       kwargs=event_message.get_kwargs()),
                        ))
                    # NOTE: YIELD ALL MESSAGES, NOT JUST ONE
                    event_messages.append(event_message)

                if event_messages:
                    yield event_messages

                if not forever:
                    return
Example #24
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        bus_client: "BusClient",
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        # TODO: Cleanup consumer groups
        self._sanity_check_listen_for(listen_for)

        consumer_group = f"{self.service_name}-{listener_name}"

        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = asyncio.Queue(maxsize=1)

        async def consume_loop():
            # Regular event consuming. See _fetch_new_messages()
            while True:
                try:
                    async for messages in self._fetch_new_messages(
                            streams, consumer_group, expected_events, forever):
                        await queue.put(messages)
                        # Wait for the queue to empty before trying to get another message
                        await queue.join()
                except (ConnectionClosedError, ConnectionResetError):
                    # ConnectionClosedError is from aioredis. However, sometimes the connection
                    # can die outside of aioredis, in which case we get a builtin ConnectionResetError.
                    logger.warning(
                        f"Redis connection lost while consuming events, reconnecting "
                        f"in {self.consumption_restart_delay} seconds...")
                    await asyncio.sleep(self.consumption_restart_delay)

        async def reclaim_loop():
            # Reclaim messages which other consumers have failed to
            # process in a reasonable time. See _reclaim_lost_messages()

            await asyncio.sleep(self.acknowledgement_timeout)
            async for messages in self._reclaim_lost_messages(
                    stream_names, consumer_group, expected_events):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        consume_task = None
        reclaim_task = None

        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(consume_loop())
            reclaim_task = asyncio.ensure_future(reclaim_loop())

            # Make sure we surface any exceptions that occur in either task
            consume_task.add_done_callback(make_exception_checker(bus_client))
            reclaim_task.add_done_callback(make_exception_checker(bus_client))

            while True:
                try:
                    messages = await queue.get()
                    yield messages
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we cleanup the tasks we created
            await cancel(consume_task, reclaim_task)
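
consume_loop() above wraps its async-for in an endless reconnect loop: a dropped connection logs a warning, sleeps, and starts again from scratch. As a free-standing pattern with stand-in names:

import asyncio
import logging

logger = logging.getLogger(__name__)

async def restart_on_disconnect(consume, restart_delay: float) -> None:
    # `consume` is any coroutine function that raises when the connection dies
    while True:
        try:
            await consume()
        except ConnectionError:
            logger.warning("Connection lost, restarting in %s seconds", restart_delay)
            await asyncio.sleep(restart_delay)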
Example #25
    async def fetch(
        self,
        listen_for,
        context: dict,
        loop: asyncio.AbstractEventLoop,
        consumer_group: str = None,
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> Generator[EventMessage, None, None]:

        if self.consumer_group_prefix:
            consumer_group = f"{self.consumer_group_prefix}-{consumer_group}"

        if not isinstance(since, (list, tuple)):
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = asyncio.Queue(maxsize=1)

        async def fetch_loop():
            while True:
                try:
                    async for message, stream in self._fetch_new_messages(
                            streams, consumer_group, expected_events, forever):
                        await queue.put((message, stream))
                        # Wait for the queue to empty before trying to get another message
                        await queue.join()
                except ConnectionClosedError:
                    logger.warning(
                        f"Redis connection lost while consuming events, reconnecting "
                        f"in {self.consumption_restart_delay} seconds...")
                    await asyncio.sleep(self.consumption_restart_delay)

        async def reclaim_loop():
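            # Reclaim messages which other consumers have failed to process
            # within a reasonable time. See _reclaim_lost_messages()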
            await asyncio.sleep(self.acknowledgement_timeout)
            async for message, stream in self._reclaim_lost_messages(
                    stream_names, consumer_group, expected_events):
                await queue.put((message, stream))
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        # Make sure we surface any exceptions that occur in either task
        fetch_task = asyncio.ensure_future(fetch_loop(), loop=loop)
        reclaim_task = asyncio.ensure_future(reclaim_loop(), loop=loop)

        fetch_task.add_done_callback(check_for_exception)
        reclaim_task.add_done_callback(check_for_exception)

        try:
            while True:
                try:
                    message, stream = await queue.get()
                    yield message
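                    # Execution resumes here once the caller advances the generator, i.e.
                    # typically after it has handled this message, so it is safe to acknowledge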
                    await self._ack(stream, consumer_group, message.native_id)
                    queue.task_done()
                    yield True
                except GeneratorExit:
                    return
        finally:
            await cancel(fetch_task, reclaim_task)
Example #26
0
    async def fetch_events(self) -> Tuple[Sequence[EventMessage], Any]:
        pool = await self.get_redis_pool()
        with await pool as redis:
            if not self._streams:
                logger.debug('Event backend has been given no events to consume. Event backend will sleep.')
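                # Sleeping for a year is just a cancellable placeholder; the task is
                # presumably cancelled when the streams change (see the _reload handling below)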
                self._task = asyncio.ensure_future(asyncio.sleep(3600 * 24 * 365))
            else:
                logger.info(LBullets(
                    'Consuming events from', items={
                        '{} ({})'.format(*v) for v in self._streams.items()
                    }
                ))
                # TODO: Count/timeout
                self._task = asyncio.ensure_future(
                    redis.xread(
                        streams=list(self._streams.keys()),
                        latest_ids=list(self._streams.values()),
                        count=10,  # TODO: Make configurable, add timeout too
                    )
                )

            try:
                stream_messages = await self._task or []
            except asyncio.CancelledError as e:
                if self._reload:
                    # Streams to listen on have changed.
                    # Bail out and let this method get called again,
                    # at which point we'll pick up the new streams.
                    logger.debug('Event transport reloading.')
                    stream_messages = []
                    self._reload = False
                else:
                    raise

        event_messages = []
        latest_ids = {}
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # Keep track of which event ID we are up to. We will store these
            # in consumption_complete(), once we know the events have definitely
            # been consumed.
            latest_ids[stream] = message_id

            # Unfortunately, there is an edge-case when BOTH:
            #  1. We are consuming events from 'now' (i.e. event ID '$'), the default
            #  2. There is an unhandled error when processing the FIRST batch of events
            # If both occur, the next iteration would start again from '$' and we would
            # lose events. Therefore 'subtract one' from the message ID
            # and store that immediately. Subtracting one is imprecise, as there is a SLIM
            # chance we could grab another event in the process. However, if events are
            # being consumed from 'now' then the developer presumably doesn't care about
            # a high level of precision.
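            # (Redis stream IDs take the form '<milliseconds>-<sequence>', so 'subtracting
            # one' means stepping back to the immediately preceding possible ID.)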
            if self._streams[stream] == '$':
                self._streams[stream] = redis_stream_id_subtract_one(message_id)

            event_messages.append(
                EventMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received event {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return event_messages, latest_ids
Example #27
0
    async def setup_async(self, plugins: dict = None):
        """Setup lightbus and get it ready to consume events and/or RPCs

        You should call this manually if you are calling `consume_rpcs()`
        directly. This will be handled for you if you are
        calling `run_forever()`.
        """
        logger.info(
            LBullets(
                "Lightbus is setting up",
                items={
                    "service_name (set with -s or LIGHTBUS_SERVICE_NAME)":
                    Bold(self.config.service_name),
                    "process_name (set with -p or LIGHTBUS_PROCESS_NAME)":
                    Bold(self.config.process_name),
                },
            ))

        # Log the transport information
        rpc_transport = self.transport_registry.get_rpc_transport("default",
                                                                  default=None)
        result_transport = self.transport_registry.get_result_transport(
            "default", default=None)
        event_transport = self.transport_registry.get_event_transport(
            "default", default=None)
        log_transport_information(rpc_transport, result_transport,
                                  event_transport,
                                  self.schema.schema_transport, logger)

        # Log the plugins we have
        if plugins is None:
            logger.debug("Auto-loading any installed Lightbus plugins...")
            # Force auto-loading because many commands do a config-less, best-effort
            # plugin load early on. Now that the config is available we can
            # load the plugins properly.
            plugins = autoload_plugins(self.config, force=True)
        else:
            logger.debug("Loading explicitly specified Lightbus plugins...")
            manually_set_plugins(plugins)

        if plugins:
            logger.info(
                LBullets("Loaded the following plugins ({})".format(
                    len(plugins)),
                         items=plugins))
        else:
            logger.info("No plugins loaded")

        # Load schema
        logger.debug("Loading schema...")
        await self.schema.load_from_bus()

        # Share the schema of the registered APIs
        for api in registry.all():
            await self.schema.add_api(api)

        logger.info(
            LBullets(
                "Loaded the following remote schemas ({})".format(
                    len(self.schema.remote_schemas)),
                items=self.schema.remote_schemas.keys(),
            ))

        for transport in self.transport_registry.get_all_transports():
            await transport.open()
Example #28
0
    async def start_worker(self):
        """Worker startup procedure
        """
        # Ensure an event loop exists
        get_event_loop()

        self._worker_tasks = set()

        # Start monitoring for errors on the error queue
        error_monitor_task = asyncio.ensure_future(self.error_monitor())
        self._error_monitor_task = error_monitor_task
        self._worker_tasks.add(self._error_monitor_task)

        # Features setup & logging
        if not self.api_registry.all() and Feature.RPCS in self.features:
            logger.info(
                "Disabling serving of RPCs as no APIs have been registered")
            self.features.remove(Feature.RPCS)

        logger.info(
            LBullets(f"Enabled features ({len(self.features)})",
                     items=[f.value for f in self.features]))

        disabled_features = set(ALL_FEATURES) - set(self.features)
        logger.info(
            LBullets(
                f"Disabled features ({len(disabled_features)})",
                items=[f.value for f in disabled_features],
            ))

        # Api logging
        logger.info(
            LBullets(
                "APIs in registry ({})".format(len(self.api_registry.all())),
                items=self.api_registry.names(),
            ))

        # Push all registered APIs into the global schema
        for api in self.api_registry.all():
            await self.schema.add_api(api)

        # We're running as a worker now (e.g. lightbus run), so
        # do the lazy loading immediately
        await self.lazy_load_now()

        # Setup schema monitoring
        monitor_task = asyncio.ensure_future(
            queue_exception_checker(self.schema.monitor(), self.error_queue))

        logger.info("Executing before_worker_start & on_start hooks...")
        await self.hook_registry.execute("before_worker_start")
        logger.info(
            "Execution of before_worker_start & on_start hooks was successful")

        # Setup RPC consumption
        if Feature.RPCS in self.features:
            consume_rpc_task = asyncio.ensure_future(
                queue_exception_checker(self.consume_rpcs(), self.error_queue))
        else:
            consume_rpc_task = None

        # Start off any registered event listeners
        if Feature.EVENTS in self.features:
            await self.event_client.start_registered_listeners()

        # Start off any background tasks
        if Feature.TASKS in self.features:
            for coroutine in self._background_coroutines:
                task = asyncio.ensure_future(
                    queue_exception_checker(coroutine, self.error_queue))
                self._background_tasks.append(task)

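        # Track the remaining long-running tasks alongside the error monitor task added above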
        self._worker_tasks.add(consume_rpc_task)
        self._worker_tasks.add(monitor_task)
Example #29
0
    async def _fetch_new_messages(self, streams, consumer_group,
                                  expected_events, forever):
        with await self.connection_manager() as redis:
            # Firstly create the consumer group if we need to
            await self._create_consumer_groups(streams, redis, consumer_group)

            # Get any messages that this consumer has yet to process.
            # This can happen in the case where the process died before acknowledging.
            pending_messages = await redis.xread_group(
                group_name=consumer_group,
                consumer_name=self.consumer_name,
                streams=list(streams.keys()),
                # Using ID '0' indicates we want unacked pending messages
                latest_ids=["0"] * len(streams),
                timeout=None,  # Don't block, return immediately
            )
            for stream, message_id, fields in pending_messages:
                message_id = decode(message_id, "utf8")
                event_message = self._fields_to_message(fields,
                                                        expected_events,
                                                        native_id=message_id)
                if not event_message:
                    # noop message, or a message for an event we don't care about
                    continue
                logger.debug(
                    LBullets(
                        L(
                            "⬅ Receiving pending event {} on stream {}",
                            Bold(message_id),
                            Bold(stream),
                        ),
                        items=dict(**event_message.get_metadata(),
                                   kwargs=event_message.get_kwargs()),
                    ))
                yield event_message, stream

            # We've now cleaned up any old messages that were hanging around.
            # Now we get on to the main loop which blocks and waits for new messages

            while True:
                # Fetch some messages.
                # This will block until there are some messages available
                stream_messages = await redis.xread_group(
                    group_name=consumer_group,
                    consumer_name=self.consumer_name,
                    streams=list(streams.keys()),
                    # Using ID '>' indicates we only want new messages which have not
                    # been passed to other consumers in this group
                    latest_ids=[">"] * len(streams),
                    count=self.batch_size,
                )

                # Handle the messages we have received
                for stream, message_id, fields in stream_messages:
                    message_id = decode(message_id, "utf8")
                    event_message = self._fields_to_message(
                        fields, expected_events, native_id=message_id)
                    if not event_message:
                        # noop message, or a message for an event we don't care about
                        continue
                    logger.debug(
                        LBullets(
                            L(
                                "⬅ Received new event {} on stream {}",
                                Bold(message_id),
                                Bold(stream),
                            ),
                            items=dict(**event_message.get_metadata(),
                                       kwargs=event_message.get_kwargs()),
                        ))
                    yield event_message, stream

                if not forever:
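                    # Not consuming forever (e.g. a one-off fetch), so stop after a single pass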
                    return