Example #1
    def handle_in_background(self, queue: InternalQueue, handler, command, on_done: asyncio.Event):
        """Handle a received command by calling the provided handler

        This execution happens in the background.
        """
        logger.debug(f"Handling command {command}")

        def when_task_finished(fut: asyncio.Future):
            self._running_commands.remove(fut)
            try:
                # Retrieve any error which may have occurred.
                # We ignore the error because we assume any exceptions which the
                # handler threw will have already been placed into the error queue
                # by the queue_exception_checker().
                # Regardless, we must retrieve the result in order to keep Python happy.
                fut.result()
            except:
                pass

            # We use call_soon_threadsafe() to ensure we call the Event's set()
            # in a threadsafe fashion. This is because the Event object may have
            # been created in another thread and be attached to another event loop
            on_done._loop.call_soon_threadsafe(on_done.set)

        # fmt: off
        background_call_task = asyncio.ensure_future(queue_exception_checker(
            handler(command),
            self.error_queue,
        ))
        # fmt: on
        background_call_task.add_done_callback(when_task_finished)
        self._running_commands.add(background_call_task)
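A note on Example #1's `on_done._loop.call_soon_threadsafe(on_done.set)`: it reaches into the Event's private `_loop` attribute so that set() runs on the loop the Event belongs to, even though the callback may fire on a different loop or thread. A hypothetical sketch of the same idea without touching private attributes, capturing the loop explicitly when the Event is created:

import asyncio

async def make_done_event():
    # Hypothetical helper (not Lightbus code): pair the Event with the
    # loop it was created on, so any thread/loop can signal it safely
    on_done = asyncio.Event()
    done_loop = asyncio.get_running_loop()

    def signal_done():
        # call_soon_threadsafe() schedules on_done.set() on done_loop
        # and wakes that loop if it is idle
        done_loop.call_soon_threadsafe(on_done.set)

    return on_done, signal_done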
Example #2
    def start(self):
        """Starts the queue monitor"""
        # fmt: off
        self._queue_monitor_task = asyncio.ensure_future(queue_exception_checker(
            self._queue_monitor(),
            self.error_queue,
        ))
        # fmt: on
Example #3
    async def handle_receive_result(self, command: commands.ReceiveResultCommand):
        """Client wishes to receive a result from a worker"""

        # TODO: rpc_timeout is in three different places in the config!
        #       Fix this. Really it makes most sense for the user if it goes on the
        #       ApiConfig rather than having to repeat it on both the result & RPC
        #       transports.
        timeout = command.options.get(
            "timeout", self.config.api(command.message.api_name).rpc_timeout
        )
        result_transport = self.transport_registry.get_result_transport(command.message.api_name)

        logger.debug("Starting RPC result listener")
        task = asyncio.ensure_future(
            queue_exception_checker(
                self._result_listener(
                    result_transport=result_transport,
                    timeout=timeout,
                    rpc_message=command.message,
                    return_path=command.message.return_path,
                    options=command.options,
                    result_queue=command.destination_queue,
                ),
                self.error_queue,
            )
        )
        self.listener_tasks.add(task)
Example #4
    async def _start_listener(self, listener: "Listener"):
        # Setting the maxsize to 1 ensures the transport cannot load
        # messages faster than we can consume them
        queue: InternalQueue[EventMessage] = InternalQueue(maxsize=1)

        async def consume_events():
            while True:
                logger.debug(
                    "Event listener now waiting for event on the internal queue"
                )
                event_message = await queue.get()
                logger.debug(
                    "Event listener has now received an event on the internal queue, processing now"
                )
                await self._on_message(
                    event_message=event_message,
                    listener=listener.callable,
                    options=listener.options,
                    on_error=listener.on_error,
                )
                queue.task_done()

        # Start the consume_events() consumer running
        task = asyncio.ensure_future(
            queue_exception_checker(consume_events(), self.error_queue))
        self._event_listener_tasks.add(task)

        await self.producer.send(
            ConsumeEventsCommand(
                events=listener.events,
                destination_queue=queue,
                listener_name=listener.name,
                options=listener.options,
            )).wait()
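The maxsize=1 queue plus task_done()/join() used in Example #4 (and again in Examples #9 and #10) is a simple backpressure pattern: the producer cannot fetch more work until the consumer has fully processed the current item. A self-contained sketch with hypothetical producer/consumer names:

import asyncio

async def main():
    queue: asyncio.Queue = asyncio.Queue(maxsize=1)

    async def producer():
        for i in range(3):
            await queue.put(i)  # blocks while the queue is full
            await queue.join()  # blocks until the consumer calls task_done()

    async def consumer():
        while True:
            item = await queue.get()
            print(f"processed {item}")
            queue.task_done()  # releases the producer's join()

    consumer_task = asyncio.ensure_future(consumer())
    await producer()
    consumer_task.cancel()
    try:
        await consumer_task
    except asyncio.CancelledError:
        pass

asyncio.run(main())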
Example #5
    def start(self, handler: Callable):
        """Set the handler function and start the invoker

        Use `stop()` to shutdown the invoker.
        """
        self._consumer_task = asyncio.ensure_future(
            queue_exception_checker(self._consumer_loop(self.queue, handler), self.error_queue)
        )
        self._running_commands = set()
Example #6
    def start(self, handler: Callable):
        """Set the handler function and start the invoker

        Use `stop()` to shutdown the invoker.
        """
        logger.debug(
            f"Starting consumer for handler {handler.__qualname__}(). This should report ready"
            " shortly...")
        self._consumer_task = asyncio.ensure_future(
            queue_exception_checker(self._consumer_loop(self.queue, handler),
                                    self.error_queue))
        self._running_commands = set()
Example #7
def test_queue_exception_checker_in_task(erroring_coroutine,
                                         error_queue: ErrorQueueType):
    coroutine = queue_exception_checker(erroring_coroutine(), error_queue)

    with pytest.raises(ExampleException):
        asyncio.run(coroutine)

    assert error_queue.qsize() == 1
    error: Error = error_queue.get_nowait()

    assert error.type == ExampleException
    assert isinstance(error.value, ExampleException)
    assert "test_utilities_unit.py" in str(error)
    assert "ExampleException" in str(error)
Example #8
    async def handle_consume_rpcs(self, command: commands.ConsumeRpcsCommand):
        """Worker wishes for incoming RPC results to be listened for and processed"""
        # Not all APIs will necessarily be served by the same transport, so group them
        # accordingly
        api_names_by_transport = self.transport_registry.get_rpc_transports(command.api_names)

        for rpc_transport, transport_api_names in api_names_by_transport.items():
            transport_apis = list(map(self.api_registry.get, transport_api_names))

            # fmt: off
            task = asyncio.ensure_future(queue_exception_checker(
                self._consume_rpcs_with_transport(rpc_transport=rpc_transport, apis=transport_apis),
                self.error_queue,
            ))
            # fmt: on
            self.consumer_tasks.add(task)
Example #9
    async def handle_consume_events(self,
                                    command: commands.ConsumeEventsCommand):
        event_transports = self.transport_registry.get_event_transports(
            api_names=[api_name for api_name, _ in command.events])

        async def listener(event_transport, events_):
            consumer = event_transport.consume(
                listen_for=events_,
                listener_name=command.listener_name,
                error_queue=self.error_queue,
                **command.options,
            )
            async for event_messages in consumer:
                for event_message in event_messages:
                    logger.debug(
                        f"Putting event {event_message.id} onto the internal queue"
                    )
                    await command.destination_queue.put(event_message)

                # Wait for the queue to be emptied before fetching more.
                # We will need to make this configurable if we want to support
                # the pre-fetching of events. This solution is a good default
                # though as it will ensure events are processed in a more ordered fashion
                # in cases where there are multiple workers, and also ensure fewer
                # messages need to be reclaimed in the event of a crash
                logger.debug(
                    "Waiting for messages to be consumed before adding more")
                await command.destination_queue.join()

        for event_transport_pool, api_names in event_transports.items():
            # Create a listener task for each event transport,
            # passing each a list of events for which it should listen
            events = [(api_name, event_name)
                      for api_name, event_name in command.events
                      if api_name in api_names]

            # fmt: off
            listener_task = asyncio.ensure_future(
                queue_exception_checker(
                    listener(event_transport_pool, events),
                    self.error_queue,
                ))
            # fmt: on
            self.listener_tasks.add(listener_task)
Example #10
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        error_queue: ErrorQueueType,
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        """Consume events for the given APIs"""
        self._sanity_check_listen_for(listen_for)

        consumer_group = f"{self.service_name}-{listener_name}"

        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Clean up any old groups & consumers
        await self._cleanup(stream_names)

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = InternalQueue(maxsize=1)
        initial_reclaiming_complete = asyncio.Event()

        async def consume_loop():
            """Regular event consuming. See _fetch_new_messages()"""
            logger.debug(
                "Will begin consuming events once the initial event reclaiming is complete"
            )
            await initial_reclaiming_complete.wait()
            logger.debug(
                "Event reclaiming is complete, beginning to consume events")

            async for messages in self._fetch_new_messages(
                    streams, consumer_group, expected_events, forever):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        retry_consume_loop = retry_on_redis_connection_failure(
            fn=consume_loop,
            retry_delay=self.consumption_restart_delay,
            action="consuming events")

        async def reclaim_loop():
            """
            Reclaim messages which other consumers have failed to
            process in a reasonable time. See _reclaim_lost_messages()
            """
            while True:
                logger.debug("Checking for any events which need reclaiming")
                async for messages in self._reclaim_lost_messages(
                        stream_names, consumer_group, expected_events):
                    await queue.put(messages)
                    # Wait for the queue to empty before trying to get another message
                    await queue.join()

                initial_reclaiming_complete.set()
                await asyncio.sleep(self.reclaim_interval)

        consume_task = None
        reclaim_task = None

        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(
                queue_exception_checker(retry_consume_loop, error_queue))
            reclaim_task = asyncio.ensure_future(
                queue_exception_checker(reclaim_loop(), error_queue))

            while True:
                try:
                    messages = await queue.get()
                    logger.debug(
                        f"Got batch of {len(messages)} message(s). Yielding messages to Lightbus"
                        " client")
                    yield messages
                    logger.debug(
                        f"Batch of {len(messages)} message(s) was processed by Lightbus client."
                        " Marking as done.")
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we clean up the tasks we created
            await cancel(consume_task, reclaim_task)
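The finally block in Example #10 relies on a cancel() utility to tear down both tasks. Assuming the usual cancel-then-await semantics, a minimal sketch might be:

import asyncio

async def cancel(*tasks):
    # Sketch of a cancel() helper (assumed semantics; the real Lightbus
    # utility may do more, e.g. re-raise non-cancellation errors).
    # Note the tasks may be None, as in Example #10's finally block.
    for task in tasks:
        if task is not None:
            task.cancel()
    for task in tasks:
        if task is None:
            continue
        try:
            await task
        except asyncio.CancelledError:
            pass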
Example #11
    async def start_worker(self):
        """Worker startup procedure
        """
        # Ensure an event loop exists
        get_event_loop()

        self._worker_tasks = set()

        # Start monitoring for errors on the error queue
        error_monitor_task = asyncio.ensure_future(self.error_monitor())
        self._error_monitor_task = error_monitor_task
        self._worker_tasks.add(self._error_monitor_task)

        # Features setup & logging
        if not self.api_registry.all() and Feature.RPCS in self.features:
            logger.info(
                "Disabling serving of RPCs as no APIs have been registered")
            self.features.remove(Feature.RPCS)

        logger.info(
            LBullets(f"Enabled features ({len(self.features)})",
                     items=[f.value for f in self.features]))

        disabled_features = set(ALL_FEATURES) - set(self.features)
        logger.info(
            LBullets(
                f"Disabled features ({len(disabled_features)})",
                items=[f.value for f in disabled_features],
            ))

        # Api logging
        logger.info(
            LBullets(
                "APIs in registry ({})".format(len(self.api_registry.all())),
                items=self.api_registry.names(),
            ))

        # Push all registered APIs into the global schema
        for api in self.api_registry.all():
            await self.schema.add_api(api)

        # We're running as a worker now (e.g. lightbus run), so
        # do the lazy loading immediately
        await self.lazy_load_now()

        # Setup schema monitoring
        monitor_task = asyncio.ensure_future(
            queue_exception_checker(self.schema.monitor(), self.error_queue))

        logger.info("Executing before_worker_start & on_start hooks...")
        await self.hook_registry.execute("before_worker_start")
        logger.info(
            "Execution of before_worker_start & on_start hooks was successful")

        # Setup RPC consumption
        if Feature.RPCS in self.features:
            consume_rpc_task = asyncio.ensure_future(
                queue_exception_checker(self.consume_rpcs(), self.error_queue))
        else:
            consume_rpc_task = None

        # Start off any registered event listeners
        if Feature.EVENTS in self.features:
            await self.event_client.start_registered_listeners()

        # Start off any background tasks
        if Feature.TASKS in self.features:
            for coroutine in self._background_coroutines:
                task = asyncio.ensure_future(
                    queue_exception_checker(coroutine, self.error_queue))
                self._background_tasks.append(task)

        self._worker_tasks.add(consume_rpc_task)
        self._worker_tasks.add(monitor_task)
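Every task started in Example #11 funnels failures into self.error_queue, which the error_monitor() task (created first) watches. A hypothetical minimal monitor, assuming the queue carries the Error objects seen in Example #7 and that the worker exposes some shutdown hook:

    async def error_monitor(self):
        # Hypothetical sketch, not the real Lightbus implementation
        error: Error = await self.error_queue.get()
        logger.error("Error in background task: %r. Shutting down worker.", error.value)
        self.error_queue.task_done()
        await self.stop_worker()  # assumed/hypothetical shutdown hook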