Example #1
    async def _start_server_inner(self, consume_rpcs=True):
        self.api_registry.add(LightbusStateApi())
        self.api_registry.add(LightbusMetricsApi())

        if consume_rpcs:
            logger.info(
                LBullets(
                    "APIs in registry ({})".format(len(self.api_registry.all())),
                    items=self.api_registry.names(),
                )
            )

        # Setup RPC consumption
        consume_rpc_task = None
        if consume_rpcs and self.api_registry.all():
            consume_rpc_task = asyncio.ensure_future(self.consume_rpcs())
            consume_rpc_task.add_done_callback(make_exception_checker(self, die=True))

        # Setup schema monitoring
        monitor_task = asyncio.ensure_future(self.schema.monitor())
        monitor_task.add_done_callback(make_exception_checker(self, die=True))

        logger.info("Executing before_server_start & on_start hooks...")
        await self._execute_hook("before_server_start")
        logger.info("Execution of before_server_start & on_start hooks was successful")

        self._server_tasks = [consume_rpc_task, monitor_task]
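
Every listing in this collection relies on the same pattern: schedule a coroutine with asyncio.ensure_future() and attach a done callback produced by make_exception_checker() so that failures in background tasks are logged (and can optionally bring the process down) rather than being silently swallowed. Below is a minimal, self-contained sketch of that pattern; the checker is a hypothetical stand-in written for illustration, not Lightbus's actual make_exception_checker().

import asyncio
import logging

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


def make_exception_checker_sketch(die: bool = False):
    """Return a done callback that surfaces exceptions from a finished task.

    Illustrative stand-in only; Lightbus's real make_exception_checker() lives
    in lightbus.utilities.async_tools and differs in detail.
    """

    def check(task: asyncio.Task):
        if task.cancelled():
            return
        # Retrieving the exception also silences asyncio's
        # "exception was never retrieved" warning
        exc = task.exception()
        if exc is None:
            return
        logger.error("Background task failed", exc_info=exc)
        if die:
            # A real server would begin an orderly shutdown here
            asyncio.get_running_loop().stop()

    return check


async def flaky():
    raise RuntimeError("boom")


async def main():
    task = asyncio.ensure_future(flaky())
    task.add_done_callback(make_exception_checker_sketch(die=False))
    await asyncio.sleep(0.1)  # Give the task time to fail and the callback to run


asyncio.run(main())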
Example #2
    def make_task(self) -> asyncio.Task:
        """ Create a task responsible for running the listener(s)

        This will create a task for each transport (see get_event_transports()).
        These tasks will be gathered together into a parent task, which is then
        returned.

        Any unhandled exceptions will be dealt with according to `on_error`.

        See listener() for the coroutine which handles the listening.
        """
        tasks = []
        for _event_transport, _api_names in self.event_transports:
            # Create a listener task for each event transport,
            # passing each a list of events for which it should listen
            events = [
                (api_name, event_name)
                for api_name, event_name in self.events
                if api_name in _api_names
            ]

            task = asyncio.ensure_future(self.listener(_event_transport, events))
            task.is_listener = True  # Used by close()
            tasks.append(task)

        listener_task = asyncio.gather(*tasks)

        exception_checker = make_exception_checker(self.bus_client, die=self.die_on_error)
        listener_task.add_done_callback(exception_checker)

        # Setting is_listener lets Client.close() know that it should mop up this
        # task automatically on shutdown
        listener_task.is_listener = True

        return listener_task
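
make_task() attaches a single exception checker to the gathered parent rather than to every child task. The sketch below, plain asyncio rather than Lightbus code, shows why that is sufficient: with the default return_exceptions=False, the first exception raised by any child completes the parent future, so one done callback on the parent observes it.

import asyncio


async def quiet_listener():
    await asyncio.sleep(1)


async def failing_listener():
    await asyncio.sleep(0.01)
    raise ValueError("listener blew up")


def report(parent: asyncio.Future):
    # Runs as soon as the first child raises
    if not parent.cancelled() and parent.exception():
        print("parent saw:", parent.exception())


async def main():
    parent = asyncio.gather(quiet_listener(), failing_listener())
    parent.add_done_callback(report)
    try:
        await parent
    except ValueError:
        pass  # The same exception also propagates to whoever awaits the parent


asyncio.run(main())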
Example #3
    async def consume_rpcs(self, apis: List[Api] = None):
        if apis is None:
            apis = self.api_registry.all()

        if not apis:
            raise NoApisToListenOn(
                "No APIs to consume on in consume_rpcs(). Either this method was called with apis=[], "
                "or the API registry is empty."
            )

        # Not all APIs will necessarily be served by the same transport, so group them
        # accordingly
        api_names = [api.meta.name for api in apis]
        api_names_by_transport = self.transport_registry.get_rpc_transports(api_names)

        coroutines = []
        for rpc_transport, transport_api_names in api_names_by_transport:
            transport_apis = list(map(self.api_registry.get, transport_api_names))
            coroutines.append(
                self._consume_rpcs_with_transport(rpc_transport=rpc_transport, apis=transport_apis)
            )

        task = asyncio.ensure_future(asyncio.gather(*coroutines))
        task.add_done_callback(make_exception_checker(self, die=True))
        self._consumers.append(task)
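
consume_rpcs() groups APIs by RPC transport before consuming, because different APIs may be configured to use different brokers. A toy version of that grouping step, using a plain dict in place of Lightbus's transport registry purely for illustration:

from collections import defaultdict

# Hypothetical mapping of API name -> transport name; in Lightbus this lookup
# is performed by the transport registry, not a dict like this.
transport_for_api = {
    "auth": "redis_a",
    "users": "redis_a",
    "billing": "redis_b",
}


def group_by_transport(api_names):
    grouped = defaultdict(list)
    for api_name in api_names:
        grouped[transport_for_api[api_name]].append(api_name)
    # Yields (transport, [api_name, ...]) pairs, mirroring the shape that
    # consume_rpcs() iterates over
    return list(grouped.items())


print(group_by_transport(["auth", "users", "billing"]))
# [('redis_a', ['auth', 'users']), ('redis_b', ['billing'])]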
Example #4
    def open(self):
        logger.debug("Bus is opening")

        # Start a background task to handle incoming calls on the _perform_calls queue
        self._perform_calls_task = asyncio.ensure_future(self._perform_calls())

        # Housekeeping for error handling
        self._perform_calls_task.add_done_callback(make_exception_checker())
Example #5
    def add_background_task(
        self, coroutine: Union[Coroutine, asyncio.Future], cancel_on_close=True
    ):
        """Run a coroutine in the background

        The provided coroutine will be run in the background once
        Lightbus startup is complete.

        The coroutine will be cancelled when the bus client is closed if
        `cancel_on_close` is set to `True`.

        The Lightbus process will exit if the coroutine raises an exception.
        See lightbus.utilities.async_tools.check_for_exception() for details.
        """
        task = asyncio.ensure_future(coroutine)
        task.add_done_callback(make_exception_checker(self, die=True))
        if cancel_on_close:
            # Store task for closing later
            self._background_tasks.append(task)
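
add_background_task() keeps a reference to each task so the client can cancel it on close. A stripped-down sketch of that bookkeeping; the class and names here are illustrative, not the Lightbus API:

import asyncio


class BackgroundTasks:
    """Track background tasks and cancel them on close (illustrative only)."""

    def __init__(self):
        self._tasks = []

    def add(self, coroutine, cancel_on_close=True):
        task = asyncio.ensure_future(coroutine)
        if cancel_on_close:
            self._tasks.append(task)
        return task

    async def close(self):
        for task in self._tasks:
            task.cancel()
        # Wait for the cancellations to finish, swallowing the CancelledErrors
        await asyncio.gather(*self._tasks, return_exceptions=True)
        self._tasks.clear()


async def main():
    tasks = BackgroundTasks()
    tasks.add(asyncio.sleep(3600))  # Long-running background work
    await tasks.close()             # Returns promptly: the task is cancelled
    print("all background tasks cancelled")


asyncio.run(main())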
Example #6
    def start_server(self, consume_rpcs=True):
        """Server startup procedure

        Must be called from within the main thread
        """
        # Ensure an event loop exists
        get_event_loop()

        self._server_shutdown_queue = janus.Queue()
        self._server_tasks = set()

        async def server_shutdown_monitor():
            exit_code = await self._server_shutdown_queue.async_q.get()
            self.exit_code = exit_code
            self.loop.stop()
            self._server_shutdown_queue.async_q.task_done()

        shutdown_monitor_task = asyncio.ensure_future(server_shutdown_monitor())
        shutdown_monitor_task.add_done_callback(make_exception_checker(self, die=True))
        self._shutdown_monitor_task = shutdown_monitor_task

        block(self._start_server_inner())
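
start_server() relies on a janus.Queue so that synchronous code (a signal handler or another thread) can ask the asyncio side to shut down. A minimal sketch of that sync-to-async bridge, assuming the janus package used above is installed; this is not the Lightbus server code itself:

import asyncio
import threading

import janus


async def main():
    shutdown_queue = janus.Queue()  # Must be created inside a running loop

    def request_shutdown_from_thread():
        # The sync side of the queue is safe to use from ordinary threads
        shutdown_queue.sync_q.put(0)

    async def shutdown_monitor():
        exit_code = await shutdown_queue.async_q.get()
        shutdown_queue.async_q.task_done()
        print("shutdown requested with exit code", exit_code)

    monitor = asyncio.ensure_future(shutdown_monitor())

    # Simulate another thread asking the server to stop
    threading.Thread(target=request_shutdown_from_thread).start()

    await monitor
    shutdown_queue.close()
    await shutdown_queue.wait_closed()


asyncio.run(main())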
Example #7
    def worker(self, bus_client, after_shutdown: Callable = None):
        """


        A note about error handling in the worker thread:

        There are two scenarios in which the worker thread my encounter an error.

            1. The bus is being used as a client. A bus method is called by the client code,
               and this call raises an exception. This exception is propagated to the client
               code for it to deal with.1
            2. The bus is being used as a server and has various coroutines running at any one
               time. In this case, if a coroutine encounters an error then it should cause the
               lightbus server to exit.

        In response to either of these cases the bus needs to shut itself down. Therefore,
        the worker needs to keep on running for a while in order to handle the various shutdown tasks.

        In case 1 above, we assume the developer will take responsibility for closing the bus
        correctly when they are done with it.

        In case 2 above, the worker needs to signal the main lightbus run process to tell it to begin the
        shutdown procedure.

        """
        logger.debug(f"Bus thread {self._thread.name} initialising")

        # Start a new event loop for this new thread
        asyncio.set_event_loop(asyncio.new_event_loop())

        self._call_queue = janus.Queue()
        self._worker_shutdown_queue = janus.Queue()

        async def worker_shutdown_monitor():
            await self._worker_shutdown_queue.async_q.get()
            asyncio.get_event_loop().stop()
            self._worker_shutdown_queue.async_q.task_done()

        shutdown_monitor_task = asyncio.ensure_future(worker_shutdown_monitor())
        shutdown_monitor_task.add_done_callback(make_exception_checker(bus_client, die=True))

        perform_calls_task = asyncio.ensure_future(self.perform_calls())
        perform_calls_task.add_done_callback(make_exception_checker(bus_client, die=True))

        self._ready.set()

        asyncio.get_event_loop().run_forever()

        logging.debug(f"Event loop stopped in bus worker thread {self._thread.name}. Closing down.")
        self._ready.clear()

        if after_shutdown:
            after_shutdown()

        logger.debug("Canceling worker tasks")
        block(cancel(perform_calls_task, shutdown_monitor_task))

        logger.debug("Closing the call queue")
        self._call_queue.close()
        block(self._call_queue.wait_closed())

        logger.debug("Closing the worker shutdown queue")
        self._worker_shutdown_queue.close()
        block(self._worker_shutdown_queue.wait_closed())

        logger.debug("Worker shutdown complete")
Example #8
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        bus_client: "BusClient",
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        # TODO: Cleanup consumer groups
        self._sanity_check_listen_for(listen_for)

        consumer_group = f"{self.service_name}-{listener_name}"

        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = asyncio.Queue(maxsize=1)

        async def consume_loop():
            # Regular event consuming. See _fetch_new_messages()
            while True:
                try:
                    async for messages in self._fetch_new_messages(
                            streams, consumer_group, expected_events, forever):
                        await queue.put(messages)
                        # Wait for the queue to empty before trying to get another message
                        await queue.join()
                except (ConnectionClosedError, ConnectionResetError):
                    # ConnectionClosedError is from aioredis. However, sometimes the connection
                    # can die outside of aioredis, in which case we get a builtin ConnectionResetError.
                    logger.warning(
                        f"Redis connection lost while consuming events, reconnecting "
                        f"in {self.consumption_restart_delay} seconds...")
                    await asyncio.sleep(self.consumption_restart_delay)

        async def reclaim_loop():
            # Reclaim messages which other consumers have failed to
            # process in a reasonable time. See _reclaim_lost_messages()

            await asyncio.sleep(self.acknowledgement_timeout)
            async for messages in self._reclaim_lost_messages(
                    stream_names, consumer_group, expected_events):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        consume_task = None
        reclaim_task = None

        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(consume_loop())
            reclaim_task = asyncio.ensure_future(reclaim_loop())

            # Make sure we surface any exceptions that occur in either task
            consume_task.add_done_callback(make_exception_checker(bus_client))
            reclaim_task.add_done_callback(make_exception_checker(bus_client))

            while True:
                try:
                    messages = await queue.get()
                    yield messages
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we clean up the tasks we created
            await cancel(consume_task, reclaim_task)
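
consume() merges two producer loops (fresh messages and reclaimed messages) through a single asyncio.Queue(maxsize=1); each producer blocks on queue.join() until the consumer has called task_done(), so batches are handed over strictly one at a time. A self-contained sketch of that hand-off pattern; the producers here are stand-ins, not the Redis transport:

import asyncio


async def producer(name: str, queue: asyncio.Queue):
    for i in range(3):
        await queue.put(f"{name}-{i}")
        # Block until the consumer has processed this item before producing more
        await queue.join()


async def merged(queue: asyncio.Queue):
    """Async generator yielding items from all producers, one at a time."""
    while True:
        item = await queue.get()
        yield item
        queue.task_done()


async def main():
    queue = asyncio.Queue(maxsize=1)
    producers = [
        asyncio.ensure_future(producer("fetch", queue)),
        asyncio.ensure_future(producer("reclaim", queue)),
    ]
    try:
        count = 0
        async for item in merged(queue):
            print("got", item)
            count += 1
            if count == 6:  # Both producers exhausted
                break
    finally:
        for task in producers:
            task.cancel()


asyncio.run(main())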