Code example #1
# pytest-style async fixture: yields a ready consumer, then closes it on teardown
async def consumer():
    def _on_exception(e):
        raise e

    consumer = InternalConsumer(queue=InternalQueue(), error_queue=InternalQueue())
    yield consumer
    await consumer.close()
Code example #2
# pytest-style async fixture: yields a ready producer, then closes it on teardown
async def producer():
    def _on_exception(e):
        raise e

    producer = InternalProducer(queue=InternalQueue(), error_queue=InternalQueue())
    yield producer
    await producer.close()
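Before the longer examples below, it helps to see the basic surface of InternalQueue in one place: it mirrors asyncio.Queue, with put()/get(), their _nowait variants, task_done()/join(), qsize(), and a maxsize argument. The following is a minimal sketch, assuming InternalQueue can be imported from lightbus.utilities.internal_queue (the exact module path may differ between lightbus versions).

import asyncio

from lightbus.utilities.internal_queue import InternalQueue  # assumed import path


async def main():
    queue = InternalQueue(maxsize=10)  # maxsize behaves as in asyncio.Queue

    queue.put_nowait("first")  # synchronous put; raises QueueFull if the queue is full
    await queue.put("second")  # asynchronous put; waits for free space

    assert queue.get_nowait() == "first"  # synchronous get; raises QueueEmpty if empty
    assert await queue.get() == "second"  # asynchronous get; waits for an item


asyncio.run(main())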
Code example #3
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_get_delay():
    queue = InternalQueue()
    task = asyncio.create_task(queue.get())
    await asyncio.sleep(0.001)
    queue.put_nowait(True)
    await asyncio.sleep(0.001)
    assert task.done()
    assert task.result() is True
Code example #4
    async def _start_listener(self, listener: "Listener"):
        # Setting the maxsize to 1 ensures the transport cannot load
        # messages faster than we can consume them
        queue: InternalQueue[EventMessage] = InternalQueue(maxsize=1)

        async def consume_events():
            while True:
                logger.debug(
                    "Event listener now waiting for event on the internal queue"
                )
                event_message = await queue.get()
                logger.debug(
                    "Event listener has now received an event on the internal queue, processing now"
                )
                await self._on_message(
                    event_message=event_message,
                    listener=listener.callable,
                    options=listener.options,
                    on_error=listener.on_error,
                )
                queue.task_done()

        # Start the consume_events() consumer running
        task = asyncio.ensure_future(
            queue_exception_checker(consume_events(), self.error_queue))
        self._event_listener_tasks.add(task)

        await self.producer.send(
            ConsumeEventsCommand(
                events=listener.events,
                destination_queue=queue,
                listener_name=listener.name,
                options=listener.options,
            )).wait()
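The maxsize=1 queue is what gives the listener above its backpressure: the transport's put() cannot complete until consume_events() has taken the previous event off the queue. Below is a minimal standalone sketch of the same pattern, under the same assumed import path; the producer can never run more than one item ahead of the consumer.

import asyncio

from lightbus.utilities.internal_queue import InternalQueue  # assumed import path


async def main():
    queue = InternalQueue(maxsize=1)

    async def producer():
        for i in range(3):
            await queue.put(i)  # blocks while the queue still holds the previous item
            print(f"produced {i}")

    async def consumer():
        for _ in range(3):
            item = await queue.get()
            print(f"consumed {item}")
            queue.task_done()  # mirrors the task_done() call in the listener above

    await asyncio.gather(producer(), consumer())


asyncio.run(main())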
Code example #5
File: tests_testing.py  Project: imlzg/lightbus
async def test_queue_mock_context_async():
    queue = InternalQueue()

    with QueueMockContext(queue) as m:
        await queue.put(1)
        await queue.put(2)
        await queue.get()

    assert m.put_items == [1, 2]
    assert m.got_items == [1]
Code example #6
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_put_delay():
    queue = InternalQueue(maxsize=1)
    queue.put_nowait(False)
    task = asyncio.create_task(queue.put(True))

    await asyncio.sleep(0.001)
    assert queue.get_nowait() is False
    await asyncio.sleep(0.001)
    assert task.done()
    assert queue.get_nowait() is True
Code example #7
File: tests_testing.py  Project: imlzg/lightbus
def test_queue_mock_context_sync():
    queue = InternalQueue()

    with QueueMockContext(queue) as m:
        queue.put_nowait(1)
        queue.put_nowait(2)
        queue.get_nowait()

    assert m.put_items == [1, 2]
    assert m.got_items == [1]
Code example #8
File: test_internal_queue.py  Project: imlzg/lightbus
def test_internal_thread_safety(start_order, maxsize, num_threads):
    num_consumer_threads, num_producer_threads = num_threads
    queue = InternalQueue(maxsize=maxsize)
    consumer_counter = Semaphore(value=0)
    producer_counter = Semaphore(value=0)

    total_expected_items = num_producer_threads * 1000
    total_per_consumer = total_expected_items // num_consumer_threads

    async def consumer(q: InternalQueue):
        try:
            for _ in range(0, total_per_consumer):
                await q.get()
                consumer_counter.release()
        except Exception as e:
            logging.exception(e)

    async def producer(q: InternalQueue):
        try:
            for _ in range(0, 1000):
                await q.put(1)
                producer_counter.release()
        except Exception as e:
            logging.exception(e)

    consumer_factory = lambda: partial(asyncio.run, consumer(queue))
    producer_factory = lambda: partial(asyncio.run, producer(queue))

    consumers = [Thread(target=consumer_factory()) for _ in range(0, num_consumer_threads)]
    producers = [Thread(target=producer_factory()) for _ in range(0, num_producer_threads)]

    if start_order == "consumer_first":
        threads = consumers + producers
    else:
        threads = producers + consumers

    for t in threads:
        t.start()

    for t in threads:
        t.join()

    assert consumer_counter._value == total_expected_items
    assert producer_counter._value == total_expected_items
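The point of this test is that every thread calls asyncio.run() and therefore runs its own event loop, yet all of the loops share one InternalQueue. A plain asyncio.Queue is not thread-safe and cannot be shared across loops this way. A stripped-down sketch of the cross-thread behaviour being verified (same assumed import path):

import asyncio
from threading import Thread

from lightbus.utilities.internal_queue import InternalQueue  # assumed import path

queue = InternalQueue()


def producer_thread():
    asyncio.run(queue.put("hello"))  # each thread gets its own event loop


def consumer_thread():
    # A get() in this thread's loop is woken by the put() made in the other thread
    print(asyncio.run(queue.get()))


threads = [Thread(target=producer_thread), Thread(target=consumer_thread)]
for t in threads:
    t.start()
for t in threads:
    t.join()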
Code example #9
    async def consume(
        self,
        listen_for: List[Tuple[str, str]],
        listener_name: str,
        error_queue: ErrorQueueType,
        since: Union[Since, Sequence[Since]] = "$",
        forever=True,
    ) -> AsyncGenerator[List[RedisEventMessage], None]:
        """Consume events for the given APIs"""
        self._sanity_check_listen_for(listen_for)

        consumer_group = f"{self.service_name}-{listener_name}"

        if not isinstance(since, (list, tuple)):
            # Since has been specified as a single value. Normalise it into
            # the value-per-listener format.
            since = [since] * len(listen_for)
        since = map(normalise_since_value, since)

        stream_names = self._get_stream_names(listen_for)
        # Keys are stream names, values are the latest ID consumed from that stream
        streams = OrderedDict(zip(stream_names, since))
        expected_events = {event_name for _, event_name in listen_for}

        logger.debug(
            LBullets(
                L(
                    "Consuming events as consumer {} in group {} on streams",
                    Bold(self.consumer_name),
                    Bold(consumer_group),
                ),
                items={"{} ({})".format(*v)
                       for v in streams.items()},
            ))

        # Cleanup any old groups & consumers
        await self._cleanup(stream_names)

        # Here we use a queue to combine messages coming from both the
        # fetch messages loop and the reclaim messages loop.
        queue = InternalQueue(maxsize=1)
        initial_reclaiming_complete = asyncio.Event()

        async def consume_loop():
            """Regular event consuming. See _fetch_new_messages()"""
            logger.debug(
                "Will begin consuming events once the initial event reclaiming is complete"
            )
            await initial_reclaiming_complete.wait()
            logger.debug(
                "Event reclaiming is complete, beginning to consume events")

            async for messages in self._fetch_new_messages(
                    streams, consumer_group, expected_events, forever):
                await queue.put(messages)
                # Wait for the queue to empty before trying to get another message
                await queue.join()

        retry_consume_loop = retry_on_redis_connection_failure(
            fn=consume_loop,
            retry_delay=self.consumption_restart_delay,
            action="consuming events")

        async def reclaim_loop():
            """
            Reclaim messages which other consumers have failed to
            process in a reasonable time. See _reclaim_lost_messages()
            """
            while True:
                logger.debug("Checking for any events which need reclaiming")
                async for messages in self._reclaim_lost_messages(
                        stream_names, consumer_group, expected_events):
                    await queue.put(messages)
                    # Wait for the queue to empty before trying to get another message
                    await queue.join()

                initial_reclaiming_complete.set()
                await asyncio.sleep(self.reclaim_interval)

        consume_task = None
        reclaim_task = None

        try:
            # Run the two above coroutines in their own tasks
            consume_task = asyncio.ensure_future(
                queue_exception_checker(retry_consume_loop, error_queue))
            reclaim_task = asyncio.ensure_future(
                queue_exception_checker(reclaim_loop(), error_queue))

            while True:
                try:
                    messages = await queue.get()
                    logger.debug(
                        f"Got batch of {len(messages)} message(s). Yielding messages to Lightbus"
                        " client")
                    yield messages
                    logger.debug(
                        f"Batch of {len(messages)} message(s) was processed by Lightbus client."
                        " Marking as done.")
                    queue.task_done()
                except GeneratorExit:
                    return
        finally:
            # Make sure we cleanup the tasks we created
            await cancel(consume_task, reclaim_task)
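Two details in this consumer are worth noting. First, consume_loop() and reclaim_loop() both fan in to the same maxsize=1 queue, which lets a single async generator yield batches arriving from either source. Second, each loop follows put() with queue.join(), so a producer does not merely wait for its batch to be dequeued: it waits until the Lightbus client has processed the batch and called task_done(). That pairing is what stops either loop from fetching further messages from Redis while a batch is still in flight.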
Code example #10
    async def call_rpc_remote(
        self, api_name: str, name: str, kwargs: dict = frozendict(), options: dict = frozendict()
    ):
        """ Perform an RPC call

        Call an RPC and return the result.
        """
        kwargs = deform_to_bus(kwargs)
        rpc_message = RpcMessage(api_name=api_name, procedure_name=name, kwargs=kwargs)
        validate_event_or_rpc_name(api_name, "rpc", name)

        logger.info("📞  Calling remote RPC {}.{}".format(Bold(api_name), Bold(name)))

        start_time = time.time()

        validate_outgoing(self.config, self.schema, rpc_message)

        await self.hook_registry.execute("before_rpc_call", rpc_message=rpc_message)

        result_queue = InternalQueue()

        # Send the RPC
        await self.producer.send(
            commands.CallRpcCommand(message=rpc_message, options=options)
        ).wait()

        # Start a listener which will wait for results
        await self.producer.send(
            commands.ReceiveResultCommand(
                message=rpc_message, destination_queue=result_queue, options=options
            )
        ).wait()

        # Wait for the result from the listener we started.
        # The RpcResultDock will handle timeouts
        result = await bail_on_error(self.error_queue, result_queue.get())

        call_time = time.time() - start_time

        try:
            if isinstance(result, Exception):
                raise result
        except asyncio.TimeoutError:
            raise LightbusTimeout(
                f"Timeout when calling RPC {rpc_message.canonical_name} after waiting for {human_time(call_time)}. "
                f"It is possible no Lightbus process is serving this API, or perhaps it is taking "
                f"too long to process the request. In which case consider raising the 'rpc_timeout' "
                f"config option."
            ) from None
        else:
            assert isinstance(result, ResultMessage)
            result_message = result

        await self.hook_registry.execute(
            "after_rpc_call", rpc_message=rpc_message, result_message=result_message
        )

        if not result_message.error:
            logger.info(
                L(
                    "🏁  Remote call of {} completed in {}",
                    Bold(rpc_message.canonical_name),
                    human_time(call_time),
                )
            )
        else:
            logger.warning(
                L(
                    "⚡ Error during remote call of RPC {}. Took {}: {}",
                    Bold(rpc_message.canonical_name),
                    human_time(call_time),
                    result_message.result,
                )
            )
            raise LightbusWorkerError(
                "Error while calling {}: {}\nRemote stack trace:\n{}".format(
                    rpc_message.canonical_name, result_message.result, result_message.trace
                )
            )

        validate_incoming(self.config, self.schema, result_message)

        return result_message.result
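Note the queue usage here: each RPC call creates its own single-use result_queue, the ReceiveResultCommand tells the result dock where to deliver the reply, and the caller then simply awaits result_queue.get(). Judging by its name and arguments, bail_on_error() watches the shared error queue while awaiting that get(), so a failure elsewhere in the client surfaces here instead of leaving the call waiting forever.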
Code example #11
File: test_utilities_unit.py  Project: imlzg/lightbus
def error_queue():
    return InternalQueue()
Code example #12
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_get_nowait():
    queue = InternalQueue()
    await queue.put(True)
    assert queue.get_nowait() is True
Code example #13
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_get_nowait_empty():
    queue = InternalQueue()
    with pytest.raises(QueueEmpty):
        queue.get_nowait()
Code example #14
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_put_nowait_full():
    queue = InternalQueue(maxsize=1)
    queue.put_nowait(True)
    with pytest.raises(QueueFull):
        queue.put_nowait(True)
Code example #15
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_put_nowait():
    queue = InternalQueue()
    queue.put_nowait(True)
Code example #16
File: test_internal_queue.py  Project: imlzg/lightbus
async def test_internal_queue_put():
    queue = InternalQueue()
    await queue.put(True)
Code example #17
File: creation.py  Project: imlzg/lightbus
def create(
    config: Union[dict, RootConfig] = None,
    *,
    config_file: str = None,
    service_name: str = None,
    process_name: str = None,
    features: List[Union[Feature, str]] = ALL_FEATURES,
    client_class: Type[BusClient] = BusClient,
    node_class: Type[BusPath] = BusPath,
    plugins=None,
    flask: bool = False,
    **kwargs,
) -> BusPath:
    """
    Create a new bus instance which can be used to access the bus.

    Typically this will be used as follows:

        import lightbus

        bus = lightbus.create()

    This will be a `BusPath` instance. If you wish to access the lower
    level `BusClient` you can do so via `bus.client`.

    Args:
        config (dict, Config): The config object or dictionary to load
        config_file (str): The path to a config file to load (should end in .json or .yaml)
        service_name (str): The name of this service - will be used when creating event consumer groups
        process_name (str): The unique name of this process - used when retrieving unprocessed events following a crash
        client_class (Type[BusClient]): The class from which the bus client will be instantiated
        node_class (BusPath): The class from which the bus path will be instantiated
        plugins (list): A list of plugin instances to load
        flask (bool): Are we using flask? If so, we will make sure we don't start lightbus in the reloader process
        **kwargs (): Any additional instantiation arguments to be passed to `client_class`.

    Returns: BusPath

    """
    if flask:
        in_flask_server = sys.argv[0].endswith("flask") and "run" in sys.argv
        if in_flask_server and os.environ.get("WERKZEUG_RUN_MAIN", "").lower() != "true":
            # Flask has a reloader process that shouldn't start a lightbus client
            return

    # Ensure an event loop exists, as creating InternalQueue
    # objects requires that we have one.
    get_event_loop()

    # If we are running via the Lightbus CLI then we may have
    # some command line arguments we need to apply.
    # pylint: disable=cyclic-import,import-outside-toplevel
    from lightbus.commands import COMMAND_PARSED_ARGS

    config_file = COMMAND_PARSED_ARGS.get("config_file", None) or config_file
    service_name = COMMAND_PARSED_ARGS.get("service_name", None) or service_name
    process_name = COMMAND_PARSED_ARGS.get("process_name", None) or process_name

    if config is None:
        config = load_config(
            from_file=config_file, service_name=service_name, process_name=process_name
        )

    if isinstance(config, Mapping):
        config = Config.load_dict(config or {})
    elif isinstance(config, RootConfig):
        config = Config(config)

    transport_registry = kwargs.pop("transport_registry", None) or TransportRegistry().load_config(
        config
    )

    schema = Schema(
        schema_transport=transport_registry.get_schema_transport(),
        max_age_seconds=config.bus().schema.ttl,
        human_readable=config.bus().schema.human_readable,
    )

    error_queue: ErrorQueueType = InternalQueue()

    # Plugin registry

    plugin_registry = PluginRegistry()
    if plugins is None:
        logger.debug("Auto-loading any installed Lightbus plugins...")
        plugin_registry.autoload_plugins(config)
    else:
        logger.debug("Loading explicitly specified Lightbus plugins....")
        plugin_registry.set_plugins(plugins)

    # Hook registry

    hook_registry = HookRegistry(
        error_queue=error_queue, execute_plugin_hooks=plugin_registry.execute_hook
    )

    # API registry

    api_registry = ApiRegistry()
    api_registry.add(LightbusStateApi())
    api_registry.add(LightbusMetricsApi())

    events_queue_client_to_dock = InternalQueue()
    events_queue_dock_to_client = InternalQueue()

    event_client = EventClient(
        api_registry=api_registry,
        hook_registry=hook_registry,
        config=config,
        schema=schema,
        error_queue=error_queue,
        consume_from=events_queue_dock_to_client,
        produce_to=events_queue_client_to_dock,
    )

    event_dock = EventDock(
        transport_registry=transport_registry,
        api_registry=api_registry,
        config=config,
        error_queue=error_queue,
        consume_from=events_queue_client_to_dock,
        produce_to=events_queue_dock_to_client,
    )

    rpcs_queue_client_to_dock = InternalQueue()
    rpcs_queue_dock_to_client = InternalQueue()

    rpc_result_client = RpcResultClient(
        api_registry=api_registry,
        hook_registry=hook_registry,
        config=config,
        schema=schema,
        error_queue=error_queue,
        consume_from=rpcs_queue_dock_to_client,
        produce_to=rpcs_queue_client_to_dock,
    )

    rpc_result_dock = RpcResultDock(
        transport_registry=transport_registry,
        api_registry=api_registry,
        config=config,
        error_queue=error_queue,
        consume_from=rpcs_queue_client_to_dock,
        produce_to=rpcs_queue_dock_to_client,
    )

    client = client_class(
        config=config,
        hook_registry=hook_registry,
        plugin_registry=plugin_registry,
        features=features,
        schema=schema,
        api_registry=api_registry,
        event_client=event_client,
        rpc_result_client=rpc_result_client,
        error_queue=error_queue,
        transport_registry=transport_registry,
        **kwargs,
    )

    # Pass the client to any hooks
    # (use a weakref to prevent circular references)
    hook_registry.set_extra_parameter("client", weakref.proxy(client))

    # We don't do this normally as the docks do not need to be
    # accessed directly, but this is useful in testing
    # TODO: Testing flag removed, but these are only needed in testing.
    #       Perhaps wrap them up in a way that makes this obvious
    client.event_dock = event_dock
    client.rpc_result_dock = rpc_result_dock

    log_welcome_message(
        logger=logger,
        transport_registry=transport_registry,
        schema=schema,
        plugin_registry=plugin_registry,
        config=config,
    )

    return node_class(name="", parent=None, client=client)
Code example #18
File: conftest.py  Project: C0DK/lightbus
def error_queue():
    queue = InternalQueue()
    yield queue
    assert queue.qsize() == 0, f"Errors found in error queue: {queue._queue}"