Example #1
def test_remove_all_plugins():
    assert get_plugins() == OrderedDict()
    manually_set_plugins(OrderedDict([
        ('p1', LightbusPlugin()),
    ]))
    remove_all_plugins()
    assert get_plugins() == OrderedDict()
Example #2
async def test_send_event(dummy_bus: BusPath, get_dummy_events):
    manually_set_plugins(
        plugins={
            "metrics": MetricsPlugin(service_name="foo", process_name="bar")
        })
    registry.add(TestApi())
    await dummy_bus.example.test.my_event.fire_async(f=123)

    # What events were fired?
    event_messages = get_dummy_events()
    assert len(
        event_messages
    ) == 2  # First is the actual event, followed by the metrics event

    # event_fired
    assert event_messages[1].api_name == "internal.metrics"
    assert event_messages[1].event_name == "event_fired"
    assert event_messages[1].kwargs.pop("timestamp")
    assert event_messages[1].kwargs == {
        "api_name": "example.test",
        "event_name": "my_event",
        "event_id": "event_id",
        "kwargs": {
            "f": 123
        },
        "service_name": "foo",
        "process_name": "bar",
    }
Example #3
def test_is_plugin_loaded():
    assert get_plugins() == OrderedDict()
    assert is_plugin_loaded(LightbusPlugin) is False
    manually_set_plugins(OrderedDict([
        ('p1', LightbusPlugin()),
    ]))
    assert is_plugin_loaded(LightbusPlugin) is True
Example #4
async def test_multiple_event_transports(loop, server, redis_server_b):
    """Configure a bus with two redis transports and ensure they write to the correct redis servers"""
    registry.add(ApiA())
    registry.add(ApiB())

    manually_set_plugins(plugins={})

    redis_server_a = server

    port_a = redis_server_a.tcp_address.port
    port_b = redis_server_b.tcp_address.port

    logging.warning(f"Server A port: {port_a}")
    logging.warning(f"Server B port: {port_b}")

    config = Config.load_dict(
        {
            "bus": {"schema": {"transport": {"redis": {"url": f"redis://localhost:{port_a}"}}}},
            "apis": {
                "default": {
                    "event_transport": {
                        "redis": {
                            "url": f"redis://localhost:{port_a}",
                            "stream_use": StreamUse.PER_EVENT.value,
                        }
                    }
                },
                "api_b": {
                    "event_transport": {
                        "redis": {
                            "url": f"redis://localhost:{port_b}",
                            "stream_use": StreamUse.PER_EVENT.value,
                        }
                    }
                },
            },
        }
    )

    bus = BusPath(name="", parent=None, client=lightbus.BusClient(config=config, loop=loop))
    await asyncio.sleep(0.1)

    await bus.api_a.event_a.fire_async()
    await bus.api_b.event_b.fire_async()

    connection_manager_a = bus.client.transport_registry.get_event_transport(
        "api_a"
    ).connection_manager
    connection_manager_b = bus.client.transport_registry.get_event_transport(
        "api_b"
    ).connection_manager

    with await connection_manager_a() as redis:
        assert await redis.xrange("api_a.event_a:stream")
        assert await redis.xrange("api_b.event_b:stream") == []

    with await connection_manager_b() as redis:
        assert await redis.xrange("api_a.event_a:stream") == []
        assert await redis.xrange("api_b.event_b:stream")
Example #5
async def test_local_rpc_call(loop, dummy_bus: BusPath, consume_rpcs,
                              get_dummy_events, mocker):
    rpc_transport = dummy_bus.client.transport_registry.get_rpc_transport(
        "default")
    mocker.patch.object(
        rpc_transport,
        "_get_fake_messages",
        return_value=[
            RpcMessage(id="123abc",
                       api_name="example.test",
                       procedure_name="my_method",
                       kwargs={"f": 123})
        ],
    )

    # Setup the bus and do the call
    manually_set_plugins(
        plugins={
            "metrics": MetricsPlugin(service_name="foo", process_name="bar")
        })
    registry.add(TestApi())

    task = asyncio.ensure_future(consume_rpcs(dummy_bus), loop=loop)

    # The dummy transport will fire an event every 0.1 seconds
    await asyncio.sleep(0.15)

    await cancel(task)

    event_messages = get_dummy_events()
    assert len(event_messages) == 2, event_messages

    # before_rpc_execution
    assert event_messages[0].api_name == "internal.metrics"
    assert event_messages[0].event_name == "rpc_call_received"
    assert event_messages[0].kwargs.pop("timestamp")
    assert event_messages[0].kwargs == {
        "api_name": "example.test",
        "procedure_name": "my_method",
        "id": "123abc",
        "service_name": "foo",
        "process_name": "bar",
    }

    # after_rpc_execution
    assert event_messages[1].api_name == "internal.metrics"
    assert event_messages[1].event_name == "rpc_response_sent"
    assert event_messages[1].kwargs.pop("timestamp")
    assert event_messages[1].kwargs == {
        "api_name": "example.test",
        "procedure_name": "my_method",
        "id": "123abc",
        "result": "value",
        "service_name": "foo",
        "process_name": "bar",
    }
Example #6
async def test_execute_events(dummy_bus: BusPath, dummy_listener,
                              get_dummy_events, mocker):
    event_transport = dummy_bus.client.transport_registry.get_event_transport(
        "default")
    mocker.patch.object(
        event_transport,
        "_get_fake_message",
        return_value=EventMessage(api_name="example.test",
                                  event_name="my_event",
                                  kwargs={"f": 123}),
    )

    await dummy_listener("example.test", "my_event")

    # Setup the bus and do the call
    manually_set_plugins(
        plugins={
            "metrics": MetricsPlugin(service_name="foo", process_name="bar")
        })
    registry.add(TestApi())

    # The dummy transport will fire an event every 0.1 seconds
    await asyncio.sleep(0.15)

    event_messages = get_dummy_events()
    assert len(event_messages) == 2

    # event_received
    assert event_messages[0].api_name == "internal.metrics"
    assert event_messages[0].event_name == "event_received"
    assert event_messages[0].kwargs.pop("timestamp")
    assert event_messages[0].kwargs == {
        "api_name": "example.test",
        "event_name": "my_event",
        "event_id": "event_id",
        "kwargs": {
            "f": 123
        },
        "service_name": "foo",
        "process_name": "bar",
    }

    # event_processed
    assert event_messages[1].api_name == "internal.metrics"
    assert event_messages[1].event_name == "event_processed"
    assert event_messages[1].kwargs.pop("timestamp")
    assert event_messages[1].kwargs == {
        "api_name": "example.test",
        "event_name": "my_event",
        "event_id": "event_id",
        "kwargs": {
            "f": 123
        },
        "service_name": "foo",
        "process_name": "bar",
    }
Example #7
async def test_random_failures(bus: lightbus.path.BusPath, caplog,
                               fire_dummy_events, dummy_api, mocker):
    # Use test_history() (below) to repeat any cases which fail
    caplog.set_level(logging.WARNING)

    # The metrics plugins will add too much overhead to this test
    manually_set_plugins({})

    event_ok_ids = dict()
    history = []

    async def listener(event_message, field, **kwargs):
        call_id = field
        event_ok_ids.setdefault(call_id, 0)
        event_ok_ids[call_id] += 1
        await asyncio.sleep(0.01)

    fire_task = asyncio.ensure_future(
        fire_dummy_events(total=100, initial_delay=0.1))

    for _ in range(0, 20):
        logging.warning(
            "TEST: Still waiting for events to finish. {} so far".format(
                len(event_ok_ids)))
        for _ in range(0, 5):
            listen_task = asyncio.ensure_future(
                bus.my.dummy.my_event.listen_async(listener))
            await asyncio.sleep(0.2)
            listen_task.cancel()
            await listen_task

        if len(event_ok_ids) == 100:
            logging.warning("TEST: Events finished")
            break

    # Cleanup the tasks
    fire_task.cancel()
    try:
        await fire_task
        fire_task.result()
    except CancelledError:
        pass

    duplicate_calls = sum([n - 1 for n in event_ok_ids.values()])

    logger.warning("History: {}".format(",".join("{}{}".format(*x)
                                                 for x in history)))
    logger.warning(
        "Finished with {}/100 events processed, {} duplicated calls".format(
            len(event_ok_ids), duplicate_calls))

    assert set(event_ok_ids.keys()) == set(range(0, 100))

    assert duplicate_calls > 0
Example #8
def test_manually_set_plugins():
    assert get_plugins() == OrderedDict()
    p1 = LightbusPlugin()
    p2 = LightbusPlugin()
    manually_set_plugins(OrderedDict([
        ('p1', p1),
        ('p2', p2),
    ]))
    assert get_plugins() == OrderedDict([
        ('p1', p1),
        ('p2', p2),
    ])
Example #9
async def test_plugin_hook(mocker):
    """Ensure calling plugin_hook() calls the method on the plugin"""
    assert get_plugins() == OrderedDict()
    plugin = LightbusPlugin()
    manually_set_plugins(OrderedDict([
        ('p1', plugin),
    ]))

    async def dummy_coroutine(*args, **kwargs):
        pass
    m = mocker.patch.object(plugin, 'before_server_start', return_value=dummy_coroutine())

    await plugin_hook('before_server_start', bus_client=None, loop=None)
    assert m.called
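The test above mocks the hook method. For illustration only, below is a minimal sketch of a real plugin registered with manually_set_plugins() and triggered through plugin_hook(). The CustomPlugin class, its print() call, and the lightbus.plugins import path are assumptions; the before_server_start hook name and the plugin_hook() call mirror the test above.

# Minimal sketch (not from the source). CustomPlugin and its print() call are
# hypothetical; the lightbus.plugins import path is an assumption.
import asyncio
from collections import OrderedDict

from lightbus.plugins import LightbusPlugin, manually_set_plugins, plugin_hook


class CustomPlugin(LightbusPlugin):
    async def before_server_start(self, **kwargs):
        # Receives bus_client and loop keyword arguments, as in the test above
        print("before_server_start fired")


async def main():
    # Register the plugin globally, then fire the hook on every loaded plugin
    manually_set_plugins(OrderedDict([("custom", CustomPlugin())]))
    await plugin_hook("before_server_start", bus_client=None, loop=None)


asyncio.run(main())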
Example #10
async def test_execute_events(dummy_bus: BusNode, event_consumer,
                              get_dummy_events, mocker):
    mocker.patch.object(dummy_bus.bus_client.event_transport,
                        '_get_fake_messages',
                        return_value=[
                            EventMessage(api_name='example.test',
                                         event_name='my_event',
                                         kwargs={'f': 123})
                        ])

    # Setup the bus and do the call
    manually_set_plugins(plugins={'metrics': MetricsPlugin()})
    registry.add(TestApi())

    # The dummy transport will fire an event every 0.1 seconds
    await asyncio.sleep(0.15)

    event_messages = get_dummy_events()
    assert len(event_messages) == 2

    # event_received
    assert event_messages[0].api_name == 'internal.metrics'
    assert event_messages[0].event_name == 'event_received'
    assert event_messages[0].kwargs.pop('timestamp')
    assert event_messages[0].kwargs.pop('process_name')
    assert event_messages[0].kwargs == {
        'api_name': 'example.test',
        'event_name': 'my_event',
        'event_id': 'event_id',
        'kwargs': {
            'f': 123
        }
    }

    # event_processed
    assert event_messages[1].api_name == 'internal.metrics'
    assert event_messages[1].event_name == 'event_processed'
    assert event_messages[1].kwargs.pop('timestamp')
    assert event_messages[1].kwargs.pop('process_name')
    assert event_messages[1].kwargs == {
        'api_name': 'example.test',
        'event_name': 'my_event',
        'event_id': 'event_id',
        'kwargs': {
            'f': 123
        }
    }
Example #11
async def test_event_exception_in_listener_realtime(bus: lightbus.path.BusPath,
                                                    dummy_api, redis_client):
    """Start a listener (which errors) and then add events to the stream.
    The listener will load them one-by-one."""
    manually_set_plugins({})
    received_messages = []

    # Don't shutdown on error
    bus.client.config.api("default").on_error = OnError.STOP_LISTENER

    async def listener(event_message, **kwargs):
        nonlocal received_messages
        received_messages.append(event_message)
        raise Exception()

    await bus.my.dummy.my_event.listen_async(listener,
                                             bus_options={"since": "0"})
    await asyncio.sleep(0.1)

    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await asyncio.sleep(0.01)

    # Died when processing first message, so we only saw one message
    assert len(received_messages) == 1

    # Now check we have not acked any messages

    messages = await redis_client.xrange("my.dummy.my_event:stream")
    # Message 0 is the noop message used to create the stream
    message_ids = [id_ for id_, *_ in messages]

    pending_messages = await redis_client.xpending("my.dummy.my_event:stream",
                                                   "test_cg-default", "-", "+",
                                                   10, "test_consumer")
    pending_message_ids = [id_ for id_, *_ in pending_messages]
    # The first 2 messages are still pending. Why 2 messages? Because:
    #  - 1. The noop message used to create the stream (because we listened before we fired)
    #  - 2. The first message which caused the error

    assert len(pending_message_ids) == 2
    assert pending_message_ids == message_ids[:2]
Example #12
    def setup(self, plugins: dict=None):
        """Setup lightbus and get it ready to consume events and/or RPCs

        You should call this manually if you are calling `consume_rpcs()` or
        `consume_events()` directly. This you be handled for you if you are
        calling `run_forever()`.
        """
        if plugins is None:
            logger.debug("Auto-loading any installed Lightbus plugins...")
            plugins = autoload_plugins()
        else:
            logger.debug("Loading explicitly specified Lightbus plugins....")
            manually_set_plugins(plugins)

        if plugins:
            logger.info(LBullets("Loaded the following plugins ({})".format(len(plugins)), items=plugins))
        else:
            logger.info("No plugins loaded")
Example #13
async def test_remote_rpc_call(dummy_bus: BusPath, get_dummy_events):
    # Setup the bus and do the call
    manually_set_plugins(
        plugins={
            "metrics": MetricsPlugin(service_name="foo", process_name="bar")
        })
    registry.add(TestApi())
    await dummy_bus.example.test.my_method.call_async(f=123)

    # What events were fired?
    event_messages = get_dummy_events()
    assert len(event_messages) == 2

    # rpc_call_sent
    assert event_messages[0].api_name == "internal.metrics"
    assert event_messages[0].event_name == "rpc_call_sent"
    # Pop these next two as the values are variable
    assert event_messages[0].kwargs.pop("timestamp")
    assert event_messages[0].kwargs.pop("id")
    assert event_messages[0].kwargs == {
        "api_name": "example.test",
        "procedure_name": "my_method",
        "kwargs": {
            "f": 123
        },
        "service_name": "foo",
        "process_name": "bar",
    }

    # rpc_response_received
    assert event_messages[1].api_name == "internal.metrics"
    assert event_messages[1].event_name == "rpc_response_received"
    # Pop these next two as the values are variable
    assert event_messages[1].kwargs.pop("timestamp")
    assert event_messages[1].kwargs.pop("id")
    assert event_messages[1].kwargs == {
        "api_name": "example.test",
        "procedure_name": "my_method",
        "service_name": "foo",
        "process_name": "bar",
    }
Example #14
async def test_listen_to_multiple_events_across_multiple_transports(loop, server, redis_server_b):
    registry.add(ApiA())
    registry.add(ApiB())

    manually_set_plugins(plugins={})

    redis_server_a = server

    port_a = redis_server_a.tcp_address.port
    port_b = redis_server_b.tcp_address.port

    logging.warning(f"Server A port: {port_a}")
    logging.warning(f"Server B port: {port_b}")

    config = Config.load_dict(
        {
            "bus": {"schema": {"transport": {"redis": {"url": f"redis://localhost:{port_a}"}}}},
            "apis": {
                "default": {"event_transport": {"redis": {"url": f"redis://localhost:{port_a}"}}},
                "api_b": {"event_transport": {"redis": {"url": f"redis://localhost:{port_b}"}}},
            },
        }
    )

    bus = BusPath(name="", parent=None, client=lightbus.BusClient(config=config, loop=loop))
    await asyncio.sleep(0.1)

    calls = 0

    def listener(*args, **kwargs):
        nonlocal calls
        calls += 1

    await bus.listen_multiple_async([bus.api_a.event_a, bus.api_b.event_b], listener=listener)

    await asyncio.sleep(0.1)
    await bus.api_a.event_a.fire_async()
    await bus.api_b.event_b.fire_async()
    await asyncio.sleep(0.1)

    assert calls == 2
Example #15
async def test_event(bus: lightbus.path.BusPath, dummy_api, stream_use):
    """Full event integration test"""
    bus.client.transport_registry.get_event_transport("default").stream_use = stream_use
    manually_set_plugins({})
    received_messages = []

    async def listener(event_message, **kwargs):
        nonlocal received_messages
        received_messages.append(event_message)

    await bus.my.dummy.my_event.listen_async(listener)
    await asyncio.sleep(0.01)
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await asyncio.sleep(0.01)

    # await asyncio.gather(co_fire_event(), co_listen_for_events())
    assert len(received_messages) == 1
    assert received_messages[0].kwargs == {"field": "Hello! 😎"}
    assert received_messages[0].api_name == "my.dummy"
    assert received_messages[0].event_name == "my_event"
    assert received_messages[0].native_id
Example #16
async def test_multiple_rpc_transports(loop, server, redis_server_b, consume_rpcs):
    """Configure a bus with two redis transports and ensure they write to the correct redis servers"""
    registry.add(ApiA())
    registry.add(ApiB())

    manually_set_plugins(plugins={})

    redis_server_a = server

    port_a = redis_server_a.tcp_address.port
    port_b = redis_server_b.tcp_address.port

    logging.warning(f"Server A port: {port_a}")
    logging.warning(f"Server B port: {port_b}")

    config = Config.load_dict(
        {
            "bus": {"schema": {"transport": {"redis": {"url": f"redis://localhost:{port_a}"}}}},
            "apis": {
                "default": {
                    "rpc_transport": {"redis": {"url": f"redis://localhost:{port_a}"}},
                    "result_transport": {"redis": {"url": f"redis://localhost:{port_a}"}},
                },
                "api_b": {
                    "rpc_transport": {"redis": {"url": f"redis://localhost:{port_b}"}},
                    "result_transport": {"redis": {"url": f"redis://localhost:{port_b}"}},
                },
            },
        }
    )

    bus = BusPath(name="", parent=None, client=lightbus.BusClient(config=config, loop=loop))
    asyncio.ensure_future(consume_rpcs(bus))
    await asyncio.sleep(0.1)

    await bus.api_a.rpc_a.call_async()
    await bus.api_b.rpc_b.call_async()
Example #17
async def test_event_exception_in_listener_batch_fetch(
        bus: lightbus.path.BusPath, dummy_api, redis_client):
    """Add a number of events to a stream the startup a listener which errors.
    The listener will fetch them all at once."""
    manually_set_plugins({})
    received_messages = []

    # Don't shutdown on error
    bus.client.config.api("default").on_error = OnError.STOP_LISTENER

    async def listener(event_message, **kwargs):
        nonlocal received_messages
        received_messages.append(event_message)
        raise Exception()

    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")
    await bus.my.dummy.my_event.fire_async(field="Hello! 😎")

    await bus.my.dummy.my_event.listen_async(listener,
                                             bus_options={"since": "0"})
    await asyncio.sleep(0.1)

    # Died when processing first message, so we only saw one message
    assert len(received_messages) == 1

    # Now check we have not acked any of them

    messages = await redis_client.xrange("my.dummy.my_event:stream")
    # No message0 here because the stream already exists (because we've just added events to it)
    message1_id, message2_id, message3_id = [id_ for id_, *_ in messages]

    pending_messages = await redis_client.xpending("my.dummy.my_event:stream",
                                                   "test_cg-default", "-", "+",
                                                   10, "test_consumer")

    assert len(pending_messages) == 3
Example #18
async def test_send_event(dummy_bus: BusNode, get_dummy_events):
    manually_set_plugins(plugins={'metrics': MetricsPlugin()})
    registry.add(TestApi())
    await dummy_bus.example.test.my_event.fire_async(f=123)

    # What events were fired?
    event_messages = get_dummy_events()
    assert len(
        event_messages
    ) == 2  # First is the actual event, followed by the metrics event

    # event_fired
    assert event_messages[1].api_name == 'internal.metrics'
    assert event_messages[1].event_name == 'event_fired'
    assert event_messages[1].kwargs.pop('timestamp')
    assert event_messages[1].kwargs.pop('process_name')
    assert event_messages[1].kwargs == {
        'api_name': 'example.test',
        'event_name': 'my_event',
        'event_id': 'event_id',
        'kwargs': {
            'f': 123
        }
    }
Example #19
async def test_remote_rpc_call(dummy_bus: BusNode, get_dummy_events):
    # Setup the bus and do the call
    manually_set_plugins(plugins={'metrics': MetricsPlugin()})
    registry.add(TestApi())
    await dummy_bus.example.test.my_method.call_async(f=123)

    # What events were fired?
    event_messages = get_dummy_events()
    assert len(event_messages) == 2

    # rpc_call_sent
    assert event_messages[0].api_name == 'internal.metrics'
    assert event_messages[0].event_name == 'rpc_call_sent'
    # Pop these next two as the values are variable
    assert event_messages[0].kwargs.pop('timestamp')
    assert event_messages[0].kwargs.pop('rpc_id')
    assert event_messages[0].kwargs.pop('process_name')
    assert event_messages[0].kwargs == {
        'api_name': 'example.test',
        'procedure_name': 'my_method',
        'kwargs': {
            'f': 123
        },
    }

    # rpc_response_received
    assert event_messages[1].api_name == 'internal.metrics'
    assert event_messages[1].event_name == 'rpc_response_received'
    # Pop these next two as the values are variable
    assert event_messages[1].kwargs.pop('timestamp')
    assert event_messages[1].kwargs.pop('rpc_id')
    assert event_messages[1].kwargs.pop('process_name')
    assert event_messages[1].kwargs == {
        'api_name': 'example.test',
        'procedure_name': 'my_method',
    }
Example #20
def do_add_base_plugin():
    manually_set_plugins(plugins={"base": LightbusPlugin()})
Example #21
def test_is_plugin_loaded():
    assert get_plugins() is None
    assert is_plugin_loaded(LightbusPlugin) is False
    manually_set_plugins(OrderedDict([("p1", LightbusPlugin())]))
    assert is_plugin_loaded(LightbusPlugin) is True
Example #22
def test_remove_all_plugins():
    assert get_plugins() is None
    manually_set_plugins(OrderedDict([("p1", LightbusPlugin())]))
    remove_all_plugins()
    assert get_plugins() is None
Example #23
def test_manually_set_plugins():
    assert get_plugins() is None
    p1 = LightbusPlugin()
    p2 = LightbusPlugin()
    manually_set_plugins(OrderedDict([("p1", p1), ("p2", p2)]))
    assert get_plugins() == OrderedDict([("p1", p1), ("p2", p2)])
Example #24
    async def setup_async(self, plugins: dict = None):
        """Setup lightbus and get it ready to consume events and/or RPCs

        You should call this manually if you are calling `consume_rpcs()`
        directly. This you be handled for you if you are
        calling `run_forever()`.
        """
        logger.info(
            LBullets(
                "Lightbus is setting up",
                items={
                    "service_name (set with -s or LIGHTBUS_SERVICE_NAME)":
                    Bold(self.config.service_name),
                    "process_name (with with -p or LIGHTBUS_PROCESS_NAME)":
                    Bold(self.config.process_name),
                },
            ))

        # Log the transport information
        rpc_transport = self.transport_registry.get_rpc_transport("default",
                                                                  default=None)
        result_transport = self.transport_registry.get_result_transport(
            "default", default=None)
        event_transport = self.transport_registry.get_event_transport(
            "default", default=None)
        log_transport_information(rpc_transport, result_transport,
                                  event_transport,
                                  self.schema.schema_transport, logger)

        # Log the plugins we have
        if plugins is None:
            logger.debug("Auto-loading any installed Lightbus plugins...")
            # Force auto-loading as many commands need to do config-less best-effort
            # plugin loading. But now we have the config loaded so we can
            # make sure we load the plugins properly.
            plugins = autoload_plugins(self.config, force=True)
        else:
            logger.debug("Loading explicitly specified Lightbus plugins....")
            manually_set_plugins(plugins)

        if plugins:
            logger.info(
                LBullets("Loaded the following plugins ({})".format(
                    len(plugins)),
                         items=plugins))
        else:
            logger.info("No plugins loaded")

        # Load schema
        logger.debug("Loading schema...")
        await self.schema.load_from_bus()

        # Share the schema of the registered APIs
        for api in registry.all():
            await self.schema.add_api(api)

        logger.info(
            LBullets(
                "Loaded the following remote schemas ({})".format(
                    len(self.schema.remote_schemas)),
                items=self.schema.remote_schemas.keys(),
            ))

        for transport in self.transport_registry.get_all_transports():
            await transport.open()