Example #1
    async def send_event(self, event_message: EventMessage, options: dict,
                         bus_client: "BusClient"):
        """Publish an event"""
        stream = self._get_stream_names(
            listen_for=[(event_message.api_name, event_message.event_name)])[0]

        logger.debug(
            LBullets(
                L(
                    "Enqueuing event message {} in Redis stream {}",
                    Bold(event_message),
                    Bold(stream),
                ),
                items=dict(**event_message.get_metadata(),
                           kwargs=event_message.get_kwargs()),
            ))

        # Performance: I suspect getting a connection from the connection manager each time is causing
        # performance issues. Need to confirm.
        with await self.connection_manager() as redis:
            start_time = time.time()
            await redis.xadd(
                stream=stream,
                fields=self.serializer(event_message),
                max_len=self.max_stream_length or None,
                exact_len=False,
            )

        logger.debug(
            L(
                "Enqueued event message {} in Redis stream {} in {}",
                Bold(event_message),
                Bold(stream),
                human_time(time.time() - start_time),
            ))
Example #2
async def test_event_execution(track_called_hooks, new_bus, worker: Worker,
                               loop, add_base_plugin, dummy_api):
    add_base_plugin()
    bus = new_bus()
    bus.client.register_api(dummy_api)
    hook_tracker = track_called_hooks(bus.client)

    bus.client.listen_for_event("my.dummy",
                                "my_event",
                                lambda *a, **kw: None,
                                listener_name="test")

    async with worker(bus):
        await asyncio.sleep(0.1)

        # Send the event message using a lower-level API to avoid triggering the
        # before_event_sent & after_event_sent plugin hooks. We don't care about those here
        event_message = EventMessage(api_name="my.dummy",
                                     event_name="my_event",
                                     kwargs={"field": "a"})
        event_transport = bus.client.transport_registry.get_event_transport(
            "default")
        await event_transport.send_event(event_message, options={})
        await asyncio.sleep(0.1)

    # FYI: There is a chance of events firing twice (because the dummy_bus keeps firing events),
    # so we assert membership rather than an exact match
    assert "before_event_execution" in hook_tracker()
    assert "after_event_execution" in hook_tracker()
Example #3
async def test_event_execution(called_hooks, dummy_bus: BusPath, loop,
                               add_base_plugin, dummy_api):
    add_base_plugin()
    await dummy_bus.client.register_api_async(dummy_api)

    await dummy_bus.client.listen_for_event("my.dummy",
                                            "my_event",
                                            lambda *a, **kw: None,
                                            listener_name="test")
    await asyncio.sleep(0.1)

    # Send the event message using a lower-level API to avoid triggering the
    # before_event_sent & after_event_sent plugin hooks. We don't care about those here
    event_message = EventMessage(api_name="my.dummy",
                                 event_name="my_event",
                                 kwargs={"field": 1})
    event_transport = dummy_bus.client.transport_registry.get_event_transport(
        "default")
    await event_transport.send_event(event_message,
                                     options={},
                                     bus_client=None)
    await asyncio.sleep(0.1)

    # There is a chance of events firing twice (because the dummy_bus keeps firing events),
    # so cast the lists to sets before comparing
    assert set(
        called_hooks()) == {"before_event_execution", "after_event_execution"}
Example #4
async def test_event_execution(called_hooks, dummy_bus: BusPath, loop,
                               add_base_plugin, dummy_api):
    add_base_plugin()

    task = await dummy_bus.client.listen_for_event("my.dummy", "my_event",
                                                   lambda *a, **kw: None)
    await asyncio.sleep(0.1)

    # Send the event message using a lower-level API to avoid triggering the
    # before_event_sent & after_event_sent plugin hooks. We don't care about those here
    event_message = EventMessage(api_name="my.dummy",
                                 event_name="my_event",
                                 kwargs={"field": 1})
    event_transport = dummy_bus.client.transport_registry.get_event_transport(
        "default")
    await event_transport.send_event(event_message, options={})
    await asyncio.sleep(0.1)

    assert called_hooks() == [
        "before_event_execution", "after_event_execution"
    ]
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        pass
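
This cancel/await/except dance at the end of the test is a recurring teardown pattern; the `cancel()` helper called in Example #11 presumably wraps the same steps. A minimal stdlib sketch of such a helper:

import asyncio

async def cancel(*tasks):
    # Cancel each task and wait for it to finish, swallowing the
    # CancelledError that the await is expected to raise.
    for task in tasks:
        if task is None:
            continue
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass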
Example #5
    async def after_server_stopped(self, *, bus_client: BusClient, loop):
        await bus_client.event_transport.send_event(
            EventMessage(api_name='internal.state',
                         event_name='server_stopped',
                         kwargs=dict(process_name=bus_client.process_name)),
            options={},
        )
Example #6
    async def _send_ping(self, bus_client: BusClient):
        while True:
            await asyncio.sleep(self.ping_interval)
            await bus_client.event_transport.send_event(
                EventMessage(api_name='internal.state',
                             event_name='server_ping',
                             kwargs=self.get_state_kwargs(bus_client)),
                options={},
            )
Example #7
async def test_send_event_return_value(redis_event_transport: RedisEventTransport, redis_client):
    event_message = await redis_event_transport.send_event(
        EventMessage(api_name="my.api", event_name="my_event", id="123", kwargs={"field": "value"}),
        options={},
    )
    assert isinstance(event_message, EventMessage)
    assert event_message.id
    assert event_message.native_id
    assert re.match(r"\d+-0", event_message.native_id)
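
The `native_id` asserted here is the Redis stream entry ID, which has the form `<milliseconds>-<sequence>`. A small stdlib sketch for pulling the timestamp back out of one (the function name is illustrative, not part of Lightbus):

import re
from datetime import datetime, timezone

def parse_stream_id(native_id: str):
    # Split "<ms>-<seq>" into a UTC datetime and the per-millisecond
    # sequence number Redis assigns to entries added in the same ms.
    match = re.fullmatch(r"(\d+)-(\d+)", native_id)
    if not match:
        raise ValueError("Not a Redis stream ID: {!r}".format(native_id))
    ms, seq = int(match.group(1)), int(match.group(2))
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc), seq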
Example #8
def test_blob_serializer():
    serializer = BlobMessageSerializer()
    serialized = serializer(
        EventMessage(api_name="my.api", event_name="my_event", id="123", kwargs={"field": "value"})
    )
    assert json.loads(serialized) == {
        "metadata": {"api_name": "my.api", "event_name": "my_event", "id": "123", "version": 1},
        "kwargs": {"field": "value"},
    }
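
The blob format stores the whole message as a single JSON document with separate "metadata" and "kwargs" sections. A minimal stdlib sketch that produces the same shape asserted above (a standalone illustration, not Lightbus's implementation):

import json

def blob_serialize(metadata: dict, kwargs: dict) -> str:
    # Everything goes into one JSON blob; consumers must parse the whole
    # document even if they only need the metadata.
    return json.dumps({"metadata": metadata, "kwargs": kwargs})

serialized = blob_serialize(
    {"api_name": "my.api", "event_name": "my_event", "id": "123", "version": 1},
    {"field": "value"},
)
assert json.loads(serialized)["kwargs"] == {"field": "value"}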
Example #9
async def test_execute_events(dummy_bus: BusPath, dummy_listener,
                              get_dummy_events, mocker):
    event_transport = dummy_bus.client.transport_registry.get_event_transport(
        "default")
    mocker.patch.object(
        event_transport,
        "_get_fake_message",
        return_value=EventMessage(api_name="example.test",
                                  event_name="my_event",
                                  kwargs={"f": 123}),
    )

    await dummy_listener("example.test", "my_event")

    # Set up the bus and make the call
    manually_set_plugins(
        plugins={
            "metrics": MetricsPlugin(service_name="foo", process_name="bar")
        })
    registry.add(TestApi())

    # The dummy transport will fire an event every 0.1 seconds
    await asyncio.sleep(0.15)

    event_messages = get_dummy_events()
    assert len(event_messages) == 2

    # before_event_execution
    assert event_messages[0].api_name == "internal.metrics"
    assert event_messages[0].event_name == "event_received"
    assert event_messages[0].kwargs.pop("timestamp")
    assert event_messages[0].kwargs == {
        "api_name": "example.test",
        "event_name": "my_event",
        "event_id": "event_id",
        "kwargs": {
            "f": 123
        },
        "service_name": "foo",
        "process_name": "bar",
    }

    # after_event_execution
    assert event_messages[1].api_name == "internal.metrics"
    assert event_messages[1].event_name == "event_processed"
    assert event_messages[1].kwargs.pop("timestamp")
    assert event_messages[1].kwargs == {
        "api_name": "example.test",
        "event_name": "my_event",
        "event_id": "event_id",
        "kwargs": {
            "f": 123
        },
        "service_name": "foo",
        "process_name": "bar",
    }
Example #10
    async def send_event(self, event_message: EventMessage, options: dict):
        """Publish an event"""
        stream = '{}.{}:stream'.format(event_message.api_name, event_message.event_name)
        logger.debug(
            LBullets(
                L("Enqueuing event message {} in Redis stream {}", Bold(event_message), Bold(stream)),
                items=event_message.to_dict()
            )
        )

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            # TODO: MAXLEN
            await redis.xadd(stream=stream, fields=encode_message_fields(event_message.to_dict()))

        logger.info(L(
            "Enqueued event message {} in Redis stream {} in {}",
            Bold(event_message), Bold(stream), human_time(time.time() - start_time)
        ))
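
The stream name in this older transport is derived directly from the API and event names; this is the same convention the tests below read back with xrange(). A one-line sketch (Example #24 suggests a per-API mode where the event part is replaced with "*"):

def stream_name(api_name: str, event_name: str) -> str:
    # One Redis stream per event, e.g. "my.api.my_event:stream"
    return "{}.{}:stream".format(api_name, event_name)

assert stream_name("my.api", "my_event") == "my.api.my_event:stream"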
Example #11
    async def after_worker_stopped(self, *, client: "BusClient"):
        event_transport = client.transport_registry.get_event_transport("internal.metrics")
        await event_transport.send_event(
            EventMessage(
                api_name="internal.state",
                event_name="worker_stopped",
                kwargs=dict(process_name=self.process_name, service_name=self.service_name),
            ),
            options={},
        )
        await cancel(self._ping_task)
Example #12
    async def _send_ping(self, client: "BusClient"):
        event_transport = client.transport_registry.get_event_transport("internal.metrics")
        while True:
            await asyncio.sleep(self.ping_interval)
            await event_transport.send_event(
                EventMessage(
                    api_name="internal.state",
                    event_name="worker_ping",
                    kwargs=self.get_state_kwargs(client),
                ),
                options={},
            )
Example #13
    async def fire_event(self,
                         api_name,
                         name,
                         kwargs: dict = None,
                         options: dict = None):
        kwargs = kwargs or {}
        try:
            api = registry.get(api_name)
        except UnknownApi:
            raise UnknownApi(
                "Lightbus tried to fire the event {api_name}.{name}, but could not find API {api_name} in the "
                "registry. An API being in the registry implies you are an authority on that API. Therefore, "
                "Lightbus requires the API to be in the registry as it is a bad idea to fire "
                "events on behalf of remote APIs. However, this could also be caused by a typo in the "
                "API name or event name, or be because the API class has not been "
                "imported. ".format(**locals()))

        self._validate_name(api_name, "event", name)

        try:
            event = api.get_event(name)
        except EventNotFound:
            raise EventNotFound(
                "Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does not "
                "seem to contain an event named {name}. You may need to define the event, you "
                "may also be using the incorrect API. Also check for typos.".
                format(**locals()))

        if set(kwargs.keys()) != _parameter_names(event.parameters):
            raise InvalidEventArguments(
                "Invalid event arguments supplied when firing event. Attempted to fire event with "
                "{} arguments: {}. Event expected {}: {}".format(
                    len(kwargs),
                    sorted(kwargs.keys()),
                    len(event.parameters),
                    sorted(_parameter_names(event.parameters)),
                ))

        kwargs = deform_to_bus(kwargs)
        event_message = EventMessage(api_name=api.meta.name,
                                     event_name=name,
                                     kwargs=kwargs)

        self._validate(event_message, "outgoing")

        event_transport = self.transport_registry.get_event_transport(api_name)
        await self._plugin_hook("before_event_sent",
                                event_message=event_message)
        logger.info(
            L("📤  Sending event {}.{}".format(Bold(api_name), Bold(name))))
        await event_transport.send_event(event_message, options=options)
        await self._plugin_hook("after_event_sent",
                                event_message=event_message)
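
The argument check in fire_event() is a strict set comparison: the kwargs supplied must match the event's declared parameter names exactly, with no extras and no omissions. A self-contained sketch of just that check (the exception class here stands in for Lightbus's own):

class InvalidEventArguments(Exception):
    pass

def check_event_kwargs(kwargs: dict, parameter_names: set):
    # Exact match required: missing *or* unexpected arguments both fail.
    if set(kwargs.keys()) != parameter_names:
        raise InvalidEventArguments(
            "Attempted to fire event with {} arguments: {}. Event expected {}: {}".format(
                len(kwargs), sorted(kwargs), len(parameter_names), sorted(parameter_names)
            )
        )

check_event_kwargs({"field": 1}, {"field"})  # OK: names match exactly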
Example #14
async def test_reconnect_upon_send_event(
    redis_event_transport: RedisEventTransport, redis_client, get_total_redis_connections
):
    await redis_client.execute(b"CLIENT", b"KILL", b"TYPE", b"NORMAL")
    assert await get_total_redis_connections() == 1

    await redis_event_transport.send_event(
        EventMessage(api_name="my.api", event_name="my_event", id="123", kwargs={"field": "value"}),
        options={},
    )
    messages = await redis_client.xrange("my.api.my_event:stream")
    assert len(messages) == 1
    assert await get_total_redis_connections() == 2
Example #15
    async def fire_event(self, api_name, name, kwargs: dict = None, options: dict = None):
        kwargs = kwargs or {}
        try:
            api = self.api_registry.get(api_name)
        except UnknownApi:
            raise UnknownApi(
                "Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name} was found in the "
                "registry. An API being in the registry implies you are an authority on that API. Therefore, "
                "Lightbus requires the API to be in the registry as it is a bad idea to fire "
                "events on behalf of remote APIs. However, this could also be caused by a typo in the "
                "API name or event name, or be because the API class has not been "
                "registered using bus.client.register_api(). ".format(**locals())
            )

        validate_event_or_rpc_name(api_name, "event", name)

        try:
            event = api.get_event(name)
        except EventNotFound:
            raise EventNotFound(
                "Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does not "
                "seem to contain an event named {name}. You may need to define the event, you "
                "may also be using the incorrect API. Also check for typos.".format(**locals())
            )

        parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}

        if set(kwargs.keys()) != parameter_names:
            raise InvalidEventArguments(
                "Invalid event arguments supplied when firing event. Attempted to fire event with "
                "{} arguments: {}. Event expected {}: {}".format(
                    len(kwargs),
                    sorted(kwargs.keys()),
                    len(event.parameters),
                    sorted(parameter_names),
                )
            )

        kwargs = deform_to_bus(kwargs)
        event_message = EventMessage(
            api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
        )

        validate_outgoing(self.config, self.schema, event_message)

        await self.hook_registry.execute("before_event_sent", event_message=event_message)
        logger.info(L("📤  Sending event {}.{}".format(Bold(api_name), Bold(name))))

        await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()

        await self.hook_registry.execute("after_event_sent", event_message=event_message)
Example #16
async def test_history_get_all_multiple_batches(
    redis_event_transport: RedisEventTransport, redis_client
):
    message = EventMessage(native_id="", api_name="my_api", event_name="my_event")
    data = ByFieldMessageSerializer()(message)
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"1-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"2-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"3-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"4-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"5-0")

    messages = redis_event_transport.history("my_api", "my_event", batch_size=2)
    message_ids = {m.native_id async for m in messages}
    assert len(message_ids) == 5
Example #17
async def test_send_event(redis_event_transport: RedisEventTransport, redis_client):
    await redis_event_transport.send_event(
        EventMessage(api_name="my.api", event_name="my_event", id="123", kwargs={"field": "value"}),
        options={},
    )
    messages = await redis_client.xrange("my.api.my_event:stream")
    assert len(messages) == 1
    assert messages[0][1] == {
        b"api_name": b"my.api",
        b"event_name": b"my_event",
        b"id": b"123",
        b"version": b"1",
        b":field": b'"value"',
    }
Example #18
    def send_event(self, bus_client, event_name_, **kwargs) -> Coroutine:
        """Send an event to the bus

        Note that we bypass the BusClient and use the event transport directly;
        going through the BusClient would trigger this plugin again, causing an
        infinite loop.
        """
        kwargs.setdefault('timestamp', datetime.utcnow().timestamp())
        kwargs.setdefault('process_name', bus_client.process_name)
        return bus_client.event_transport.send_event(
            EventMessage(api_name='internal.metrics',
                         event_name=event_name_,
                         kwargs=kwargs),
            options={},
        )
Example #19
def test_by_field_serializer():
    serializer = ByFieldMessageSerializer()
    serialized = serializer(
        EventMessage(
            api_name="my.api", event_name="my_event", kwargs={"field": "value"}, id="123", version=2
        )
    )
    assert serialized == {
        "api_name": "my.api",
        "event_name": "my_event",
        ":field": '"value"',
        "id": "123",
        "version": 2,
    }
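
By-field serialization keeps each message as a flat dict of Redis stream fields: metadata keys are stored as-is, while each kwarg is JSON-encoded under a ":"-prefixed key (so "field" becomes ":field" holding '"value"'). A stdlib sketch reproducing the shape asserted above:

import json

def by_field_serialize(metadata: dict, kwargs: dict) -> dict:
    # Metadata stays flat and unencoded; kwargs are JSON-encoded and
    # namespaced with a ":" prefix so they cannot collide with metadata.
    fields = dict(metadata)
    for key, value in kwargs.items():
        fields[":" + key] = json.dumps(value)
    return fields

assert by_field_serialize(
    {"api_name": "my.api", "event_name": "my_event", "id": "123", "version": 2},
    {"field": "value"},
)[":field"] == '"value"'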
Example #20
async def test_send_event(redis_event_transport: RedisEventTransport,
                          redis_client):
    await redis_event_transport.send_event(
        EventMessage(
            api_name='my.api',
            event_name='my_event',
            kwargs={'field': 'value'},
        ),
        options={},
    )
    messages = await redis_client.xrange('my.api.my_event:stream')
    assert len(messages) == 1
    assert messages[0][1] == {
        b'api_name': b'"my.api"',
        b'event_name': b'"my_event"',
        b'kw:field': b'"value"',
    }
Example #21
async def test_max_len_set_to_none(
    redis_event_transport: RedisEventTransport, redis_client, caplog
):
    """Make sure the event stream does not get truncated when
    max_stream_length = None
    """
    caplog.set_level(logging.WARNING)
    redis_event_transport.max_stream_length = None
    for x in range(0, 200):
        await redis_event_transport.send_event(
            EventMessage(api_name="my.api", event_name="my_event", kwargs={"field": "value"}),
            options={},
        )
    messages = await redis_client.xrange("my.api.my_event:stream")
    assert len(messages) == 200
Example #22
async def test_max_len_truncating(redis_event_transport: RedisEventTransport, redis_client, caplog):
    """Make sure the event stream gets truncated

    Note that truncation is approximate
    """
    caplog.set_level(logging.WARNING)
    redis_event_transport.max_stream_length = 100
    for x in range(0, 200):
        await redis_event_transport.send_event(
            EventMessage(api_name="my.api", event_name="my_event", kwargs={"field": "value"}),
            options={},
        )
    messages = await redis_client.xrange("my.api.my_event:stream")
    assert len(messages) >= 100
    assert len(messages) < 150
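
The loose bounds here (>= 100, < 150) follow from how the transport trims the stream: as Example #1 shows, it calls xadd() with exact_len=False, which in Redis terms is "XADD ... MAXLEN ~ 100". Approximate trimming only removes whole macro nodes, so the stream can temporarily exceed the cap. A sketch of that call, assuming the aioredis v1 keyword signature used in Example #1:

async def send_capped(redis, stream: str, fields: dict, max_len: int):
    # exact_len=False => "MAXLEN ~ max_len": cheaper trimming that may
    # leave the stream somewhat longer than max_len, as the test expects.
    await redis.xadd(stream=stream, fields=fields, max_len=max_len, exact_len=False)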
Example #23
    def send_event(self, client, event_name_, **kwargs) -> Coroutine:
        """Send an event to the bus

        Note that we bypass the BusClient and use the event transport directly;
        going through the BusClient would trigger this plugin again, causing an
        infinite loop.
        """
        kwargs.setdefault("timestamp", datetime.utcnow().timestamp())
        kwargs.setdefault("service_name", self.service_name)
        kwargs.setdefault("process_name", self.process_name)
        kwargs = deform_to_bus(kwargs)
        event_transport = client.transport_registry.get_event_transport("internal.metrics")
        return event_transport.send_event(
            EventMessage(api_name="internal.metrics", event_name=event_name_, kwargs=kwargs),
            options={},
            bus_client=client,
        )
Example #24
async def test_send_event_per_api_stream(redis_event_transport: RedisEventTransport, redis_client):
    redis_event_transport.stream_use = StreamUse.PER_API
    await redis_event_transport.send_event(
        EventMessage(api_name="my.api", event_name="my_event", kwargs={"field": "value"}, id="123"),
        options={},
        bus_client=None,
    )
    messages = await redis_client.xrange("my.api.*:stream")
    assert len(messages) == 1
    assert messages[0][1] == {
        b"api_name": b"my.api",
        b"event_name": b"my_event",
        b"id": b"123",
        b"version": b"1",
        b":field": b'"value"',
    }
Example #25
async def test_execute_events(dummy_bus: BusNode, event_consumer,
                              get_dummy_events, mocker):
    mocker.patch.object(dummy_bus.bus_client.event_transport,
                        '_get_fake_messages',
                        return_value=[
                            EventMessage(api_name='example.test',
                                         event_name='my_event',
                                         kwargs={'f': 123})
                        ])

    # Set up the bus and make the call
    manually_set_plugins(plugins={'metrics': MetricsPlugin()})
    registry.add(TestApi())

    # The dummy transport will fire an event every 0.1 seconds
    await asyncio.sleep(0.15)

    event_messages = get_dummy_events()
    assert len(event_messages) == 2

    # before_event_execution
    assert event_messages[0].api_name == 'internal.metrics'
    assert event_messages[0].event_name == 'event_received'
    assert event_messages[0].kwargs.pop('timestamp')
    assert event_messages[0].kwargs.pop('process_name')
    assert event_messages[0].kwargs == {
        'api_name': 'example.test',
        'event_name': 'my_event',
        'event_id': 'event_id',
        'kwargs': {
            'f': 123
        }
    }

    # after_event_execution
    assert event_messages[1].api_name == 'internal.metrics'
    assert event_messages[1].event_name == 'event_processed'
    assert event_messages[1].kwargs.pop('timestamp')
    assert event_messages[1].kwargs.pop('process_name')
    assert event_messages[1].kwargs == {
        'api_name': 'example.test',
        'event_name': 'my_event',
        'event_id': 'event_id',
        'kwargs': {
            'f': 123
        }
    }
Example #26
    async def before_server_start(self, *, bus_client: BusClient, loop):
        await bus_client.event_transport.send_event(
            EventMessage(api_name='internal.state',
                         event_name='server_started',
                         kwargs=self.get_state_kwargs(bus_client)),
            options={},
        )
        if self.do_ping:
            logger.info('Ping messages will be sent every {} seconds'.format(
                self.ping_interval))
            asyncio.ensure_future(
                handle_aio_exceptions(self._send_ping(bus_client)), loop=loop)
        else:
            logger.warning(
                'Ping events have been disabled. This will reduce log volume and bus traffic, but '
                'may result in this Lightbus server not appearing in the Lightbus admin interface.'
            )
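
handle_aio_exceptions() matters here because tasks started with asyncio.ensure_future() and never awaited can fail silently. Its implementation isn't shown in this section; a plausible minimal sketch, assuming it simply logs unexpected errors:

import asyncio
import logging

logger = logging.getLogger(__name__)

async def handle_aio_exceptions(coroutine):
    # Wrap a background coroutine so unexpected exceptions are logged
    # rather than silently discarded; cancellation still propagates.
    try:
        await coroutine
    except asyncio.CancelledError:
        raise
    except Exception:
        logger.exception("Exception in background task")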
Example #27
    async def before_worker_start(self, *, client: "BusClient"):
        event_transport = client.transport_registry.get_event_transport("internal.metrics")
        await event_transport.send_event(
            EventMessage(
                api_name="internal.state",
                event_name="worker_started",
                kwargs=self.get_state_kwargs(client),
            ),
            options={},
        )
        if self.ping_enabled:
            logger.info("Ping messages will be sent every {} seconds".format(self.ping_interval))
            self._ping_task = asyncio.ensure_future(self._send_ping(client))
        else:
            logger.warning(
                "Ping events have been disabled. This will reduce log volume and bus traffic, but "
                "may result in this Lightbus worker not appearing in the Lightbus admin interface."
            )
Example #28
async def test_history_get_subset_multiple_batches(
    redis_event_transport: RedisEventTransport, redis_client
):
    message = EventMessage(native_id="", api_name="my_api", event_name="my_event")
    data = ByFieldMessageSerializer()(message)
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"1-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"2-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"3-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"4-0")
    await redis_client.xadd("my_api.my_event:stream", data, message_id=b"5-0")

    messages = redis_event_transport.history(
        "my_api",
        "my_event",
        batch_size=2,
        start=datetime.fromtimestamp(0.002),
        stop=datetime.fromtimestamp(0.004),
    )
    message_ids = {m.native_id async for m in messages}
    assert message_ids == {"2-0", "3-0", "4-0"}
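
The start/stop datetimes map onto Redis stream IDs by their Unix timestamp in milliseconds, so fromtimestamp(0.002) corresponds to ID "2-0" and fromtimestamp(0.004) to "4-0"; since XRANGE bounds are inclusive, IDs 2-0 through 4-0 come back. A sketch of the mapping (an illustration, not Lightbus's exact helper):

from datetime import datetime, timezone

def datetime_to_stream_id(when: datetime, sequence: int = 0) -> str:
    # Redis stream IDs are "<unix-ms>-<seq>"
    return "{}-{}".format(int(when.timestamp() * 1000), sequence)

assert datetime_to_stream_id(datetime.fromtimestamp(0.002, tz=timezone.utc)) == "2-0"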
Example #29
    async def fetch_events(self) -> Tuple[Sequence[EventMessage], Any]:
        pool = await self.get_redis_pool()
        with await pool as redis:
            if not self._streams:
                logger.debug('Event backend has been given no events to consume. Event backend will sleep.')
                self._task = asyncio.ensure_future(asyncio.sleep(3600 * 24 * 365))
            else:
                logger.info(LBullets(
                    'Consuming events from', items={
                        '{} ({})'.format(*v) for v in self._streams.items()
                    }
                ))
                # TODO: Count/timeout
                self._task = asyncio.ensure_future(
                    redis.xread(
                        streams=list(self._streams.keys()),
                        latest_ids=list(self._streams.values()),
                        count=10,  # TODO: Make configurable, add timeout too
                    )
                )

            try:
                stream_messages = await self._task or []
            except asyncio.CancelledError as e:
                if self._reload:
                    # Streams to listen on have changed.
                    # Bail out and let this method get called again,
                    # at which point we'll pickup the new streams.
                    logger.debug('Event transport reloading.')
                    stream_messages = []
                    self._reload = False
                else:
                    raise

        event_messages = []
        latest_ids = {}
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # Keep track of which event ID we are up to. We will store these
            # in consumption_complete(), once we know the events have definitely
            # been consumed.
            latest_ids[stream] = message_id

            # Unfortunately, there is an edge-case when BOTH:
            #  1. We are consuming events from 'now' (i.e. event ID '$'), the default
            #  2. There is an unhandled error when processing the FIRST batch of events
            # In which case, the next iteration would start again from '$', in which
            # case we would lose events. Therefore 'subtract one' from the message ID
            # and store that immediately. Subtracting one is imprecise, as there is a SLIM
            # chance we could grab another event in the process. However, if events are
            # being consumed from 'now' then the developer presumably doesn't care about
            # a high level of precision.
            if self._streams[stream] == '$':
                self._streams[stream] = redis_stream_id_subtract_one(message_id)

            event_messages.append(
                EventMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received event {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return event_messages, latest_ids
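
redis_stream_id_subtract_one() isn't shown above; a minimal sketch of what it presumably does, stepping a "<ms>-<seq>" ID back by one position so a later re-read from that ID still covers the current entry (the maximal-sequence constant reflects Redis's 64-bit sequence numbers):

def redis_stream_id_subtract_one(message_id: str) -> str:
    # "1526919030474-55" -> "1526919030474-54"; when the sequence is
    # already 0, borrow from the millisecond part and use the largest
    # sequence Redis allows.
    milliseconds, seq = map(int, message_id.split("-"))
    if seq > 0:
        return "{}-{}".format(milliseconds, seq - 1)
    return "{}-{}".format(max(milliseconds - 1, 0), 2**64 - 1)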
Example #30
    def _get_fake_messages(self):
        return [
            EventMessage(api_name='my_company.auth',
                         event_name='user_registered',
                         kwargs={'example': 'value'})
        ]