Example #1
    async def receive_result(self, rpc_message: RpcMessage, return_path: str,
                             options: dict) -> ResultMessage:
        logger.debug(
            L("Awaiting Redis result for RPC message: {}", Bold(rpc_message)))
        redis_key = self._parse_return_path(return_path)

        with await self.connection_manager() as redis:
            start_time = time.time()
            result = None
            while not result:
                # Sometimes blpop() will return None in the case of timeout or
                # cancellation. We therefore perform this step with a loop to catch
                # this. A more elegant solution is welcome.
                result = await redis.blpop(redis_key, timeout=self.rpc_timeout)
            _, serialized = result

        result_message = self.deserializer(serialized)

        logger.debug(
            L(
                "⬅ Received Redis result in {} for RPC message {}: {}",
                human_time(time.time() - start_time),
                rpc_message,
                Bold(result_message.result),
            ))

        return result_message
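
The guard loop above exists because `blpop` returns `None` when its timeout elapses, rather than raising. A minimal standalone sketch of the same pattern, assuming only a client object with an async `blpop` (not Lightbus's own API):

async def blpop_until_result(redis, key, per_try_timeout=5):
    # BLPOP resolves to a (key, value) pair on success and to None on
    # timeout, so keep looping until a value actually arrives.
    while True:
        result = await redis.blpop(key, timeout=per_try_timeout)
        if result is not None:
            _key, value = result
            return value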
Example #2
    async def call_rpc(self, rpc_message: RpcMessage, options: dict,
                       bus_client: "BusClient"):
        queue_key = f"{rpc_message.api_name}:rpc_queue"
        expiry_key = f"rpc_expiry_key:{rpc_message.id}"
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis list {}", Bold(rpc_message),
                  Bold(queue_key)),
                items=dict(**rpc_message.get_metadata(),
                           kwargs=rpc_message.get_kwargs()),
            ))

        start_time = time.time()
        for try_number in range(3, 0, -1):
            last_try = try_number == 1
            try:
                await self._call_rpc(rpc_message, queue_key, expiry_key)
                break
            except (PipelineError, ConnectionClosedError,
                    ConnectionResetError):
                if not last_try:
                    await asyncio.sleep(self.rpc_retry_delay)
                else:
                    raise

        logger.debug(
            L(
                "Enqueued message {} in Redis in {} stream {}",
                Bold(rpc_message),
                human_time(time.time() - start_time),
                Bold(queue_key),
            ))
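
The countdown loop above (`range(3, 0, -1)`) makes three attempts, sleeping between them and re-raising on the final one. The same idiom in isolation, as a sketch; the error tuple and delay are illustrative assumptions:

import asyncio

TRANSIENT_ERRORS = (ConnectionError, ConnectionResetError)

async def retry_async(coro_factory, tries=3, delay=1.0):
    for try_number in range(tries, 0, -1):  # counts down: 3, 2, 1
        try:
            return await coro_factory()
        except TRANSIENT_ERRORS:
            if try_number == 1:  # that was the last attempt
                raise
            await asyncio.sleep(delay)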
Example #3
    async def _call_rpc_local(self, api_name: str, name: str, kwargs: dict = frozendict()):
        api = self.api_registry.get(api_name)
        validate_event_or_rpc_name(api_name, "rpc", name)

        start_time = time.time()
        try:
            method = getattr(api, name)
            if self.config.api(api_name).cast_values:
                kwargs = cast_to_signature(kwargs, method)
            result = await run_user_provided_callable(method, args=[], kwargs=kwargs)
        except (asyncio.CancelledError, SuddenDeathException):
            raise
        except Exception as e:
            logging.exception(e)
            logger.warning(
                L(
                    "⚡  Error while executing {}.{}. Took {}",
                    Bold(api_name),
                    Bold(name),
                    human_time(time.time() - start_time),
                )
            )
            raise
        else:
            logger.info(
                L(
                    "⚡  Executed {}.{} in {}",
                    Bold(api_name),
                    Bold(name),
                    human_time(time.time() - start_time),
                )
            )
            return result
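
`cast_to_signature` is Lightbus's own helper; broadly, it coerces incoming values to the types annotated on the target callable. A deliberately simplified sketch of the idea (the real implementation handles many more cases):

import inspect

def cast_kwargs_to_signature(kwargs: dict, callable_) -> dict:
    sig = inspect.signature(callable_)
    out = {}
    for name, value in kwargs.items():
        param = sig.parameters.get(name)
        if param is not None and param.annotation is not inspect.Parameter.empty:
            out[name] = param.annotation(value)  # e.g. int("5") -> 5
        else:
            out[name] = value
    return out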
Example #4
    async def send_event(self, event_message: EventMessage, options: dict):
        """Publish an event"""
        stream = self._get_stream_names(
            listen_for=[(event_message.api_name, event_message.event_name)])[0]

        logger.debug(
            LBullets(
                L(
                    "Enqueuing event message {} in Redis stream {}",
                    Bold(event_message),
                    Bold(stream),
                ),
                items=dict(**event_message.get_metadata(),
                           kwargs=event_message.get_kwargs()),
            ))

        # Performance: I suspect getting a connection from the connection manager each time is causing
        # performance issues. Need to confirm.
        with await self.connection_manager() as redis:
            start_time = time.time()
            await redis.xadd(
                stream=stream,
                fields=self.serializer(event_message),
                max_len=self.max_stream_length or None,
                exact_len=False,
            )

        logger.debug(
            L(
                "Enqueued event message {} in Redis in {} stream {}",
                Bold(event_message.canonical_name),
                human_time(time.time() - start_time),
                Bold(stream),
            ))
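
The `max_len`/`exact_len=False` arguments are the aioredis-1.x spelling of a capped, approximately trimmed stream (XADD ... MAXLEN ~ n). For comparison, a sketch of the equivalent call with redis-py's asyncio client; the stream name and cap are illustrative:

import redis.asyncio as aioredis

async def publish_event(redis: aioredis.Redis, stream: str, fields: dict):
    # maxlen with approximate=True maps to: XADD <stream> MAXLEN ~ 1000 ...
    await redis.xadd(stream, fields, maxlen=1000, approximate=True)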
Example #5
    async def call_rpc_local(self,
                             api_name: str,
                             name: str,
                             kwargs: dict = frozendict()):
        api = registry.get(api_name)
        self._validate_name(api_name, "rpc", name)

        start_time = time.time()
        try:
            method = getattr(api, name)
            if self.config.api(api_name).cast_values:
                kwargs = cast_to_signature(kwargs, method)
            result = method(**kwargs)
            result = await await_if_necessary(result)
        except (CancelledError, SuddenDeathException):
            raise
        except Exception as e:
            logger.warning(
                L(
                    "⚡  Error while executing {}.{}. Took {}",
                    Bold(api_name),
                    Bold(name),
                    human_time(time.time() - start_time),
                ))
            return e
        else:
            logger.info(
                L(
                    "⚡  Executed {}.{} in {}",
                    Bold(api_name),
                    Bold(name),
                    human_time(time.time() - start_time),
                ))
            return result
Example #6
    async def receive_result(self, rpc_message: RpcMessage, return_path: str,
                             options: dict,
                             bus_client: "BusClient") -> ResultMessage:
        """Await a result from the processing worker"""
        logger.debug(
            L("Awaiting Redis result for RPC message: {}", Bold(rpc_message)))
        redis_key = self._parse_return_path(return_path)

        with await self.connection_manager() as redis:
            start_time = time.time()
            result = None
            while not result:
                # Sometimes blpop() will return None in the case of timeout or
                # cancellation. We therefore perform this step with a loop to catch
                # this. A more elegant solution is welcome.
                # TODO: RPC & result Transports should not be applying a timeout, leave
                #       this to the client which coordinates between the two
                result = await redis.blpop(redis_key, timeout=self.rpc_timeout)
            _, serialized = result

        result_message = self.deserializer(serialized)

        logger.debug(
            L(
                "⬅ Received Redis result in {} for RPC message {}: {}",
                human_time(time.time() - start_time),
                rpc_message,
                Bold(result_message.result),
            ))

        return result_message
Example #7
    async def send_result(
        self,
        rpc_message: RpcMessage,
        result_message: ResultMessage,
        return_path: str,
        bus_client: "BusClient",
    ):
        """Send the result back to the caller"""
        logger.debug(
            L(
                "Sending result {} into Redis using return path {}",
                Bold(result_message),
                Bold(return_path),
            ))
        redis_key = self._parse_return_path(return_path)

        with await self.connection_manager() as redis:
            start_time = time.time()
            p = redis.pipeline()
            p.lpush(redis_key, self.serializer(result_message))
            p.expire(redis_key, timeout=self.result_ttl)
            await p.execute()

        logger.debug(
            L(
                "➡ Sent result {} into Redis in {} using return path {}",
                Bold(result_message),
                human_time(time.time() - start_time),
                Bold(return_path),
            ))
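
The LPUSH + EXPIRE pipeline both delivers the result and guarantees the key is garbage-collected if nobody pops it. A sketch of the same two commands with redis-py's asyncio client (key name and TTL are assumptions):

import redis.asyncio as aioredis

async def send_result_sketch(redis: aioredis.Redis, redis_key: str, payload: bytes):
    async with redis.pipeline(transaction=True) as pipe:
        pipe.lpush(redis_key, payload)
        pipe.expire(redis_key, 60)  # drop the result if unclaimed for 60s
        await pipe.execute()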
Example #8
    async def setup_async(self, plugins: dict = None):
        """Setup lightbus and get it ready to consume events and/or RPCs

        You should call this manually if you are calling `consume_rpcs()`
        directly. This will be handled for you if you are
        calling `run_forever()`.
        """
        logger.info(
            LBullets(
                "Lightbus is setting up",
                items={
                    "service_name (set with -s or LIGHTBUS_SERVICE_NAME)": Bold(
                        self.config.service_name
                    ),
                    "process_name (with with -p or LIGHTBUS_PROCESS_NAME)": Bold(
                        self.config.process_name
                    ),
                },
            )
        )

        # Log the transport information
        rpc_transport = self.transport_registry.get_rpc_transport("default", default=None)
        result_transport = self.transport_registry.get_result_transport("default", default=None)
        event_transport = self.transport_registry.get_event_transport("default", default=None)
        log_transport_information(
            rpc_transport, result_transport, event_transport, self.schema.schema_transport, logger
        )

        # Log the plugins we have
        if plugins is None:
            logger.debug("Auto-loading any installed Lightbus plugins...")
            self.plugin_registry.autoload_plugins(self.config)
        else:
            logger.debug("Loading explicitly specified Lightbus plugins....")
            self.plugin_registry.set_plugins(plugins)

        if self.plugin_registry._plugins:
            logger.info(
                LBullets(
                    "Loaded the following plugins ({})".format(len(self.plugin_registry._plugins)),
                    items=self.plugin_registry._plugins,
                )
            )
        else:
            logger.info("No plugins loaded")

        # Load schema
        logger.debug("Loading schema...")
        await self.schema.load_from_bus()

        logger.info(
            LBullets(
                "Loaded the following remote schemas ({})".format(len(self.schema.remote_schemas)),
                items=self.schema.remote_schemas.keys(),
            )
        )

        for transport in self.transport_registry.get_all_transports():
            await transport.open()
Example #9
    async def call_rpc(self, rpc_message: RpcMessage, options: dict):
        queue_key = f"{rpc_message.api_name}:rpc_queue"
        expiry_key = f"rpc_expiry_key:{rpc_message.id}"
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis stream {}", Bold(rpc_message),
                  Bold(queue_key)),
                items=dict(**rpc_message.get_metadata(),
                           kwargs=rpc_message.get_kwargs()),
            ))

        with await self.connection_manager() as redis:
            start_time = time.time()
            print("setting " + expiry_key)
            p = redis.pipeline()
            p.rpush(key=queue_key, value=self.serializer(rpc_message))
            p.set(expiry_key, 1)
            p.expire(expiry_key, timeout=self.rpc_timeout)
            await p.execute()

        logger.debug(
            L(
                "Enqueued message {} in Redis in {} stream {}",
                Bold(rpc_message),
                human_time(time.time() - start_time),
                Bold(queue_key),
            ))
Example #10
    async def consume_rpcs(self, apis: Sequence[Api]) -> Sequence[RpcMessage]:
        # Get the name of each stream
        streams = ['{}:stream'.format(api.meta.name) for api in apis]
        # Get where we last left off in each stream
        latest_ids = [self._latest_ids.get(stream, '$') for stream in streams]

        logger.debug(LBullets(
            'Consuming RPCs from', items=[
                '{} ({})'.format(s, self._latest_ids.get(s, '$')) for s in streams
            ]
        ))

        pool = await self.get_redis_pool()
        with await pool as redis:
            # TODO: Count/timeout configurable
            stream_messages = await redis.xread(streams, latest_ids=latest_ids, count=10)

        rpc_messages = []
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # See comment on events transport re updating message_id
            self._latest_ids[stream] = message_id
            rpc_messages.append(
                RpcMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received message {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return rpc_messages
Example #11
    async def _on_message(self, event_message: EventMessage,
                          listener: Callable, options: dict,
                          on_error: OnError):

        # TODO: Check events match those requested
        logger.info(
            L("📩  Received event {}.{} with ID {}".format(
                Bold(event_message.api_name), Bold(event_message.event_name),
                event_message.id)))

        validate_incoming(self.config, self.schema, event_message)

        await self.hook_registry.execute("before_event_execution",
                                         event_message=event_message)

        if self.config.api(event_message.api_name).cast_values:
            parameters = cast_to_signature(parameters=event_message.kwargs,
                                           callable=listener)
        else:
            parameters = event_message.kwargs

        # Call the listener.
        # Pass the event message as a positional argument,
        # thereby allowing listeners to have flexibility in the argument names.
        # (And therefore allowing listeners to use the `event` parameter themselves)
        if on_error == OnError.SHUTDOWN:
            # Run the callback in the queue_exception_checker(). This will
            # put any errors into Lightbus' error queue, and therefore
            # cause a shutdown
            await queue_exception_checker(
                run_user_provided_callable(listener,
                                           args=[event_message],
                                           kwargs=parameters),
                self.error_queue,
                help=
                (f"An error occurred while {listener} was handling an event. Lightbus will now"
                 " shutdown. If you wish to continue you can use the on_error parameter when"
                 " setting up your event. For example:\n\n    bus.my_api.my_event.listen(fn,"
                 " listener_name='example', on_error=lightbus.OnError.ACKNOWLEDGE_AND_LOG)"
                 ),
            )
        elif on_error == OnError.ACKNOWLEDGE_AND_LOG:
            try:
                await listener(event_message, **parameters)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                # Log here. Acknowledgement follows below
                logger.exception(e)

        # Acknowledge the successfully processed message
        await self.producer.send(
            AcknowledgeEventCommand(message=event_message,
                                    options=options)).wait()

        await self.hook_registry.execute("after_event_execution",
                                         event_message=event_message)
Example #12
 async def receive_result(self, rpc_message: RpcMessage,
                          return_path: asyncio.Future,
                          options: dict) -> ResultMessage:
     logger.info(
         L("⌛️  Awaiting result for RPC message: {}", Bold(rpc_message)))
     result = await return_path
     logger.info(
         L("⬅  Received result for RPC message {}: {}", rpc_message,
           Bold(result)))
     return result
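
With a direct, in-process transport the return path is simply an `asyncio.Future`: the sending side resolves it and the receiving side awaits it. A self-contained toy demonstration:

import asyncio

async def demo():
    return_path = asyncio.get_running_loop().create_future()
    return_path.set_result("pong")      # the send_result() side
    print(await return_path)            # the receive_result() side -> "pong"

asyncio.run(demo())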
Example #13
    async def fire_event(self,
                         api_name,
                         name,
                         kwargs: dict = None,
                         options: dict = None):
        kwargs = kwargs or {}
        try:
            api = registry.get(api_name)
        except UnknownApi:
            raise UnknownApi(
                "Lightbus tried to fire the event {api_name}.{name}, but could not find API {api_name} in the "
                "registry. An API being in the registry implies you are an authority on that API. Therefore, "
                "Lightbus requires the API to be in the registry as it is a bad idea to fire "
                "events on behalf of remote APIs. However, this could also be caused by a typo in the "
                "API name or event name, or be because the API class has not been "
                "imported. ".format(**locals()))

        self._validate_name(api_name, "event", name)

        try:
            event = api.get_event(name)
        except EventNotFound:
            raise EventNotFound(
                "Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does not "
                "seem to contain an event named {name}. You may need to define the event, you "
                "may also be using the incorrect API. Also check for typos.".
                format(**locals()))

        if set(kwargs.keys()) != _parameter_names(event.parameters):
            raise InvalidEventArguments(
                "Invalid event arguments supplied when firing event. Attempted to fire event with "
                "{} arguments: {}. Event expected {}: {}".format(
                    len(kwargs),
                    sorted(kwargs.keys()),
                    len(event.parameters),
                    sorted(_parameter_names(event.parameters)),
                ))

        kwargs = deform_to_bus(kwargs)
        event_message = EventMessage(api_name=api.meta.name,
                                     event_name=name,
                                     kwargs=kwargs)

        self._validate(event_message, "outgoing")

        event_transport = self.transport_registry.get_event_transport(api_name)
        await self._plugin_hook("before_event_sent",
                                event_message=event_message)
        logger.info(
            L("📤  Sending event {}.{}".format(Bold(api_name), Bold(name))))
        await event_transport.send_event(event_message, options=options)
        await self._plugin_hook("after_event_sent",
                                event_message=event_message)
Example #14
    def run_forever(self, *, loop=None, consume_rpcs=True, consume_events=True, plugins=None):
        logger.info(LBullets(
            "Lightbus getting ready to run. Brokers in use",
            items={
                "RPC transport": L(
                    '{}.{}',
                    self.rpc_transport.__module__, Bold(self.rpc_transport.__class__.__name__)
                ),
                "Result transport": L(
                    '{}.{}', self.result_transport.__module__,
                    Bold(self.result_transport.__class__.__name__)
                ),
                "Event transport": L(
                    '{}.{}', self.event_transport.__module__,
                    Bold(self.event_transport.__class__.__name__)
                ),
            }
        ))

        self.setup(plugins=plugins)
        registry.add(LightbusStateApi())
        registry.add(LightbusMetricsApi())

        if consume_rpcs:
            if registry.public():
                logger.info(LBullets(
                    "APIs in registry ({})".format(len(registry.all())),
                    items=registry.all()
                ))
            else:
                if consume_events:
                    logger.warning(
                        "No APIs have been registered, lightbus may still receive events "
                        "but Lightbus will not handle any incoming RPCs"
                    )
                else:
                    logger.error(
                        "No APIs have been registered, yet Lightbus has been configured to only "
                        "handle RPCs. There is therefore nothing for lightbus to do. Exiting."
                    )
                    return

        block(handle_aio_exceptions(
            plugin_hook('before_server_start', bus_client=self, loop=loop)
        ), timeout=5)

        loop = loop or asyncio.get_event_loop()
        self._run_forever(loop, consume_rpcs, consume_events)

        loop.run_until_complete(handle_aio_exceptions(
            plugin_hook('after_server_stopped', bus_client=self, loop=loop)
        ))
Example #15
 async def call_rpc_local(self, api_name: str, name: str, kwargs: dict):
     api = registry.get(api_name)
     start_time = time.time()
     try:
         result = await api.call(name, kwargs)
     except (CancelledError, SuddenDeathException):
         raise
     except Exception as e:
         logger.warning(L("⚡ Error while executing {}.{}. Took {}", Bold(api_name), Bold(name), human_time(time.time() - start_time)))
         return e
     else:
         logger.info(L("⚡ Executed {}.{} in {}", Bold(api_name), Bold(name), human_time(time.time() - start_time)))
         return result
Example #16
    async def fire_event(self, api_name, name, kwargs: dict = None, options: dict = None):
        kwargs = kwargs or {}
        try:
            api = self.api_registry.get(api_name)
        except UnknownApi:
            raise UnknownApi(
                "Lightbus tried to fire the event {api_name}.{name}, but no API named {api_name} was found in the "
                "registry. An API being in the registry implies you are an authority on that API. Therefore, "
                "Lightbus requires the API to be in the registry as it is a bad idea to fire "
                "events on behalf of remote APIs. However, this could also be caused by a typo in the "
                "API name or event name, or be because the API class has not been "
                "registered using bus.client.register_api(). ".format(**locals())
            )

        validate_event_or_rpc_name(api_name, "event", name)

        try:
            event = api.get_event(name)
        except EventNotFound:
            raise EventNotFound(
                "Lightbus tried to fire the event {api_name}.{name}, but the API {api_name} does not "
                "seem to contain an event named {name}. You may need to define the event, you "
                "may also be using the incorrect API. Also check for typos.".format(**locals())
            )

        parameter_names = {p.name if isinstance(p, Parameter) else p for p in event.parameters}

        if set(kwargs.keys()) != parameter_names:
            raise InvalidEventArguments(
                "Invalid event arguments supplied when firing event. Attempted to fire event with "
                "{} arguments: {}. Event expected {}: {}".format(
                    len(kwargs),
                    sorted(kwargs.keys()),
                    len(event.parameters),
                    sorted(parameter_names),
                )
            )

        kwargs = deform_to_bus(kwargs)
        event_message = EventMessage(
            api_name=api.meta.name, event_name=name, kwargs=kwargs, version=api.meta.version
        )

        validate_outgoing(self.config, self.schema, event_message)

        await self.hook_registry.execute("before_event_sent", event_message=event_message)
        logger.info(L("📤  Sending event {}.{}".format(Bold(api_name), Bold(name))))

        await self.producer.send(SendEventCommand(message=event_message, options=options)).wait()

        await self.hook_registry.execute("after_event_sent", event_message=event_message)
Example #17
def autodiscover(directory='.'):
    logger.debug("Attempting to autodiscover bus.py file")
    bus_path = discover_bus_py(directory)
    if not bus_path:
        return None
    logger.debug(L("Found bus.py file at: {}", Bold(bus_path)))
    bus_module_name = prepare_exec_for_file(bus_path)
    logger.debug(L("Going to import {}", Bold(bus_module_name)))
    spec = importlib.util.spec_from_file_location(bus_module_name, str(bus_path))
    bus_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(bus_module)
    logger.info(L("No initial import was specified. Using autodiscovered module '{}'", Bold(bus_module_name)))

    return bus_module
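
Hypothetical usage, assuming a `bus.py` exists in the working directory (the `bus` attribute name is an assumption for illustration):

bus_module = autodiscover(".")
if bus_module is not None:
    bus = getattr(bus_module, "bus", None)  # attribute name is hypothetical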
Example #18
    async def call_rpc_remote(self, api_name: str, name: str, kwargs: dict, options: dict):
        rpc_message = RpcMessage(api_name=api_name, procedure_name=name, kwargs=kwargs)
        return_path = self.result_transport.get_return_path(rpc_message)
        rpc_message.return_path = return_path
        options = options or {}
        timeout = options.get('timeout', 5)

        logger.info("➡ Calling remote RPC ".format(rpc_message))

        start_time = time.time()
        # TODO: It is possible that the RPC will be called before we start waiting for the response. This is bad.

        future = asyncio.gather(
            self.receive_result(rpc_message, return_path, options=options),
            self.rpc_transport.call_rpc(rpc_message, options=options),
        )

        await plugin_hook('before_rpc_call', rpc_message=rpc_message, bus_client=self)

        try:
            result_message, _ = await asyncio.wait_for(future, timeout=timeout)
        except asyncio.TimeoutError:
            future.cancel()
            # TODO: Include description of possible causes and how to increase the timeout.
            # TODO: Remove RPC from queue. Perhaps add a RpcBackend.cancel() method. Optional,
            #       as not all backends will support it. No point processing calls which have timed out.
            raise LightbusTimeout('Timeout when calling RPC {} after {} seconds'.format(
                rpc_message.canonical_name, timeout
            )) from None

        await plugin_hook('after_rpc_call', rpc_message=rpc_message, result_message=result_message, bus_client=self)

        if not result_message.error:
            logger.info(L("⚡ Remote call of {} completed in {}", Bold(rpc_message.canonical_name), human_time(time.time() - start_time)))
        else:
            logger.warning(
                L("⚡ Server error during remote call of {}. Took {}: {}",
                  Bold(rpc_message.canonical_name),
                  human_time(time.time() - start_time),
                  result_message.result,
                ),
            )
            raise LightbusServerError('Error while calling {}: {}\nRemote stack trace:\n{}'.format(
                rpc_message.canonical_name,
                result_message.result,
                result_message.trace,
            ))

        return result_message.result
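
Stripped of Lightbus specifics, the concurrency shape is: start the result listener and the send together with `asyncio.gather`, then bound the pair with `wait_for`, cancelling on timeout. A runnable toy version:

import asyncio

async def listen():
    await asyncio.sleep(0.1)
    return "result"

async def send():
    await asyncio.sleep(0.05)

async def main():
    future = asyncio.gather(listen(), send())
    try:
        result, _ = await asyncio.wait_for(future, timeout=1.0)
    except asyncio.TimeoutError:
        future.cancel()  # mirrors the explicit cancel above
        raise
    print(result)

asyncio.run(main())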
Example #19
    async def receive_result(self, rpc_message: RpcMessage, return_path: str, options: dict) -> ResultMessage:
        logger.info(L("⌛ Awaiting Redis result for RPC message: {}", Bold(rpc_message)))
        redis_key = self._parse_return_path(return_path)

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            # TODO: Make timeout configurable
            _, result = await redis.blpop(redis_key, timeout=5)
            result_dictionary = redis_decode(result)

        logger.info(L(
            "⬅ Received Redis result in {} for RPC message {}: {}",
            human_time(time.time() - start_time), rpc_message, Bold(result)
        ))

        return ResultMessage.from_dict(result_dictionary)
Example #20
 async def send_result(
     self,
     rpc_message: RpcMessage,
     result_message: ResultMessage,
     return_path: asyncio.Future,
     bus_client: "BusClient",
 ):
     logger.info(L("⚡️  Directly sending RPC result: {}", Bold(result_message)))
     return_path.set_result(result_message)
Example #21
    async def call_rpc(self, rpc_message: RpcMessage, options: dict):
        stream = '{}:stream'.format(rpc_message.api_name)
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis stream {}", Bold(rpc_message), Bold(stream)),
                items=rpc_message.to_dict()
            )
        )

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            # TODO: MAXLEN
            await redis.xadd(stream=stream, fields=encode_message_fields(rpc_message.to_dict()))

        logger.info(L(
            "Enqueued message {} in Redis in {} stream {}",
            Bold(rpc_message), human_time(time.time() - start_time), Bold(stream)
        ))
Example #22
    async def send_event(self, event_message: EventMessage, options: dict):
        """Publish an event"""
        stream = '{}.{}:stream'.format(event_message.api_name, event_message.event_name)
        logger.debug(
            LBullets(
                L("Enqueuing event message {} in Redis stream {}", Bold(event_message), Bold(stream)),
                items=event_message.to_dict()
            )
        )

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            # TODO: MAXLEN
            await redis.xadd(stream=stream, fields=encode_message_fields(event_message.to_dict()))

        logger.info(L(
            "Enqueued event message {} in Redis in {} stream {}",
            Bold(event_message), human_time(time.time() - start_time), Bold(stream)
        ))
Example #23
    async def send_result(self, rpc_message: RpcMessage, result_message: ResultMessage, return_path: str):
        logger.debug(L(
            "Sending result {} into Redis using return path {}",
            Bold(result_message), Bold(return_path)
        ))
        redis_key = self._parse_return_path(return_path)

        pool = await self.get_redis_pool()
        with await pool as redis:
            start_time = time.time()
            p = redis.pipeline()
            p.lpush(redis_key, redis_encode(result_message.to_dict()))
            # TODO: Make result expiry configurable
            p.expire(redis_key, timeout=60)
            await p.execute()

        logger.debug(L(
            "➡ Sent result {} into Redis in {} using return path {}",
            Bold(result_message), human_time(time.time() - start_time), Bold(return_path)
        ))
Example #24
def log_welcome_message(
    logger: Logger,
    transport_registry: TransportRegistry,
    schema: "Schema",
    plugin_registry: PluginRegistry,
    config: Config,
):
    """Show the worker-startup welcome message
    """
    logger.info(
        LBullets(
            "Lightbus is setting up",
            items={
                "service_name (set with -s or LIGHTBUS_SERVICE_NAME)":
                Bold(config.service_name),
                "process_name (with with -p or LIGHTBUS_PROCESS_NAME)":
                Bold(config.process_name),
            },
        ))

    # Log the transport information
    rpc_transport = transport_registry.get_rpc_transport("default",
                                                         default=None)
    result_transport = transport_registry.get_result_transport("default",
                                                               default=None)
    event_transport = transport_registry.get_event_transport("default",
                                                             default=None)
    log_transport_information(rpc_transport, result_transport, event_transport,
                              schema.schema_transport, logger)

    if plugin_registry._plugins:
        logger.info(
            LBullets(
                "Loaded the following plugins ({})".format(
                    len(plugin_registry._plugins)),
                items=plugin_registry._plugins,
            ))
    else:
        logger.info("No plugins loaded")
Example #25
    async def call_rpc(self, rpc_message: RpcMessage, options: dict):
        """Emit a call to a remote procedure

        This only sends the request, it does not await any result (see RedisResultTransport)
        """
        queue_key = f"{rpc_message.api_name}:rpc_queue"
        expiry_key = f"rpc_expiry_key:{rpc_message.id}"
        logger.debug(
            LBullets(
                L("Enqueuing message {} in Redis list {}", Bold(rpc_message),
                  Bold(queue_key)),
                items=dict(**rpc_message.get_metadata(),
                           kwargs=rpc_message.get_kwargs()),
            ))

        start_time = time.time()
        for try_number in range(3, 0, -1):
            last_try = try_number == 1
            try:
                await self._call_rpc(rpc_message, queue_key, expiry_key)
                break
            except (PipelineError, ConnectionClosedError,
                    ConnectionResetError) as e:
                if not last_try:
                    logger.debug(
                        f"Retrying sending of message. Will retry {try_number} more times. "
                        f"Error was {type(e).__name__}: {e}")
                    await asyncio.sleep(self.rpc_retry_delay)
                else:
                    raise

        logger.debug(
            L(
                "Enqueued message {} in Redis in {} stream {}",
                Bold(rpc_message),
                human_time(time.time() - start_time),
                Bold(queue_key),
            ))
Example #26
    async def _reclaim_lost_messages(self, stream_names: List[str],
                                     consumer_group: str,
                                     expected_events: set):
        """Reclaim messages that other consumers in the group failed to acknowledge"""
        with await self.connection_manager() as redis:
            for stream in stream_names:
                old_messages = await redis.xpending(
                    stream,
                    consumer_group,
                    "-",
                    "+",
                    count=self.reclaim_batch_size)
                timeout = self.acknowledgement_timeout * 1000
                for (
                        message_id,
                        consumer_name,
                        ms_since_last_delivery,
                        num_deliveries,
                ) in old_messages:
                    message_id = decode(message_id, "utf8")
                    consumer_name = decode(consumer_name, "utf8")

                    if ms_since_last_delivery > timeout:
                        logger.info(
                            L(
                                "Found timed out event {} in stream {}. Abandoned by {}. Attempting to reclaim...",
                                Bold(message_id),
                                Bold(stream),
                                Bold(consumer_name),
                            ))

                    result = await redis.xclaim(stream, consumer_group,
                                                self.consumer_name,
                                                int(timeout), message_id)
                    for claimed_message_id, fields in result:
                        claimed_message_id = decode(claimed_message_id, "utf8")
                        event_message = self._fields_to_message(
                            fields,
                            expected_events,
                            native_id=claimed_message_id)
                        if not event_message:
                            # noop message, or a message for an event we don't care about
                            continue
                        logger.debug(
                            LBullets(
                                L(
                                    "⬅ Reclaimed timed out event {} on stream {}. Abandoned by {}.",
                                    Bold(message_id),
                                    Bold(stream),
                                    Bold(consumer_name),
                                ),
                                items=dict(
                                    **event_message.get_metadata(),
                                    kwargs=event_message.get_kwargs(),
                                ),
                            ))
                        yield event_message, stream
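
The XPENDING/XCLAIM dance above is what lets any group member adopt messages a crashed consumer never acknowledged; the `min_idle_time` argument to XCLAIM prevents two survivors from adopting the same message. A compressed sketch with redis-py's asyncio client (an assumption; the code above uses the aioredis-1.x API):

import redis.asyncio as aioredis

async def reclaim_lost(redis: aioredis.Redis, stream: str, group: str,
                       me: str, timeout_ms: int):
    pending = await redis.xpending_range(stream, group, min="-", max="+", count=100)
    stale = [p["message_id"] for p in pending
             if p["time_since_delivered"] > timeout_ms]
    if not stale:
        return []
    # min_idle_time=timeout_ms: only claim if still idle, beating races
    return await redis.xclaim(stream, group, me, timeout_ms, stale)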
Example #27
    async def _consume_rpcs(self, apis: Sequence[Api]) -> Sequence[RpcMessage]:
        # Get the name of each list queue
        queue_keys = ["{}:rpc_queue".format(api.meta.name) for api in apis]

        logger.debug(
            LBullets(
                "Consuming RPCs from",
                items=[
                    "{} ({})".format(s, self._latest_ids.get(s, "$"))
                    for s in queue_keys
                ],
            ))

        with await self.connection_manager() as redis:
            try:
                try:
                    stream, data = await redis.blpop(*queue_keys)
                except RuntimeError:
                    # For some reason aio-redis likes to eat the CancelledError and
                    # turn it into a Runtime error:
                    # https://github.com/aio-libs/aioredis/blob/9f5964/aioredis/connection.py#L184
                    raise asyncio.CancelledError(
                        "aio-redis task was cancelled and decided it should be a RuntimeError"
                    )
            except asyncio.CancelledError:
                # We need to manually close the connection here otherwise the aioredis
                # pool will emit warnings saying that this connection still has pending
                # commands (i.e. the above blocking pop)
                redis.close()
                raise

            stream = decode(stream, "utf8")
            rpc_message = self.deserializer(data)
            expiry_key = f"rpc_expiry_key:{rpc_message.id}"
            key_deleted = await redis.delete(expiry_key)

            if not key_deleted:
                return []

            logger.debug(
                LBullets(
                    L("⬅ Received RPC message on stream {}", Bold(stream)),
                    items=dict(**rpc_message.get_metadata(),
                               kwargs=rpc_message.get_kwargs()),
                ))

            return [rpc_message]
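
Note how `delete(expiry_key)` doubles as an atomic claim: `DEL` returns the number of keys removed, so if the key has already expired (the caller gave up) or another consumer raced us to it, we see 0 and drop the message. The trick in isolation (sketch; the key layout mirrors the code above):

import redis.asyncio as aioredis

async def claim_rpc(redis: aioredis.Redis, rpc_id: str) -> bool:
    # At most one consumer gets 1 back here; everyone else gets 0.
    deleted = await redis.delete(f"rpc_expiry_key:{rpc_id}")
    return deleted == 1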
Example #28
    @contextlib.contextmanager
    def _register_listener(self, events: List[Tuple[str, str]]):
        """A context manager to help keep track of what the bus is listening for"""
        logger.info(
            LBullets(f"Registering listener for",
                     items=[Bold(f"{a}.{e}") for a, e in events]))

        for api_name, event_name in events:
            key = (api_name, event_name)
            self._listeners.setdefault(key, 0)
            self._listeners[key] += 1

        yield

        for api_name, event_name in events:
            key = (api_name, event_name)
            self._listeners[key] -= 1
            if not self._listeners[key]:
                self._listeners.pop(key)
Example #29
    async def start_listening_for(self, api_name, event_name, options: dict):
        stream_name = '{}.{}:stream'.format(api_name, event_name)
        options = options or {}
        since = options.get('since')

        if stream_name in self._streams:
            if since:
                # We could probably handle this by starting up a new Redis connection
                # just for this listener. Let's see if this is needed.
                logger.warning(
                    'Cannot start listening on {}.{} as it is already being listened for. '
                    'You also specified a "since" value, so be warned you are probably not '
                    'going to be getting the history of messages you expect. Consider '
                    'calling stop_listening_for() first.'.format(api_name, event_name)
                )
            else:
                logger.debug('Already listening on event stream {}. Doing nothing.'.format(stream_name))
        else:
            if not since:
                latest_id = '$'
            elif hasattr(since, 'timestamp'):  # datetime
                # Create message ID: "<milliseconds-timestamp>-<sequence-number>"
                latest_id = '{}-0'.format(round(since.timestamp() * 1000))
            else:
                latest_id = since

            logger.info(L(
                'Will listen on event stream {} {}',
                Bold(stream_name),
                'starting now' if self._task else 'once event consumption begins',
            ))
            self._streams[stream_name] = latest_id

            if self._task:
                logger.debug('Existing consumer task running, cancelling')
                self._reload = True
                self._task.cancel()
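
The `since`-to-ID conversion works because Redis stream IDs have the form "<milliseconds-since-epoch>-<sequence>". In isolation:

from datetime import datetime, timezone

def datetime_to_stream_id(dt: datetime) -> str:
    return "{}-0".format(round(dt.timestamp() * 1000))

print(datetime_to_stream_id(datetime(2024, 1, 1, tzinfo=timezone.utc)))
# -> "1704067200000-0"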
Example #30
    async def fetch_events(self) -> Tuple[Sequence[EventMessage], Any]:
        pool = await self.get_redis_pool()
        with await pool as redis:
            if not self._streams:
                logger.debug('Event backend has been given no events to consume. Event backend will sleep.')
                self._task = asyncio.ensure_future(asyncio.sleep(3600 * 24 * 365))
            else:
                logger.info(LBullets(
                    'Consuming events from', items={
                        '{} ({})'.format(*v) for v in self._streams.items()
                    }
                ))
                # TODO: Count/timeout
                self._task = asyncio.ensure_future(
                    redis.xread(
                        streams=list(self._streams.keys()),
                        latest_ids=list(self._streams.values()),
                        count=10,  # TODO: Make configurable, add timeout too
                    )
                )

            try:
                stream_messages = await self._task or []
            except asyncio.CancelledError as e:
                if self._reload:
                    # Streams to listen on have changed.
                    # Bail out and let this method get called again,
                    # at which point we'll pickup the new streams.
                    logger.debug('Event transport reloading.')
                    stream_messages = []
                    self._reload = False
                else:
                    raise

        event_messages = []
        latest_ids = {}
        for stream, message_id, fields in stream_messages:
            stream = decode(stream, 'utf8')
            message_id = decode(message_id, 'utf8')
            decoded_fields = decode_message_fields(fields)

            # Keep track of which event ID we are up to. We will store these
            # in consumption_complete(), once we know the events have definitely
            # been consumed.
            latest_ids[stream] = message_id

            # Unfortunately, there is an edge-case when BOTH:
            #  1. We are consuming events from 'now' (i.e. event ID '$'), the default
            #  2. There is an unhandled error when processing the FIRST batch of events
            # In which case, the next iteration would start again from '$', in which
            # case we would lose events. Therefore 'subtract one' from the message ID
            # and store that immediately. Subtracting one is imprecise, as there is a SLIM
            # chance we could grab another event in the process. However, if events are
            # being consumed from 'now' then the developer presumably doesn't care about
            # a high level of precision.
            if self._streams[stream] == '$':
                self._streams[stream] = redis_stream_id_subtract_one(message_id)

            event_messages.append(
                EventMessage.from_dict(decoded_fields)
            )
            logger.debug(LBullets(
                L("⬅ Received event {} on stream {}", Bold(message_id), Bold(stream)),
                items=decoded_fields
            ))

        return event_messages, latest_ids
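
One plausible implementation of the `redis_stream_id_subtract_one` helper used above (an assumption; Lightbus ships its own version):

def stream_id_subtract_one(message_id: str) -> str:
    milliseconds, _, sequence = message_id.partition("-")
    if int(sequence) > 0:
        return "{}-{}".format(milliseconds, int(sequence) - 1)
    # Sequence already 0: step back one millisecond. The previous entry's
    # true sequence number is unknowable here, so use a very large one.
    return "{}-9999999999999999".format(int(milliseconds) - 1)

assert stream_id_subtract_one("1000-5") == "1000-4"
assert stream_id_subtract_one("1000-0") == "999-9999999999999999"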