async def call_rpc_local(self, api_name: str, name: str, kwargs: dict = frozendict()):
    """Execute an RPC on a locally-registered API and report the outcome.

    Note the error contract: exceptions raised by the RPC method (other than
    cancellation and SuddenDeathException, which propagate) are *returned*
    rather than raised, so the caller can ship them back over the bus.
    """
    api = registry.get(api_name)
    self._validate_name(api_name, "rpc", name)
    start_time = time.time()
    try:
        method = getattr(api, name)
        # Optionally coerce incoming values to the types declared on the
        # method's signature
        if self.config.api(api_name).cast_values:
            kwargs = cast_to_signature(kwargs, method)
        outcome = method(**kwargs)
        outcome = await await_if_necessary(outcome)
    except (CancelledError, SuddenDeathException):
        # Never swallow cancellation or sudden-death signals
        raise
    except Exception as exc:
        logger.warning(
            L(
                "⚡ Error while executing {}.{}. Took {}",
                Bold(api_name),
                Bold(name),
                human_time(time.time() - start_time),
            )
        )
        return exc
    else:
        logger.info(
            L(
                "⚡ Executed {}.{} in {}",
                Bold(api_name),
                Bold(name),
                human_time(time.time() - start_time),
            )
        )
        return outcome
async def _call_rpc_local(self, api_name: str, name: str, kwargs: dict = frozendict()):
    """Execute an RPC on a locally-registered API and return its result.

    Raises:
        Any exception raised by the user-provided RPC method is logged
        (with traceback) and re-raised. asyncio.CancelledError and
        SuddenDeathException propagate untouched.
    """
    api = self.api_registry.get(api_name)
    validate_event_or_rpc_name(api_name, "rpc", name)
    start_time = time.time()
    try:
        method = getattr(api, name)
        # Optionally coerce incoming values to the types declared on the
        # method's signature
        if self.config.api(api_name).cast_values:
            kwargs = cast_to_signature(kwargs, method)
        result = await run_user_provided_callable(method, args=[], kwargs=kwargs)
    except (asyncio.CancelledError, SuddenDeathException):
        raise
    except Exception as e:
        # Fix: use the module-level logger, not the root logger via
        # logging.exception(). The root logger ignores this module's logging
        # configuration and is inconsistent with every other log call here;
        # Logger.exception still records the full traceback.
        logger.exception(e)
        logger.warning(
            L(
                "⚡ Error while executing {}.{}. Took {}",
                Bold(api_name),
                Bold(name),
                human_time(time.time() - start_time),
            )
        )
        raise
    else:
        logger.info(
            L(
                "⚡ Executed {}.{} in {}",
                Bold(api_name),
                Bold(name),
                human_time(time.time() - start_time),
            )
        )
        return result
async def send_result(
    self,
    rpc_message: RpcMessage,
    result_message: ResultMessage,
    return_path: str,
    bus_client: "BusClient",
):
    """Send the result back to the caller"""
    logger.debug(
        L(
            "Sending result {} into Redis using return path {}",
            Bold(result_message),
            Bold(return_path),
        )
    )
    redis_key = self._parse_return_path(return_path)
    with await self.connection_manager() as redis:
        start_time = time.time()
        # Push the serialised result and set its TTL in a single round trip
        pipeline = redis.pipeline()
        pipeline.lpush(redis_key, self.serializer(result_message))
        pipeline.expire(redis_key, timeout=self.result_ttl)
        await pipeline.execute()
    logger.debug(
        L(
            "➡ Sent result {} into Redis in {} using return path {}",
            Bold(result_message),
            human_time(time.time() - start_time),
            Bold(return_path),
        )
    )
async def receive_result(
    self, rpc_message: RpcMessage, return_path: str, options: dict, bus_client: "BusClient"
) -> ResultMessage:
    """Await a result from the processing worker"""
    logger.debug(L("Awaiting Redis result for RPC message: {}", Bold(rpc_message)))
    redis_key = self._parse_return_path(return_path)
    with await self.connection_manager() as redis:
        start_time = time.time()
        # blpop() may return None on timeout or cancellation, so keep polling
        # until we genuinely receive something. A more elegant solution is
        # welcome.
        # TODO: RPC & result Transports should not be applying a timeout,
        #       leave this to the client which coordinates between the two
        popped = None
        while not popped:
            popped = await redis.blpop(redis_key, timeout=self.rpc_timeout)
        _, serialized = popped
        result_message = self.deserializer(serialized)
        logger.debug(
            L(
                "⬅ Received Redis result in {} for RPC message {}: {}",
                human_time(time.time() - start_time),
                rpc_message,
                Bold(result_message.result),
            )
        )
        return result_message
async def send_event(self, event_message: EventMessage, options: dict):
    """Publish an event"""
    # Resolve the single stream name for this api/event pair
    listen_pair = (event_message.api_name, event_message.event_name)
    stream = self._get_stream_names(listen_for=[listen_pair])[0]
    logger.debug(
        LBullets(
            L(
                "Enqueuing event message {} in Redis stream {}",
                Bold(event_message),
                Bold(stream),
            ),
            items=dict(**event_message.get_metadata(), kwargs=event_message.get_kwargs()),
        )
    )
    serialized_fields = self.serializer(event_message)
    # Performance: I suspect getting a connection from the connection manager
    # each time is causing performance issues. Need to confirm.
    with await self.connection_manager() as redis:
        start_time = time.time()
        await redis.xadd(
            stream=stream,
            fields=serialized_fields,
            max_len=self.max_stream_length or None,
            exact_len=False,
        )
    logger.debug(
        L(
            "Enqueued event message {} in Redis in {} stream {}",
            Bold(event_message.canonical_name),
            human_time(time.time() - start_time),
            Bold(stream),
        )
    )
async def call_rpc(self, rpc_message: RpcMessage, options: dict, bus_client: "BusClient"):
    """Enqueue an RPC call onto the API's Redis queue, retrying on connection errors.

    Up to three attempts are made; a transient Redis error triggers a sleep of
    self.rpc_retry_delay before retrying, and the final failure is re-raised.
    """
    queue_key = f"{rpc_message.api_name}:rpc_queue"
    expiry_key = f"rpc_expiry_key:{rpc_message.id}"
    logger.debug(
        LBullets(
            L("Enqueuing message {} in Redis list {}", Bold(rpc_message), Bold(queue_key)),
            items=dict(**rpc_message.get_metadata(), kwargs=rpc_message.get_kwargs()),
        )
    )
    start_time = time.time()
    for try_number in range(3, 0, -1):
        last_try = try_number == 1
        try:
            await self._call_rpc(rpc_message, queue_key, expiry_key)
            # Fix: break (not return) on success so the "Enqueued message"
            # debug log below is actually reached; with `return` it was
            # unreachable code.
            break
        except (PipelineError, ConnectionClosedError, ConnectionResetError):
            if not last_try:
                await asyncio.sleep(self.rpc_retry_delay)
            else:
                raise
    logger.debug(
        L(
            "Enqueued message {} in Redis in {} stream {}",
            Bold(rpc_message),
            human_time(time.time() - start_time),
            Bold(queue_key),
        )
    )
async def receive_result(
    self, rpc_message: RpcMessage, return_path: str, options: dict
) -> ResultMessage:
    """Block until the result for the given RPC message arrives via Redis."""
    logger.debug(L("Awaiting Redis result for RPC message: {}", Bold(rpc_message)))
    redis_key = self._parse_return_path(return_path)
    with await self.connection_manager() as redis:
        start_time = time.time()
        # blpop() can return None on timeout or cancellation; loop until we
        # actually receive a value. A more elegant solution is welcome.
        while True:
            popped = await redis.blpop(redis_key, timeout=self.rpc_timeout)
            if popped:
                break
        _, serialized = popped
        result_message = self.deserializer(serialized)
        logger.debug(
            L(
                "⬅ Received Redis result in {} for RPC message {}: {}",
                human_time(time.time() - start_time),
                rpc_message,
                Bold(result_message.result),
            )
        )
        return result_message
async def call_rpc(self, rpc_message: RpcMessage, options: dict):
    """Enqueue an RPC call in Redis, together with an expiry marker key.

    The expiry key (TTL = self.rpc_timeout) lets the consuming side detect
    calls that have outlived their timeout — presumably so stale calls can be
    skipped; confirm against the consumer.
    """
    queue_key = f"{rpc_message.api_name}:rpc_queue"
    expiry_key = f"rpc_expiry_key:{rpc_message.id}"
    logger.debug(
        LBullets(
            L("Enqueuing message {} in Redis stream {}", Bold(rpc_message), Bold(queue_key)),
            items=dict(**rpc_message.get_metadata(), kwargs=rpc_message.get_kwargs()),
        )
    )
    with await self.connection_manager() as redis:
        start_time = time.time()
        # Fix: removed leftover debug `print("setting " + expiry_key)` —
        # stray prints bypass the logging configuration and pollute stdout.
        p = redis.pipeline()
        p.rpush(key=queue_key, value=self.serializer(rpc_message))
        p.set(expiry_key, 1)
        p.expire(expiry_key, timeout=self.rpc_timeout)
        await p.execute()
    logger.debug(
        L(
            "Enqueued message {} in Redis in {} stream {}",
            Bold(rpc_message),
            human_time(time.time() - start_time),
            Bold(queue_key),
        )
    )
async def call_rpc(self, rpc_message: RpcMessage, options: dict):
    """Emit a call to a remote procedure

    This only sends the request, it does not await any result (see
    RedisResultTransport)
    """
    queue_key = f"{rpc_message.api_name}:rpc_queue"
    expiry_key = f"rpc_expiry_key:{rpc_message.id}"
    logger.debug(
        LBullets(
            L("Enqueuing message {} in Redis list {}", Bold(rpc_message), Bold(queue_key)),
            items=dict(**rpc_message.get_metadata(), kwargs=rpc_message.get_kwargs()),
        )
    )
    start_time = time.time()
    # Three attempts in total; try_number counts down to the final attempt
    try_number = 3
    while True:
        last_try = try_number == 1
        try:
            await self._call_rpc(rpc_message, queue_key, expiry_key)
            break
        except (PipelineError, ConnectionClosedError, ConnectionResetError) as e:
            if last_try:
                raise
            logger.debug(
                f"Retrying sending of message. Will retry {try_number} more times. "
                f"Error was {type(e).__name__}: {e}"
            )
            await asyncio.sleep(self.rpc_retry_delay)
            try_number -= 1
    logger.debug(
        L(
            "Enqueued message {} in Redis in {} stream {}",
            Bold(rpc_message),
            human_time(time.time() - start_time),
            Bold(queue_key),
        )
    )
async def call_rpc_remote(self, api_name: str, name: str, kwargs: dict = frozendict(), options: dict = frozendict()):
    """Call an RPC on a remote worker and await its result.

    The result listener and the outgoing call are started together via
    asyncio.gather(), bounded by a timeout taken from `options` (falling back
    to the API-level config). Raises LightbusTimeout on timeout, and
    LightbusServerError when the remote worker reports an error.
    """
    rpc_transport = self.transport_registry.get_rpc_transport(api_name)
    result_transport = self.transport_registry.get_result_transport(api_name)
    # Convert values into their on-the-bus representation before sending
    kwargs = deform_to_bus(kwargs)
    rpc_message = RpcMessage(api_name=api_name, procedure_name=name, kwargs=kwargs)
    return_path = result_transport.get_return_path(rpc_message)
    rpc_message.return_path = return_path
    options = options or {}
    # Per-call timeout overrides the API-level configured timeout
    timeout = options.get("timeout", self.config.api(api_name).rpc_timeout)
    self._validate_name(api_name, "rpc", name)
    logger.info("📞 Calling remote RPC {}.{}".format(Bold(api_name), Bold(name)))
    start_time = time.time()
    # TODO: It is possible that the RPC will be called before we start waiting for the response. This is bad.
    self._validate(rpc_message, "outgoing")
    # Run the result listener and the outgoing call concurrently
    future = asyncio.gather(
        self.receive_result(rpc_message, return_path, options=options),
        rpc_transport.call_rpc(rpc_message, options=options),
    )
    await self._plugin_hook("before_rpc_call", rpc_message=rpc_message)
    try:
        result_message, _ = await asyncio.wait_for(future, timeout=timeout)
        # Re-raise any exception the gather captured
        future.result()
    except asyncio.TimeoutError:
        # Allow the future to finish, as per https://bugs.python.org/issue29432
        try:
            await future
            future.result()
        except CancelledError:
            pass
        # TODO: Remove RPC from queue. Perhaps add a RpcBackend.cancel() method. Optional,
        # as not all backends will support it. No point processing calls which have timed out.
        raise LightbusTimeout(
            f"Timeout when calling RPC {rpc_message.canonical_name} after {timeout} seconds. "
            f"It is possible no Lightbus process is serving this API, or perhaps it is taking "
            f"too long to process the request. In which case consider raising the 'rpc_timeout' "
            f"config option."
        ) from None
    await self._plugin_hook("after_rpc_call", rpc_message=rpc_message, result_message=result_message)
    if not result_message.error:
        logger.info(
            L(
                "🏁 Remote call of {} completed in {}",
                Bold(rpc_message.canonical_name),
                human_time(time.time() - start_time),
            )
        )
    else:
        logger.warning(
            L(
                "⚡ Server error during remote call of {}. Took {}: {}",
                Bold(rpc_message.canonical_name),
                human_time(time.time() - start_time),
                result_message.result,
            )
        )
        raise LightbusServerError(
            "Error while calling {}: {}\nRemote stack trace:\n{}".format(
                rpc_message.canonical_name, result_message.result, result_message.trace
            )
        )
    self._validate(result_message, "incoming", api_name, procedure_name=name)
    return result_message.result
async def call_rpc_remote(
    self, api_name: str, name: str, kwargs: dict = frozendict(), options: dict = frozendict()
):
    """
    Perform an RPC call

    Call an RPC and return the result. The call and the result listener are
    dispatched as commands to the producer; the result queue receives either
    a ResultMessage or an Exception to be re-raised here. Raises
    LightbusTimeout on timeout and LightbusWorkerError when the remote
    worker reports an error.
    """
    # Convert values into their on-the-bus representation before sending
    kwargs = deform_to_bus(kwargs)
    rpc_message = RpcMessage(api_name=api_name, procedure_name=name, kwargs=kwargs)
    validate_event_or_rpc_name(api_name, "rpc", name)

    logger.info("📞 Calling remote RPC {}.{}".format(Bold(api_name), Bold(name)))

    start_time = time.time()

    validate_outgoing(self.config, self.schema, rpc_message)

    await self.hook_registry.execute("before_rpc_call", rpc_message=rpc_message)

    result_queue = InternalQueue()

    # Send the RPC
    await self.producer.send(commands.CallRpcCommand(message=rpc_message, options=options)).wait()

    # Start a listener which will wait for results
    await self.producer.send(
        commands.ReceiveResultCommand(
            message=rpc_message, destination_queue=result_queue, options=options
        )
    ).wait()

    # Wait for the result from the listener we started.
    # The RpcResultDock will handle timeouts
    result = await bail_on_error(self.error_queue, result_queue.get())

    call_time = time.time() - start_time

    try:
        # The queue may deliver an exception (e.g. asyncio.TimeoutError)
        # instead of a result message; re-raise it to handle it below
        if isinstance(result, Exception):
            raise result
    except asyncio.TimeoutError:
        raise LightbusTimeout(
            f"Timeout when calling RPC {rpc_message.canonical_name} after waiting for {human_time(call_time)}. "
            f"It is possible no Lightbus process is serving this API, or perhaps it is taking "
            f"too long to process the request. In which case consider raising the 'rpc_timeout' "
            f"config option."
        ) from None
    else:
        assert isinstance(result, ResultMessage)
        result_message = result

    await self.hook_registry.execute(
        "after_rpc_call", rpc_message=rpc_message, result_message=result_message
    )

    if not result_message.error:
        logger.info(
            L(
                "🏁 Remote call of {} completed in {}",
                Bold(rpc_message.canonical_name),
                human_time(call_time),
            )
        )
    else:
        logger.warning(
            L(
                "⚡ Error during remote call of RPC {}. Took {}: {}",
                Bold(rpc_message.canonical_name),
                human_time(call_time),
                result_message.result,
            )
        )
        raise LightbusWorkerError(
            "Error while calling {}: {}\nRemote stack trace:\n{}".format(
                rpc_message.canonical_name, result_message.result, result_message.trace
            )
        )

    validate_incoming(self.config, self.schema, result_message)

    return result_message.result