def test_transport_registry_get_rpc_transports(redis_default_config):
    """get_rpc_transports() groups API names by the transport pool serving them."""
    registry = TransportRegistry().load_config(redis_default_config)

    # We set rpc_timeout=99 so these redis pools look different from the
    # default one. What the registry hands back are transport *pools* wrapping
    # transports, and pools compare equal when they share the same transport
    # class and config — so the config must differ for the redis entries to
    # land in their own group.
    for api_name in ("redis1", "redis2"):
        registry.set_rpc_transport(
            api_name, RedisRpcTransport, RedisRpcTransport.Config(rpc_timeout=99), Config.default()
        )
    for api_name in ("debug1", "debug2"):
        registry.set_rpc_transport(
            api_name, DebugRpcTransport, DebugRpcTransport.Config(), Config.default()
        )

    transport_pools = registry.get_rpc_transports(
        ["default", "foo", "bar", "redis1", "redis2", "debug1", "debug2"]
    )

    default_transport = registry.get_rpc_transport("default")
    redis_transport = registry.get_rpc_transport("redis1")
    debug_transport = registry.get_rpc_transport("debug1")

    # Unconfigured API names ("foo", "bar") fall back to the default pool
    assert set(transport_pools[default_transport]) == {"default", "foo", "bar"}
    assert set(transport_pools[debug_transport]) == {"debug1", "debug2"}
    assert set(transport_pools[redis_transport]) == {"redis1", "redis2"}
async def _new_bus():
    """Asynchronously create a bus wired entirely to the test redis server."""
    # All four transports (rpc, result, event, schema) point at the same
    # redis server; events use one stream per event for the test service.
    default_api = ApiConfig(
        rpc_transport=RpcTransportSelector(
            redis=RedisRpcTransport.Config(url=redis_server_url)
        ),
        result_transport=ResultTransportSelector(
            redis=RedisResultTransport.Config(url=redis_server_url)
        ),
        event_transport=EventTransportSelector(
            redis=RedisEventTransport.Config(
                url=redis_server_url,
                stream_use=StreamUse.PER_EVENT,
                service_name="test_service",
                consumer_name="test_consumer",
            )
        ),
    )
    bus_config = BusConfig(
        schema=SchemaConfig(
            transport=SchemaTransportSelector(
                redis=RedisSchemaTransport.Config(url=redis_server_url)
            ),
        )
    )
    return await lightbus.creation.create_async(
        config=RootConfig(apis={"default": default_api}, bus=bus_config),
        plugins=[],
    )
def _new_bus(service_name="{friendly}"):
    """Synchronously create a bus wired to the test redis server.

    Proxying is disabled on the returned bus's client.
    """
    # All four transports (rpc, result, event, schema) point at the same
    # redis server; events use one stream per event for the test service.
    default_api = ApiConfig(
        rpc_transport=RpcTransportSelector(
            redis=RedisRpcTransport.Config(url=redis_server_url)
        ),
        result_transport=ResultTransportSelector(
            redis=RedisResultTransport.Config(url=redis_server_url)
        ),
        event_transport=EventTransportSelector(
            redis=RedisEventTransport.Config(
                url=redis_server_url,
                stream_use=StreamUse.PER_EVENT,
                service_name="test_service",
                consumer_name="test_consumer",
            )
        ),
    )
    schema_config = SchemaConfig(
        transport=SchemaTransportSelector(
            redis=RedisSchemaTransport.Config(url=redis_server_url)
        ),
    )
    bus = lightbus.creation.create(
        config=RootConfig(
            apis={"default": default_api},
            bus=BusConfig(schema=schema_config),
            service_name=service_name,
        ),
        plugins=[],
    )
    bus.client.disable_proxy()
    return bus
async def test_consume_rpcs_only_once(redis_client, dummy_api, redis_pool):
    """Ensure that an RPC call gets consumed only once even with multiple listeners"""
    consumed_total = 0
    first_transport = RedisRpcTransport(redis_pool=redis_pool)
    second_transport = RedisRpcTransport(redis_pool=redis_pool)

    async def consume(transport):
        # Tally how many messages this competing consumer receives
        nonlocal consumed_total
        rpc_messages = await transport.consume_rpcs(apis=[dummy_api], bus_client=None)
        consumed_total += len(rpc_messages)

    task1 = asyncio.ensure_future(consume(first_transport))
    task2 = asyncio.ensure_future(consume(second_transport))
    # Give both consumers a moment to start waiting on the queue
    await asyncio.sleep(0.1)

    # Set the expiry key the transport checks, then enqueue a single RPC message
    await redis_client.set("rpc_expiry_key:123abc", 1)
    await redis_client.rpush(
        "my.dummy:rpc_queue",
        json.dumps(
            {
                "metadata": {
                    "id": "123abc",
                    "api_name": "my.api",
                    "procedure_name": "my_proc",
                    "return_path": "abc",
                },
                "kwargs": {"field": "value"},
            }
        ),
    )
    await asyncio.sleep(0.1)
    await cancel(task1, task2)

    # Exactly one of the two competing consumers should have received the
    # message, and the expiry key should no longer exist afterwards
    assert consumed_total == 1
    assert not await redis_client.exists("rpc_expiry_key:123abc")
def test_transport_registry_get_rpc_transports(redis_default_config):
    """get_rpc_transports() groups API names by the transport instance serving them."""
    registry = TransportRegistry().load_config(redis_default_config)
    debug_transport = DebugRpcTransport()
    redis_transport = RedisRpcTransport()

    # Register each transport instance under two different API names
    for api_name in ("redis1", "redis2"):
        registry.set_rpc_transport(api_name, redis_transport)
    for api_name in ("debug1", "debug2"):
        registry.set_rpc_transport(api_name, debug_transport)

    transports = registry.get_rpc_transports(
        ["default", "foo", "bar", "redis1", "redis2", "debug1", "debug2"]
    )
    default_redis_transport = registry.get_rpc_transport("default")
    transports = dict(transports)

    # Names without an explicit transport ("foo", "bar") fall back to the default
    assert set(transports[default_redis_transport]) == {"default", "foo", "bar"}
    assert set(transports[debug_transport]) == {"debug1", "debug2"}
    assert set(transports[redis_transport]) == {"redis1", "redis2"}
async def test_from_config(redis_client):
    """RedisRpcTransport.from_config() honours the url, pool and serializer settings."""
    await redis_client.select(5)
    _host, port = redis_client.address

    transport = RedisRpcTransport.from_config(
        config=None,
        url=f"redis://127.0.0.1:{port}/5",
        connection_parameters=dict(maxsize=123),
        batch_size=123,
        # Non-default serializers, even though they wouldn't make sense in this context
        serializer="lightbus.serializers.BlobMessageSerializer",
        deserializer="lightbus.serializers.BlobMessageDeserializer",
    )

    with await transport.connection_manager() as transport_client:
        # The transport must have connected to the server and db from the url
        assert transport_client.connection.address == ("127.0.0.1", port)
        assert transport_client.connection.db == 5
        # A write through the transport's connection is visible to redis_client
        await transport_client.set("x", 1)
        assert await redis_client.get("x")

    # Pool size and serializer classes picked up from the keyword arguments
    assert transport._redis_pool.connection.maxsize == 123
    assert isinstance(transport.serializer, BlobMessageSerializer)
    assert isinstance(transport.deserializer, BlobMessageDeserializer)
async def test_reconnect_consume_rpcs_dead_server(
    standalone_redis_server: StandaloneRedisServer, create_redis_pool, dummy_api
):
    """Start a redis server up then turn it off and on again during RPC consumption.

    RPCs should be consumed as normal once the redis server returns
    """
    redis_url = f"redis://127.0.0.1:{standalone_redis_server.port}/0"
    standalone_redis_server.start()
    # Tiny restart delay so the consumer retries almost immediately after a
    # connection failure, keeping the test fast
    redis_rpc_transport = RedisRpcTransport(
        redis_pool=await create_redis_pool(address=redis_url),
        consumption_restart_delay=0.0001,
    )

    async def co_enqeue():
        # Continuously enqueue the same RPC call (plus its expiry key) every
        # 10ms until cancelled. Uses its own connection so that transport
        # failures do not interfere with enqueueing.
        redis_client = await aioredis.create_redis(address=redis_url)
        try:
            while True:
                await asyncio.sleep(0.01)
                await redis_client.set("rpc_expiry_key:123abc", 1)
                await redis_client.rpush(
                    "my.dummy:rpc_queue",
                    value=json.dumps(
                        {
                            "metadata": {
                                "id": "123abc",
                                "api_name": "my.api",
                                "procedure_name": "my_proc",
                                "return_path": "abc",
                            },
                            "kwargs": {"field": "value"},
                        }
                    ),
                )
        finally:
            redis_client.close()

    total_messages = 0

    async def co_consume():
        # Count every message the transport delivers; the consume loop is
        # expected to survive (and reconnect after) server outages
        nonlocal total_messages
        while True:
            messages = await redis_rpc_transport.consume_rpcs(apis=[dummy_api])
            total_messages += len(messages)

    # Starting enqeuing and consuming rpc calls
    enqueue_task = asyncio.ensure_future(co_enqeue())
    consume_task = asyncio.ensure_future(co_consume())
    await asyncio.sleep(0.2)
    assert total_messages > 0

    # Stop enqeuing and stop the server
    await cancel(enqueue_task)
    standalone_redis_server.stop()

    try:
        # We don't get any more messages
        total_messages = 0
        await asyncio.sleep(0.2)
        assert total_messages == 0

        # Now start the server again, and start emitting messages
        standalone_redis_server.start()
        enqueue_task = asyncio.ensure_future(co_enqeue())
        total_messages = 0
        await asyncio.sleep(0.2)

        # ... the consumer has auto-reconnected and received some messages
        assert total_messages > 0
    finally:
        await cancel(enqueue_task, consume_task)