Example #1
0
async def test_skeleton_syncer(request, event_loop, event_bus, chaindb_fresh,
                               chaindb_1000):
    """Fast-sync a fresh chain DB up to the head of a 1000-block peer."""
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_fresh.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_1000.db),
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer], event_bus=event_bus)
        syncer = FastChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers(
            [server_peer], event_bus=event_bus)

        event_server = run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer)
        request_server = run_request_server(
            event_bus, AsyncChainDB(chaindb_1000.db))
        async with event_server, request_server:

            client_peer.logger.info("%s is serving 1000 blocks", client_peer)
            server_peer.logger.info("%s is syncing up 1000 blocks",
                                    server_peer)

            # The syncer returns once it has caught up with the peer.
            await asyncio.wait_for(syncer.run(), timeout=20)

            assert (chaindb_fresh.get_canonical_head()
                    == chaindb_1000.get_canonical_head())
Example #2
0
async def test_light_syncer(request, event_loop, event_bus, chaindb_fresh,
                            chaindb_20):
    """Light-sync a fresh chain DB against a peer serving 20 blocks."""
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_peer_class=LESPeer,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    syncer = LightChainSyncer(
        LatestTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]),
    )
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=event_bus)

    event_server = run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=LESPeerPoolEventServer)
    request_server = run_request_server(
        event_bus,
        FakeAsyncChainDB(chaindb_20.db),
        server_type=LightRequestServer,
    )
    async with event_server, request_server:

        server_peer.logger.info("%s is serving 20 blocks", server_peer)
        client_peer.logger.info("%s is syncing up 20", client_peer)

        def stop_syncer():
            event_loop.run_until_complete(syncer.cancel())
            # Yield control so that client/server.run() returns, otherwise
            # asyncio will complain.
            event_loop.run_until_complete(asyncio.sleep(0.1))

        request.addfinalizer(stop_syncer)

        asyncio.ensure_future(syncer.run())

        await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
Example #3
0
async def test_skeleton_syncer(request, event_loop, event_bus, chaindb_fresh,
                               chaindb_1000):
    """Fast-sync 1000 blocks, then download the state for the new head."""
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_1000.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    syncer = FastChainSyncer(
        LatestTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=event_bus)

    event_server = run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer)
    request_server = run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_1000.db))
    async with event_server, request_server:

        client_peer.logger.info("%s is serving 1000 blocks", client_peer)
        server_peer.logger.info("%s is syncing up 1000 blocks", server_peer)

        await asyncio.wait_for(syncer.run(), timeout=20)

        head = chaindb_fresh.get_canonical_head()
        assert head == chaindb_1000.get_canonical_head()

        # Now download the state for the chain's head.
        state_downloader = StateDownloader(
            chaindb_fresh, chaindb_fresh.db, head.state_root, client_peer_pool)
        await asyncio.wait_for(state_downloader.run(), timeout=20)

        assert head.state_root in chaindb_fresh.db
Example #4
0
async def test_regular_syncer_fallback(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """
    Test the scenario where a header comes in that's not in memory (but is in the DB)
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    syncer = FallbackTesting_RegularChainSyncer(
        ByzantiumTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]))
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=event_bus)

    event_server = run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer)
    request_server = run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_20.db))
    async with event_server, request_server:

        server_peer.logger.info("%s is serving 20 blocks", server_peer)
        client_peer.logger.info("%s is syncing up 20", client_peer)

        def stop_syncer():
            event_loop.run_until_complete(syncer.cancel())
            # Yield control so that client/server.run() returns, otherwise
            # asyncio will complain.
            event_loop.run_until_complete(asyncio.sleep(0.1))

        request.addfinalizer(stop_syncer)

        asyncio.ensure_future(syncer.run())

        await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
        head = chaindb_fresh.get_canonical_head()
        assert head.state_root in chaindb_fresh.db
Example #5
0
async def test_proxy_peer_requests(request, event_bus, other_event_bus,
                                   event_loop, chaindb_fresh, chaindb_20):
    """Exercise header/receipt/body/node-data requests through a proxy peer."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    client_event_server = run_peer_pool_event_server(
        client_event_bus, client_peer_pool,
        handler_type=ETHPeerPoolEventServer)
    server_event_server = run_peer_pool_event_server(
        server_event_bus, server_peer_pool,
        handler_type=ETHPeerPoolEventServer)
    request_server = run_request_server(
        server_event_bus, FakeAsyncChainDB(chaindb_20.db))

    async with client_event_server, server_event_server, request_server, \
            run_proxy_peer_pool(client_event_bus) as client_proxy_peer_pool, \
            run_proxy_peer_pool(server_event_bus):

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.remote)

        # Genesis header via the proxy.
        headers = await proxy_peer.requests.get_block_headers(0, 1, 0, False)
        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        # Receipts for that header.
        receipts = await proxy_peer.requests.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        # Block bodies for that header.
        block_bundles = await proxy_peer.requests.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        # Trie node data for the state root.
        node_data = await proxy_peer.requests.get_node_data(
            (block_header.state_root, ))
        assert node_data[0][0] == block_header.state_root
Example #6
0
async def get_peer_and_receive_server(
    request, event_loop, event_bus
) -> Tuple[BCCPeer, BCCRequestServer, BCCReceiveServer, asyncio.Queue]:
    """Yield ``(alice, alice_req_server, bob_recv_server, msg_queue)``.

    ``msg_queue`` receives every message right after bob's
    ``BCCReceiveServer._handle_msg`` finishes processing it, so tests can
    wait until message handling is complete.
    """
    alice_chain = await get_fake_chain()
    bob_chain = await get_fake_chain()

    (alice, alice_peer_pool, bob, bob_peer_pool
     ) = await bcc_helpers.get_directly_linked_peers_in_peer_pools(
         request,
         event_loop,
         alice_chain_db=alice_chain.chaindb,
         bob_chain_db=bob_chain.chaindb,
     )

    msg_queue = asyncio.Queue()
    orig_handle_msg = BCCReceiveServer._handle_msg

    # Inject a queue to each `BCCReceiveServer`, which puts the message passed to `_handle_msg` to
    # the queue, right after every `_handle_msg` finishes.
    # This is crucial to make the test be able to wait until `_handle_msg` finishes.
    async def _handle_msg(self, base_peer, cmd, msg):
        task = asyncio.ensure_future(orig_handle_msg(self, base_peer, cmd,
                                                     msg))

        def enqueue_msg(future, msg):
            msg_queue.put_nowait(msg)

        task.add_done_callback(functools.partial(enqueue_msg, msg=msg))
        await task

    BCCReceiveServer._handle_msg = _handle_msg

    async with run_peer_pool_event_server(
            event_bus, alice_peer_pool,
            BCCPeerPoolEventServer), run_request_server(
                event_bus, alice_chain.chaindb,
                server_type=BCCRequestServer) as alice_req_server:

        bob_recv_server = BCCReceiveServer(chain=bob_chain,
                                           peer_pool=bob_peer_pool)

        asyncio.ensure_future(bob_recv_server.run())
        await bob_recv_server.events.started.wait()

        def finalizer():
            event_loop.run_until_complete(bob_recv_server.cancel())
            # Fix: undo the class-level monkeypatch so the wrapped handler
            # (and this fixture's msg_queue, which it closes over) does not
            # leak into other tests that use BCCReceiveServer.
            BCCReceiveServer._handle_msg = orig_handle_msg

        request.addfinalizer(finalizer)

        yield alice, alice_req_server, bob_recv_server, msg_queue
Example #7
0
async def test_requests_when_peer_in_client_vanishs(request, event_bus,
                                                    other_event_bus,
                                                    event_loop, chaindb_fresh,
                                                    chaindb_20):
    """Every proxy request fails with PeerConnectionLost once the peer is removed."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    client_event_server = run_peer_pool_event_server(
        client_event_bus, client_peer_pool,
        handler_type=ETHPeerPoolEventServer)
    server_event_server = run_peer_pool_event_server(
        server_event_bus, server_peer_pool,
        handler_type=ETHPeerPoolEventServer)
    request_server = run_request_server(
        server_event_bus, FakeAsyncChainDB(chaindb_20.db))

    async with client_event_server, server_event_server, request_server, \
            run_proxy_peer_pool(client_event_bus) as client_proxy_peer_pool, \
            run_proxy_peer_pool(server_event_bus):

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.remote)

        # We remove the peer from the client and assume to see PeerConnectionLost exceptions raised
        client_peer_pool.connected_nodes.pop(client_peer.remote)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_block_headers(0, 1, 0, False)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_receipts(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_block_bodies(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_node_data(())
Example #8
0
async def test_fast_syncer(request, event_bus, event_loop, chaindb_fresh,
                           chaindb_20):
    """Fast-sync 20 blocks, then download the state for the new head."""
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_fresh.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_20.db),
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        syncer = FastChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers(
            [server_peer], event_bus=event_bus)

        event_server = run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer)
        request_server = run_request_server(
            event_bus, AsyncChainDB(chaindb_20.db))
        async with event_server, request_server:

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            # FastChainSyncer.run() will return as soon as it's caught up with the peer.
            await asyncio.wait_for(syncer.run(), timeout=5)

            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_20.get_canonical_head()

            # Now download the state for the chain's head.
            state_downloader = StateDownloader(
                chaindb_fresh, chaindb_fresh.db, head.state_root,
                client_peer_pool)
            await asyncio.wait_for(state_downloader.run(), timeout=5)

            assert head.state_root in chaindb_fresh.db
Example #9
0
async def test_regular_syncer(request, event_loop, event_bus, chaindb_fresh,
                              chaindb_20):
    """Regular-sync a fresh chain DB against a peer serving 20 blocks."""
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_fresh.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_20.db),
        event_bus=event_bus,
    )

    async with peer_pair as (client_peer, server_peer):
        syncer = RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer]))
        server_peer_pool = MockPeerPoolWithConnectedPeers(
            [server_peer], event_bus=event_bus)

        event_server = run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer)
        request_server = run_request_server(
            event_bus, AsyncChainDB(chaindb_20.db))
        async with event_server, request_server:

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            def stop_syncer():
                event_loop.run_until_complete(syncer.cancel())
                # Yield control so that client/server.run() returns, otherwise
                # asyncio will complain.
                event_loop.run_until_complete(asyncio.sleep(0.1))

            request.addfinalizer(stop_syncer)

            asyncio.ensure_future(syncer.run())

            await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
            head = chaindb_fresh.get_canonical_head()
            assert head.state_root in chaindb_fresh.db
Example #10
0
async def test_fast_syncer(request, event_bus, event_loop, chaindb_fresh,
                           chaindb_20):
    """Fast-sync 20 blocks, then fetch the state trie for the new head."""
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    syncer = FastChainSyncer(
        ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=event_bus)

    event_server = run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer)
    request_server = run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_20.db))
    async with event_server, request_server:

        server_peer.logger.info("%s is serving 20 blocks", server_peer)
        client_peer.logger.info("%s is syncing up 20", client_peer)

        # FastChainSyncer.run() will return as soon as it's caught up with the peer.
        await asyncio.wait_for(syncer.run(), timeout=2)

        head = chaindb_fresh.get_canonical_head()
        assert head == chaindb_20.get_canonical_head()

        # Now download the state for the chain's head.
        state_downloader = StateDownloader(
            chaindb_fresh, chaindb_fresh.db, head.state_root, client_peer_pool)
        await asyncio.wait_for(state_downloader.run(), timeout=2)

        assert head.state_root in chaindb_fresh.db
Example #11
0
async def test_beam_syncer(request,
                           event_loop,
                           event_bus,
                           chaindb_fresh,
                           chaindb_churner,
                           beam_to_block,
                           checkpoint=None):
    """Beam-sync a fresh chain DB against a peer serving the churner chain.

    `beam_to_block` forces the block number at which beam sync kicks in;
    `checkpoint` optionally seeds the syncer with a trusted header.
    """

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer), run_request_server(
                    event_bus, AsyncChainDB(chaindb_churner.db)
                ), AsyncioEndpoint.serve(
                    pausing_config) as pausing_endpoint, AsyncioEndpoint.serve(
                        gatherer_config) as gatherer_endpoint:

            # Chain whose VM pauses and requests missing state over the
            # pausing endpoint (wiring lives in make_pausing_beam_chain).
            client_chain = make_pausing_beam_chain(
                ((0, PetersburgVM), ),
                chain_id=999,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
            )

            client_peer.logger.info("%s is serving churner blocks",
                                    client_peer)
            server_peer.logger.info("%s is syncing up churner blocks",
                                    server_peer)

            # Imports blocks announced over the pausing endpoint into
            # client_chain; shares the syncer's cancel token so it stops
            # with the client.
            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
                token=client.cancel_token,
            )
            asyncio.ensure_future(import_server.run())

            # Connect the pausing VM's endpoint to the gatherer before
            # starting the syncer, so state requests have somewhere to go.
            await pausing_endpoint.connect_to_endpoints(gatherer_config)
            asyncio.ensure_future(client.run())

            # We can sync at least 10 blocks in 1s at current speeds, (or reach the current one)
            # Trying to keep the tests short-ish. A fuller test could always set the target header
            #   to the chaindb_churner canonical head, and increase the timeout significantly
            target_block_number = min(beam_to_block + 10, 129)
            target_head = chaindb_churner.get_canonical_block_header_by_number(
                target_block_number)
            await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
            assert target_head.state_root in chaindb_fresh.db

            # first stop the import server, so it doesn't hang waiting for state data
            await import_server.cancel()
            await client.cancel()
Example #12
0
async def test_beam_syncer(request, event_loop, event_bus, chaindb_fresh,
                           chaindb_churner, beam_to_block):
    """Beam-sync a fresh chain DB against a peer serving the churner chain.

    `beam_to_block` forces the block number at which beam sync kicks in.
    """

    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_churner.db))

    # manually add endpoint for beam vm to make requests
    pausing_config = ConnectionConfig.from_name("PausingEndpoint")

    # manually add endpoint for trie data gatherer to serve requests
    gatherer_config = ConnectionConfig.from_name("GathererEndpoint")

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                      event_bus=event_bus)

    async with run_peer_pool_event_server(
            event_bus, server_peer_pool,
            handler_type=ETHPeerPoolEventServer), run_request_server(
                event_bus,
                FakeAsyncChainDB(chaindb_churner.db)), AsyncioEndpoint.serve(
                    pausing_config) as pausing_endpoint, AsyncioEndpoint.serve(
                        gatherer_config) as gatherer_endpoint:

        # VM that pauses and requests missing state over the pausing endpoint.
        BeamPetersburgVM = pausing_vm_decorator(PetersburgVM, pausing_endpoint)

        class BeamPetersburgTestChain(FakeAsyncChain):
            vm_configuration = ((0, BeamPetersburgVM), )
            network_id = 999

        client_chain = BeamPetersburgTestChain(chaindb_fresh.db)
        client = BeamSyncer(
            client_chain,
            chaindb_fresh.db,
            client_chain.chaindb,
            client_peer_pool,
            gatherer_endpoint,
            beam_to_block,
        )

        client_peer.logger.info("%s is serving churner blocks", client_peer)
        server_peer.logger.info("%s is syncing up churner blocks", server_peer)

        # Imports blocks announced over the pausing endpoint into
        # client_chain; shares the syncer's cancel token so it stops with it.
        import_server = BlockImportServer(pausing_endpoint,
                                          client_chain,
                                          token=client.cancel_token)
        asyncio.ensure_future(import_server.run())

        # Connect the pausing VM's endpoint to the gatherer before starting
        # the syncer, so state requests have somewhere to go.
        await pausing_endpoint.connect_to_endpoints(gatherer_config)
        asyncio.ensure_future(client.run())

        # We can sync at least 10 blocks in 1s at current speeds, (or reach the current one)
        # Trying to keep the tests short-ish. A fuller test could always set the target header
        #   to the chaindb_churner canonical head, and increase the timeout significantly
        target_block_number = min(beam_to_block + 10, 129)
        target_head = chaindb_churner.get_canonical_block_header_by_number(
            target_block_number)
        await wait_for_head(chaindb_fresh, target_head, sync_timeout=4)
        assert target_head.state_root in chaindb_fresh.db

        # first stop the import server, so it doesn't hang waiting for state data
        await import_server.cancel()
        await client.cancel()
Example #13
0
async def get_peer_and_receive_server(
    request, event_loop, event_bus
) -> Tuple[BCCPeer, BCCRequestServer, BCCReceiveServer, asyncio.Queue]:
    """Yield ``(alice, alice_req_server, bob_recv_server, msg_queue)``.

    ``msg_queue`` receives every message right after bob's
    ``BCCReceiveServer._handle_msg`` finishes processing it, so tests can
    wait until message handling is complete.
    """
    alice_chain = await get_fake_chain()
    bob_chain = await get_fake_chain()

    alice_private_key = PrivateKeyFactory()
    bob_private_key = PrivateKeyFactory()

    alice_context = BeaconContextFactory(chain_db=alice_chain.chaindb)
    bob_context = BeaconContextFactory(chain_db=bob_chain.chaindb)
    peer_pair = BCCPeerPairFactory(
        alice_peer_context=alice_context,
        alice_private_key=alice_private_key,
        bob_peer_context=bob_context,
        bob_private_key=bob_private_key,
        event_bus=event_bus,
    )
    async with peer_pair as (alice, bob):
        alice_pool_ctx = BCCPeerPoolFactory.run_for_peer(
            alice,
            privkey=alice_private_key,
            context=alice_context,
            event_bus=event_bus,
        )
        bob_pool_ctx = BCCPeerPoolFactory.run_for_peer(
            bob,
            privkey=bob_private_key,
            context=bob_context,
            event_bus=event_bus,
        )
        async with alice_pool_ctx as alice_peer_pool, bob_pool_ctx as bob_peer_pool:
            msg_queue = asyncio.Queue()
            orig_handle_msg = BCCReceiveServer._handle_msg

            # Inject a queue to each `BCCReceiveServer`, which puts the message
            # passed to `_handle_msg` to the queue, right after every `_handle_msg`
            # finishes.  This is crucial to make the test be able to wait until
            # `_handle_msg` finishes.
            async def _handle_msg(self, base_peer, cmd, msg):
                task = asyncio.ensure_future(
                    orig_handle_msg(self, base_peer, cmd, msg))

                def enqueue_msg(future, msg):
                    msg_queue.put_nowait(msg)

                task.add_done_callback(functools.partial(enqueue_msg, msg=msg))
                await task

            BCCReceiveServer._handle_msg = _handle_msg

            async with run_peer_pool_event_server(
                    event_bus, alice_peer_pool,
                    BCCPeerPoolEventServer), run_request_server(
                        event_bus,
                        alice_chain.chaindb,
                        server_type=BCCRequestServer) as alice_req_server:

                bob_recv_server = BCCReceiveServer(chain=bob_chain,
                                                   peer_pool=bob_peer_pool)

                asyncio.ensure_future(bob_recv_server.run())
                await bob_recv_server.events.started.wait()

                def finalizer():
                    event_loop.run_until_complete(bob_recv_server.cancel())
                    # Fix: undo the class-level monkeypatch so the wrapped
                    # handler (and this fixture's msg_queue, which it closes
                    # over) does not leak into other tests that use
                    # BCCReceiveServer.
                    BCCReceiveServer._handle_msg = orig_handle_msg

                request.addfinalizer(finalizer)

                yield alice, alice_req_server, bob_recv_server, msg_queue