Example #1
File: peer.py Project: wschwab/trinity
def ParagonPeerPairFactory(
    *,
    alice_peer_context: ParagonContext = None,
    alice_remote: NodeAPI = None,
    alice_private_key: keys.PrivateKey = None,
    alice_client_version: str = 'alice',
    bob_peer_context: ParagonContext = None,
    bob_remote: NodeAPI = None,
    bob_private_key: keys.PrivateKey = None,
    bob_client_version: str = 'bob',
    cancel_token: CancelToken = None,
    event_bus: EndpointAPI = None,
) -> AsyncContextManager[Tuple[ParagonPeer, ParagonPeer]]:
    if alice_peer_context is None:
        alice_peer_context = ParagonContext()
    if bob_peer_context is None:
        bob_peer_context = ParagonContext()

    return cast(
        AsyncContextManager[Tuple[ParagonPeer, ParagonPeer]],
        PeerPairFactory(
            alice_peer_context=alice_peer_context,
            alice_peer_factory_class=ParagonPeerFactory,
            bob_peer_context=bob_peer_context,
            bob_peer_factory_class=ParagonPeerFactory,
            alice_remote=alice_remote,
            alice_private_key=alice_private_key,
            alice_client_version=alice_client_version,
            bob_remote=bob_remote,
            bob_private_key=bob_private_key,
            bob_client_version=bob_client_version,
            cancel_token=cancel_token,
            event_bus=event_bus,
        ))
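
Since ParagonPeerPairFactory returns an AsyncContextManager[Tuple[ParagonPeer, ParagonPeer]], a test would normally consume it with "async with". A minimal usage sketch, assuming pytest-asyncio and the names defined above are importable (the test name is illustrative, not taken from the project):

import pytest

@pytest.mark.asyncio
async def test_paragon_peer_pair_sketch():
    # Sketch only: the factory is expected to yield two connected ParagonPeer
    # objects wired to each other in memory.
    async with ParagonPeerPairFactory() as (alice, bob):
        assert isinstance(alice, ParagonPeer)
        assert isinstance(bob, ParagonPeer)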
Example #2
File: test_server.py Project: onyb/trinity
def _make_peer_pool(self):
    return ParagonPeerPool(
        privkey=self.privkey,
        context=ParagonContext(),
        token=self.cancel_token,
        event_bus=self.event_bus,
    )
Example #3
File: test_server.py Project: onyb/trinity
async def test_peer_pool_answers_connect_commands(event_loop, event_bus, server, receiver_remote):
    # This is the PeerPool which will accept our message and try to connect to {server}
    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
        event_bus=event_bus,
    )
    asyncio.ensure_future(initiator_peer_pool.run(), loop=event_loop)
    await initiator_peer_pool.events.started.wait()
    async with run_peer_pool_event_server(
        event_bus,
        initiator_peer_pool,
    ):

        assert len(server.peer_pool.connected_nodes) == 0

        await event_bus.wait_until_any_endpoint_subscribed_to(ConnectToNodeCommand)
        await event_bus.broadcast(
            ConnectToNodeCommand(receiver_remote),
            TO_NETWORKING_BROADCAST_CONFIG
        )

        # This test was maybe 30% flaky at 0.1 sleep
        await asyncio.sleep(0.2)

        assert len(server.peer_pool.connected_nodes) == 1

        await initiator_peer_pool.cancel()
Example #4
File: test_server.py Project: onyb/trinity
async def test_peer_pool_connect(monkeypatch, event_loop, server, receiver_remote):
    started_peers = []

    async def mock_start_peer(peer):
        nonlocal started_peers
        started_peers.append(peer)

    monkeypatch.setattr(server.peer_pool, 'start_peer', mock_start_peer)

    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
    )
    nodes = [receiver_remote]
    asyncio.ensure_future(initiator_peer_pool.run(), loop=event_loop)
    await initiator_peer_pool.events.started.wait()
    await initiator_peer_pool.connect_to_nodes(nodes)
    # Give the receiver_server a chance to ack the handshake.
    await asyncio.sleep(0.2)

    assert len(started_peers) == 1
    assert len(initiator_peer_pool.connected_nodes) == 1

    # Stop our peer to make sure its pending asyncio tasks are cancelled.
    await list(initiator_peer_pool.connected_nodes.values())[0].cancel()
    await initiator_peer_pool.cancel()
Example #5
async def test_peer_pool_answers_connect_commands(event_bus, server, receiver_remote):
    # This is the PeerPool which will accept our message and try to connect to {server}
    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
        event_bus=event_bus,
    )
    async with background_asyncio_service(initiator_peer_pool) as manager:
        await manager.wait_started()
        async with run_peer_pool_event_server(event_bus, initiator_peer_pool):

            assert len(server.peer_pool.connected_nodes) == 0

            await event_bus.wait_until_any_endpoint_subscribed_to(ConnectToNodeCommand)
            await event_bus.broadcast(
                ConnectToNodeCommand(receiver_remote),
                TO_NETWORKING_BROADCAST_CONFIG
            )

            # This test was maybe 30% flaky at 0.1 sleep, so wait in a loop.
            for _ in range(5):
                await asyncio.sleep(0.1)
                if len(server.peer_pool.connected_nodes) == 1:
                    break
            else:
                assert len(server.peer_pool.connected_nodes) == 1
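
The for/else polling loop at the end of the example above is one way to handle the flakiness; an equivalent formulation (a sketch with a hypothetical helper name, not taken from the test suite) wraps a small polling coroutine in asyncio.wait_for so the timeout is explicit:

import asyncio

async def wait_for_connected(peer_pool, expected_count, timeout=0.5):
    # Hypothetical helper: poll until the pool reports the expected number of
    # connected nodes, or let asyncio.wait_for raise TimeoutError.
    async def _poll():
        while len(peer_pool.connected_nodes) != expected_count:
            await asyncio.sleep(0.05)
    await asyncio.wait_for(_poll(), timeout=timeout)

In the test above this would replace the loop with: await wait_for_connected(server.peer_pool, 1).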
Example #6
async def test_peer_pool_connect(monkeypatch, server, receiver_remote):
    peer_started = asyncio.Event()

    start_peer = server.peer_pool.start_peer

    async def mock_start_peer(peer):
        # Call the original start_peer() so that we create and run Peer/Connection objects,
        # which will ensure everything is cleaned up properly.
        await start_peer(peer)
        peer_started.set()

    monkeypatch.setattr(server.peer_pool, 'start_peer', mock_start_peer)

    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
    )
    nodes = [receiver_remote]
    async with background_asyncio_service(initiator_peer_pool) as manager:
        await manager.wait_started()
        await initiator_peer_pool.connect_to_nodes(nodes)

        await asyncio.wait_for(peer_started.wait(), timeout=10)

        assert len(initiator_peer_pool.connected_nodes) == 1
Example #7
async def test_peer_pool_connect(monkeypatch, server, receiver_remote):
    receiving_peer_pool = server.peer_pool
    peer_started = asyncio.Event()

    add_inbound_peer = receiving_peer_pool.add_inbound_peer

    async def mock_add_inbound_peer(peer):
        # Call the original add_inbound_peer() so that we create and run Peer/Connection objects,
        # which will ensure everything is cleaned up properly.
        await add_inbound_peer(peer)
        peer_started.set()

    monkeypatch.setattr(receiving_peer_pool, 'add_inbound_peer',
                        mock_add_inbound_peer)

    broadcast_msg_buffer = BroadcastMsgCollector()
    receiving_peer_pool.subscribe(broadcast_msg_buffer)

    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
    )
    nodes = [receiver_remote]
    async with background_asyncio_service(initiator_peer_pool) as manager:
        await manager.wait_started()
        await initiator_peer_pool.connect_to_nodes(nodes)

        await asyncio.wait_for(peer_started.wait(), timeout=2)

        assert len(initiator_peer_pool.connected_nodes) == 1

        peer = list(initiator_peer_pool.connected_nodes.values())[0]

        receiving_peer = list(receiving_peer_pool.connected_nodes.values())[0]
        # Once our peer is running, it will start streaming messages, which will be stored in our
        # msg buffer.
        assert receiving_peer.connection.is_streaming_messages

        peer.connection.get_logic(ParagonAPI.name,
                                  ParagonAPI).send_broadcast_data(b'data')
        msg = await asyncio.wait_for(broadcast_msg_buffer.msg_queue.get(),
                                     timeout=0.5)
        assert msg.command.payload.data == b'data'
Example #8
async def test_server_incoming_connection(monkeypatch, server, event_loop):
    use_eip8 = False
    token = CancelToken("initiator")
    initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY,
                                   use_eip8, token)
    reader, writer = await initiator.connect()
    # Send auth init message to the server, then read and decode auth ack
    aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake(
        initiator, reader, writer, token)

    transport = Transport(
        remote=RECEIVER_REMOTE,
        private_key=initiator.privkey,
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )
    initiator_peer = ParagonPeer(
        transport=transport,
        context=ParagonContext(),
        token=token,
    )
    # Perform p2p/sub-proto handshake, completing the full handshake and causing a new peer to be
    # added to the server's pool.
    await initiator_peer.do_p2p_handshake()
    await initiator_peer.do_sub_proto_handshake()

    # wait for peer to be processed
    while len(server.peer_pool) == 0:
        await asyncio.sleep(0)

    assert len(server.peer_pool.connected_nodes) == 1
    receiver_peer = list(server.peer_pool.connected_nodes.values())[0]
    assert isinstance(receiver_peer, ParagonPeer)
    assert initiator_peer.sub_proto is not None
    assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name
    assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version
    # test public key here in order to not access private `_private_key` variable.
    assert receiver_peer.transport.public_key == RECEIVER_PRIVKEY.public_key
Example #9
async def test_peer_pool_answers_connect_commands(event_loop, event_bus,
                                                  server):
    # This is the PeerPool which will accept our message and try to connect to {server}
    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
        event_bus=event_bus,
    )
    asyncio.ensure_future(initiator_peer_pool.run(), loop=event_loop)
    await initiator_peer_pool.events.started.wait()

    assert len(server.peer_pool.connected_nodes) == 0

    event_bus.broadcast(ConnectToNodeCommand(RECEIVER_REMOTE.uri()),
                        TO_NETWORKING_BROADCAST_CONFIG)

    await asyncio.sleep(0.5)

    assert len(server.peer_pool.connected_nodes) == 1
    await initiator_peer_pool.cancel()
Example #10
File: test_server.py Project: onyb/trinity
async def test_server_incoming_connection(monkeypatch,
                                          server,
                                          event_loop,
                                          receiver_remote,
                                          ):
    use_eip8 = False
    token = CancelToken("initiator")
    initiator = HandshakeInitiator(receiver_remote, INITIATOR_PRIVKEY, use_eip8, token)
    initiator_remote = NodeFactory(
        pubkey=INITIATOR_PUBKEY,
        address__ip='127.0.0.1',
    )
    for _ in range(10):
        # The server isn't listening immediately so we give it a short grace
        # period while trying to connect.
        try:
            reader, writer = await initiator.connect()
        except ConnectionRefusedError:
            await asyncio.sleep(0)
        else:
            break
    else:
        raise AssertionError("Unable to connect within 10 loops")
    # Send auth init message to the server, then read and decode auth ack
    aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake(
        initiator, reader, writer, token)

    transport = Transport(
        remote=receiver_remote,
        private_key=initiator.privkey,
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )

    factory = ParagonPeerFactory(
        initiator.privkey,
        context=ParagonContext(),
        token=token,
    )
    handshakers = await factory.get_handshakers()
    devp2p_handshake_params = DevP2PHandshakeParamsFactory(
        listen_port=initiator_remote.address.tcp_port,
    )

    multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
        transport=transport,
        p2p_handshake_params=devp2p_handshake_params,
        protocol_handshakers=handshakers,
        token=token,
    )
    connection = Connection(
        multiplexer=multiplexer,
        devp2p_receipt=devp2p_receipt,
        protocol_receipts=protocol_receipts,
        is_dial_out=False,
    )
    initiator_peer = factory.create_peer(connection=connection)

    # wait for peer to be processed
    for _ in range(100):
        if len(server.peer_pool) > 0:
            break
        await asyncio.sleep(0)

    assert len(server.peer_pool.connected_nodes) == 1
    receiver_peer = list(server.peer_pool.connected_nodes.values())[0]
    assert isinstance(receiver_peer, ParagonPeer)
    assert initiator_peer.sub_proto is not None
    assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name
    assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version
    assert initiator_peer.remote.pubkey == RECEIVER_PRIVKEY.public_key
Example #11
async def test_handshake():
    # TODO: this test should be re-written to not depend on functionality in the `ETHPeer` class.
    cancel_token = CancelToken("test_handshake")
    use_eip8 = False
    initiator_remote = kademlia.Node(
        keys.PrivateKey(test_values['receiver_private_key']).public_key,
        kademlia.Address('0.0.0.0', 0, 0))
    initiator = HandshakeInitiator(
        initiator_remote,
        keys.PrivateKey(test_values['initiator_private_key']),
        use_eip8,
        cancel_token)
    initiator.ephemeral_privkey = keys.PrivateKey(test_values['initiator_ephemeral_private_key'])

    responder_remote = kademlia.Node(
        keys.PrivateKey(test_values['initiator_private_key']).public_key,
        kademlia.Address('0.0.0.0', 0, 0))
    responder = HandshakeResponder(
        responder_remote,
        keys.PrivateKey(test_values['receiver_private_key']),
        use_eip8,
        cancel_token)
    responder.ephemeral_privkey = keys.PrivateKey(test_values['receiver_ephemeral_private_key'])

    # Check that the auth message generated by the initiator is what we expect. Notice that we
    # can't use the auth_init generated here because the non-deterministic prefix would cause the
    # derived secrets to not match the expected values.
    _auth_init = initiator.create_auth_message(test_values['initiator_nonce'])
    assert len(_auth_init) == len(test_values['auth_plaintext'])
    assert _auth_init[65:] == test_values['auth_plaintext'][65:]  # starts with non deterministic k

    # Check that encrypting and decrypting the auth_init gets us the orig msg.
    _auth_init_ciphertext = initiator.encrypt_auth_message(_auth_init)
    assert _auth_init == ecies.decrypt(_auth_init_ciphertext, responder.privkey)

    # Check that the responder correctly decodes the auth msg.
    auth_msg_ciphertext = test_values['auth_ciphertext']
    initiator_ephemeral_pubkey, initiator_nonce, _ = decode_authentication(
        auth_msg_ciphertext, responder.privkey)
    assert initiator_nonce == test_values['initiator_nonce']
    assert initiator_ephemeral_pubkey == (
        keys.PrivateKey(test_values['initiator_ephemeral_private_key']).public_key)

    # Check that the auth_ack msg generated by the responder is what we expect.
    auth_ack_msg = responder.create_auth_ack_message(test_values['receiver_nonce'])
    assert auth_ack_msg == test_values['authresp_plaintext']

    # Check that the secrets derived from ephemeral key agreements match the expected values.
    auth_ack_ciphertext = test_values['authresp_ciphertext']
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce, test_values['receiver_nonce'],
        initiator_ephemeral_pubkey, auth_msg_ciphertext, auth_ack_ciphertext)
    assert aes_secret == test_values['aes_secret']
    assert mac_secret == test_values['mac_secret']
    # Test values are from initiator perspective, so they're reversed here.
    assert ingress_mac.digest() == test_values['initial_egress_MAC']
    assert egress_mac.digest() == test_values['initial_ingress_MAC']

    # Check that the initiator secrets match as well.
    responder_ephemeral_pubkey, responder_nonce = initiator.decode_auth_ack_message(
        test_values['authresp_ciphertext'])
    (initiator_aes_secret,
     initiator_mac_secret,
     initiator_egress_mac,
     initiator_ingress_mac) = initiator.derive_secrets(
         initiator_nonce, responder_nonce,
         responder_ephemeral_pubkey, auth_msg_ciphertext, auth_ack_ciphertext)
    assert initiator_aes_secret == aes_secret
    assert initiator_mac_secret == mac_secret
    assert initiator_ingress_mac.digest() == test_values['initial_ingress_MAC']
    assert initiator_egress_mac.digest() == test_values['initial_egress_MAC']

    # Finally, check that two Peers configured with the secrets generated above understand each
    # other.
    (
        (responder_reader, responder_writer),
        (initiator_reader, initiator_writer),
    ) = get_directly_connected_streams()

    initiator_connection = PeerConnection(
        reader=initiator_reader,
        writer=initiator_writer,
        aes_secret=initiator_aes_secret,
        mac_secret=initiator_mac_secret,
        egress_mac=initiator_egress_mac,
        ingress_mac=initiator_ingress_mac
    )
    initiator_peer = ParagonPeer(
        remote=initiator.remote,
        privkey=initiator.privkey,
        connection=initiator_connection,
        context=ParagonContext(),
    )
    initiator_peer.base_protocol.send_handshake()
    responder_connection = PeerConnection(
        reader=responder_reader,
        writer=responder_writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )
    responder_peer = ParagonPeer(
        remote=responder.remote,
        privkey=responder.privkey,
        connection=responder_connection,
        context=ParagonContext(),
    )
    responder_peer.base_protocol.send_handshake()

    # The handshake msgs sent by each peer (above) are going to be fed directly into their remote's
    # reader, and thus the read_msg() calls will return immediately.
    responder_hello, _ = await responder_peer.read_msg()
    initiator_hello, _ = await initiator_peer.read_msg()

    assert isinstance(responder_hello, Hello)
    assert isinstance(initiator_hello, Hello)
Example #12
async def test_handshake_eip8():
    cancel_token = CancelToken("test_handshake_eip8")
    use_eip8 = True
    initiator_remote = kademlia.Node(
        keys.PrivateKey(eip8_values['receiver_private_key']).public_key,
        kademlia.Address('0.0.0.0', 0, 0))
    initiator = HandshakeInitiator(
        initiator_remote,
        keys.PrivateKey(eip8_values['initiator_private_key']),
        use_eip8,
        cancel_token)
    initiator.ephemeral_privkey = keys.PrivateKey(eip8_values['initiator_ephemeral_private_key'])

    responder_remote = kademlia.Node(
        keys.PrivateKey(eip8_values['initiator_private_key']).public_key,
        kademlia.Address('0.0.0.0', 0, 0))
    responder = HandshakeResponder(
        responder_remote,
        keys.PrivateKey(eip8_values['receiver_private_key']),
        use_eip8,
        cancel_token)
    responder.ephemeral_privkey = keys.PrivateKey(eip8_values['receiver_ephemeral_private_key'])

    auth_init_ciphertext = eip8_values['auth_init_ciphertext']

    # Check that we can decrypt/decode the EIP-8 auth init message.
    initiator_ephemeral_pubkey, initiator_nonce, _ = decode_authentication(
        auth_init_ciphertext, responder.privkey)
    assert initiator_nonce == eip8_values['initiator_nonce']
    assert initiator_ephemeral_pubkey == (
        keys.PrivateKey(eip8_values['initiator_ephemeral_private_key']).public_key)

    responder_nonce = eip8_values['receiver_nonce']
    auth_ack_ciphertext = eip8_values['auth_ack_ciphertext']
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce, responder_nonce, initiator_ephemeral_pubkey, auth_init_ciphertext,
        auth_ack_ciphertext)

    # Check that the secrets derived by the responder match the expected values.
    assert aes_secret == eip8_values['expected_aes_secret']
    assert mac_secret == eip8_values['expected_mac_secret']

    # Also according to https://github.com/ethereum/EIPs/blob/master/EIPS/eip-8.md, running B's
    # ingress-mac keccak state on the string "foo" yields the following hash:
    ingress_mac_copy = ingress_mac.copy()
    ingress_mac_copy.update(b'foo')
    assert ingress_mac_copy.hexdigest() == (
        '0c7ec6340062cc46f5e9f1e3cf86f8c8c403c5a0964f5df0ebd34a75ddc86db5')

    responder_ephemeral_pubkey, responder_nonce = initiator.decode_auth_ack_message(
        auth_ack_ciphertext)
    (initiator_aes_secret,
     initiator_mac_secret,
     initiator_egress_mac,
     initiator_ingress_mac) = initiator.derive_secrets(
        initiator_nonce, responder_nonce, responder_ephemeral_pubkey, auth_init_ciphertext,
        auth_ack_ciphertext)

    # Check that the secrets derived by the initiator match the expected values.
    assert initiator_aes_secret == eip8_values['expected_aes_secret']
    assert initiator_mac_secret == eip8_values['expected_mac_secret']

    # Finally, check that two Peers configured with the secrets generated above understand each
    # other.
    (
        (responder_reader, responder_writer),
        (initiator_reader, initiator_writer),
    ) = get_directly_connected_streams()

    initiator_connection = PeerConnection(
        reader=initiator_reader,
        writer=initiator_writer,
        aes_secret=initiator_aes_secret,
        mac_secret=initiator_mac_secret,
        egress_mac=initiator_egress_mac,
        ingress_mac=initiator_ingress_mac
    )
    initiator_peer = ParagonPeer(
        remote=initiator.remote,
        privkey=initiator.privkey,
        connection=initiator_connection,
        context=ParagonContext(),
    )
    initiator_peer.base_protocol.send_handshake()
    responder_connection = PeerConnection(
        reader=responder_reader,
        writer=responder_writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )
    responder_peer = ParagonPeer(
        remote=responder.remote,
        privkey=responder.privkey,
        connection=responder_connection,
        context=ParagonContext(),
    )
    responder_peer.base_protocol.send_handshake()

    # The handshake msgs sent by each peer (above) are going to be fed directly into their remote's
    # reader, and thus the read_msg() calls will return immediately.
    responder_hello, _ = await responder_peer.read_msg()
    initiator_hello, _ = await initiator_peer.read_msg()

    assert isinstance(responder_hello, Hello)
    assert isinstance(initiator_hello, Hello)
Example #13
async def test_server_incoming_connection(server, receiver_remote):
    use_eip8 = False
    initiator = HandshakeInitiator(receiver_remote, INITIATOR_PRIVKEY,
                                   use_eip8)
    initiator_remote = NodeFactory(
        pubkey=INITIATOR_PUBKEY,
        address__ip='127.0.0.1',
    )
    for num_retries in range(10):
        # The server isn't listening immediately so we give it a short grace
        # period while trying to connect.
        try:
            reader, writer = await initiator.connect()
        except ConnectionRefusedError:
            await asyncio.sleep(0.001 * num_retries)
        else:
            break
    else:
        raise AssertionError("Unable to connect within 10 loops")
    # Send auth init message to the server, then read and decode auth ack
    aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake(
        initiator, reader, writer)

    transport = Transport(
        remote=receiver_remote,
        private_key=initiator.privkey,
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )

    factory = ParagonPeerFactory(
        initiator.privkey,
        context=ParagonContext(),
    )
    handshakers = await factory.get_handshakers()
    devp2p_handshake_params = DevP2PHandshakeParamsFactory(
        listen_port=initiator_remote.address.tcp_port,
    )

    multiplexer, devp2p_receipt, protocol_receipts = await negotiate_protocol_handshakes(
        transport=transport,
        p2p_handshake_params=devp2p_handshake_params,
        protocol_handshakers=handshakers,
    )
    connection = Connection(
        multiplexer=multiplexer,
        devp2p_receipt=devp2p_receipt,
        protocol_receipts=protocol_receipts,
        is_dial_out=False,
    )
    initiator_peer = factory.create_peer(connection=connection)

    # wait for peer to be processed
    for _ in range(100):
        if len(server.peer_pool) > 0:
            break
        await asyncio.sleep(0)

    assert len(server.peer_pool.connected_nodes) == 1
    receiver_peer = list(server.peer_pool.connected_nodes.values())[0]
    assert isinstance(receiver_peer, ParagonPeer)
    assert initiator_peer.sub_proto is not None
    assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name
    assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version
    assert initiator_peer.remote.pubkey == RECEIVER_PRIVKEY.public_key

    # Our connections are created manually and we don't call run() on them, so we need to stop the
    # background streaming task or else we'll get asyncio warnings about task exceptions not being
    # retrieved.
    await initiator_peer.connection._multiplexer.stop_streaming()
    await receiver_peer.connection._multiplexer.stop_streaming()