async def test_peer_pool_answers_connect_commands(event_loop, event_bus, server, receiver_remote):
    """A running peer pool must dial out to ``server`` when a ConnectToNodeCommand
    arrives on the event bus.
    """
    # This is the PeerPool which will accept our message and try to connect to {server}
    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
        event_bus=event_bus,
    )
    asyncio.ensure_future(initiator_peer_pool.run(), loop=event_loop)
    await initiator_peer_pool.events.started.wait()
    async with run_peer_pool_event_server(
        event_bus,
        initiator_peer_pool,
    ):
        assert len(server.peer_pool.connected_nodes) == 0
        await event_bus.wait_until_any_endpoint_subscribed_to(ConnectToNodeCommand)
        await event_bus.broadcast(
            ConnectToNodeCommand(receiver_remote),
            TO_NETWORKING_BROADCAST_CONFIG
        )
        # This test was ~30% flaky with a single fixed 0.1s sleep, so instead
        # poll for the connection with a bounded retry loop (up to ~1s total).
        for _ in range(10):
            await asyncio.sleep(0.1)
            if len(server.peer_pool.connected_nodes) == 1:
                break
        assert len(server.peer_pool.connected_nodes) == 1
    await initiator_peer_pool.cancel()
async def test_peer_pool_connect(monkeypatch, event_loop, server, receiver_remote):
    """Connecting an initiator pool to ``receiver_remote`` must trigger exactly one
    ``start_peer`` call on the receiving server's pool.
    """
    started_peers = []

    async def mock_start_peer(peer):
        # Record the peer instead of running it.  ``append`` mutates the
        # closed-over list, so no ``nonlocal`` declaration is needed (the
        # name is never rebound).
        started_peers.append(peer)

    monkeypatch.setattr(server.peer_pool, 'start_peer', mock_start_peer)

    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
    )
    nodes = [receiver_remote]
    asyncio.ensure_future(initiator_peer_pool.run(), loop=event_loop)
    await initiator_peer_pool.events.started.wait()
    await initiator_peer_pool.connect_to_nodes(nodes)

    # Give the receiver_server a chance to ack the handshake.  Poll with a
    # bounded retry loop rather than one fixed-length sleep, which was flaky.
    for _ in range(10):
        await asyncio.sleep(0.1)
        if started_peers:
            break

    assert len(started_peers) == 1
    assert len(initiator_peer_pool.connected_nodes) == 1
    # Stop our peer to make sure its pending asyncio tasks are cancelled.
    await list(initiator_peer_pool.connected_nodes.values())[0].cancel()
    await initiator_peer_pool.cancel()
def _make_peer_pool(self):
    """Build the ParagonPeerPool for this instance, wired to its private key,
    cancel token and event bus.
    """
    pool = ParagonPeerPool(
        privkey=self.privkey,
        context=ParagonContext(),
        token=self.cancel_token,
        event_bus=self.event_bus,
    )
    return pool
async def test_peer_pool_answers_connect_commands(event_bus, server, receiver_remote):
    """A running peer pool must dial out to ``server`` when a ConnectToNodeCommand
    arrives on the event bus.
    """
    # This is the PeerPool which will accept our message and try to connect to {server}
    dialing_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
        event_bus=event_bus,
    )
    async with background_asyncio_service(dialing_pool) as manager:
        await manager.wait_started()
        async with run_peer_pool_event_server(event_bus, dialing_pool):
            assert len(server.peer_pool.connected_nodes) == 0
            await event_bus.wait_until_any_endpoint_subscribed_to(ConnectToNodeCommand)
            await event_bus.broadcast(
                ConnectToNodeCommand(receiver_remote),
                TO_NETWORKING_BROADCAST_CONFIG
            )
            # This test was maybe 30% flaky at a single 0.1s sleep, so poll
            # up to five times (0.5s total) for the connection to appear.
            attempts_left = 5
            while attempts_left:
                await asyncio.sleep(0.1)
                if len(server.peer_pool.connected_nodes) == 1:
                    break
                attempts_left -= 1
            else:
                assert len(server.peer_pool.connected_nodes) == 1
async def test_peer_pool_connect(monkeypatch, server, receiver_remote):
    """Connecting an initiator pool to ``receiver_remote`` must complete the
    handshake and leave exactly one connected peer.
    """
    handshake_done = asyncio.Event()
    original_start_peer = server.peer_pool.start_peer

    async def spying_start_peer(peer):
        # Delegate to the real start_peer() so Peer/Connection objects are
        # created and run (and therefore cleaned up) normally, then signal
        # that the handshake completed.
        await original_start_peer(peer)
        handshake_done.set()

    monkeypatch.setattr(server.peer_pool, 'start_peer', spying_start_peer)

    dialing_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
    )
    async with background_asyncio_service(dialing_pool) as manager:
        await manager.wait_started()
        await dialing_pool.connect_to_nodes([receiver_remote])
        await asyncio.wait_for(handshake_done.wait(), timeout=10)
        assert len(dialing_pool.connected_nodes) == 1
async def test_peer_pool_answers_connect_commands(event_loop, event_bus, server):
    """A running peer pool must dial out to ``server`` when a ConnectToNodeCommand
    arrives on the event bus.
    """
    # This is the PeerPool which will accept our message and try to connect to {server}
    initiator_peer_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
        event_bus=event_bus,
    )
    asyncio.ensure_future(initiator_peer_pool.run(), loop=event_loop)
    await initiator_peer_pool.events.started.wait()

    assert len(server.peer_pool.connected_nodes) == 0
    # Fix: broadcast() is a coroutine (the sibling tests await it) and was
    # previously called without ``await``, so the command was never actually
    # sent onto the bus — the created coroutine was silently dropped.
    await event_bus.broadcast(
        ConnectToNodeCommand(RECEIVER_REMOTE.uri()),
        TO_NETWORKING_BROADCAST_CONFIG,
    )
    await asyncio.sleep(0.5)
    assert len(server.peer_pool.connected_nodes) == 1
    await initiator_peer_pool.cancel()
async def test_peer_pool_connect(monkeypatch, server, receiver_remote):
    """Connect an initiator pool to ``receiver_remote``, then verify a broadcast
    message sent over the new connection is received by the inbound side.
    """
    inbound_pool = server.peer_pool
    handshake_done = asyncio.Event()
    original_add_inbound_peer = inbound_pool.add_inbound_peer

    async def spying_add_inbound_peer(peer):
        # Delegate to the real add_inbound_peer() so Peer/Connection objects
        # are created and run (and therefore cleaned up) normally, then
        # signal that the inbound peer was added.
        await original_add_inbound_peer(peer)
        handshake_done.set()

    monkeypatch.setattr(inbound_pool, 'add_inbound_peer', spying_add_inbound_peer)

    msg_collector = BroadcastMsgCollector()
    inbound_pool.subscribe(msg_collector)

    dialing_pool = ParagonPeerPool(
        privkey=INITIATOR_PRIVKEY,
        context=ParagonContext(),
    )
    async with background_asyncio_service(dialing_pool) as manager:
        await manager.wait_started()
        await dialing_pool.connect_to_nodes([receiver_remote])
        await asyncio.wait_for(handshake_done.wait(), timeout=2)
        assert len(dialing_pool.connected_nodes) == 1

        outbound_peer = list(dialing_pool.connected_nodes.values())[0]
        inbound_peer = list(inbound_pool.connected_nodes.values())[0]
        # Once our peer is running, it will start streaming messages, which
        # will be stored in our msg buffer.
        assert inbound_peer.connection.is_streaming_messages
        outbound_peer.connection.get_logic(ParagonAPI.name, ParagonAPI).send_broadcast_data(b'data')
        received = await asyncio.wait_for(msg_collector.msg_queue.get(), timeout=0.5)
        assert received.command.payload.data == b'data'