async def test_tx_sending(request, event_loop):
    # End-to-end check that a tx announced by one peer travels through the
    # pool to the other peer (the previous test focusses on the pool's rules
    # about when to send which tx to whom).
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
        peer1_class=ETHPeer,
        peer2_class=ETHPeer,
    )
    received_at_peer2 = asyncio.Queue()
    peer2.add_subscriber(received_at_peer2)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]))
    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())
    request.addfinalizer(finalizer)

    txs = [create_random_tx()]
    peer1.sub_proto.send_transactions(txs)

    # the pool must forward the transactions to peer2
    peer, cmd, msg = await asyncio.wait_for(
        received_at_peer2.get(),
        timeout=0.1,
    )
    assert peer == peer2
    assert isinstance(cmd, Transactions)
    assert msg[0].hash == txs[0].hash
async def test_syncer_proposing(request, event_loop):
    """Propose a collation at node b and check that it announces it to a."""
    # a-b topology; a subscribes to the messages it receives from b
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(
        request, event_loop)
    messages_at_a = asyncio.Queue()
    peer_a_b.add_subscriber(messages_at_a)

    # node b: fresh shard db plus a syncer over its single-peer pool
    shard = Shard(ShardDB(MemoryDB()), 0)
    syncer = ShardSyncer(shard, MockPeerPoolWithConnectedPeers([peer_b_a]))
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # b proposes; a must be notified about the new collation hash
    await syncer.propose()
    peer, cmd, msg = await asyncio.wait_for(
        messages_at_a.get(),
        timeout=1,
    )
    assert peer == peer_a_b
    assert isinstance(cmd, NewCollationHashes)
    assert len(msg["collation_hashes_and_periods"]) == 1
    proposed_hash = msg["collation_hashes_and_periods"][0][0]

    # the proposal must also have been added to b's shard (raises if missing)
    shard.get_collation_by_hash(proposed_hash)
async def test_tx_propagation(monkeypatch, request, event_loop):
    """Exercise the pool's de-duplication and no-bounce-back rules."""
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
        peer1_class=ETHPeer,
        peer2_class=ETHPeer,
    )
    # The recorders wrap sub_proto.send_transactions so we can inspect which
    # peer received what and how often the hook was invoked.
    recorder1 = create_tx_recorder(monkeypatch, peer1)
    recorder2 = create_tx_recorder(monkeypatch, peer2)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]))
    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())
    request.addfinalizer(finalizer)

    txs_from_peer1 = [create_random_tx()]

    # peer1 broadcasts a tx: it must reach peer2 but never bounce back
    await pool._handle_tx(peer1, txs_from_peer1)
    assert recorder1.send_count == 0
    assert len(recorder2.recorded_tx) == 1
    assert recorder2.recorded_tx[0].hash == txs_from_peer1[0].hash

    # repeating the same broadcast must not re-send to peer2
    await pool._handle_tx(peer1, txs_from_peer1)
    assert recorder2.send_count == 1

    # peer2 echoing the tx back: peer1 (the origin) must not receive it, and
    # send_transactions must not even be called with an empty tx list
    await pool._handle_tx(peer2, txs_from_peer1)
    assert len(recorder1.recorded_tx) == 0
    assert recorder1.send_count == 0

    # a mixed old+new batch from peer2: peer1 only gets the one new tx
    txs_from_peer2 = [create_random_tx(), txs_from_peer1[0]]
    await pool._handle_tx(peer2, txs_from_peer2)
    assert len(recorder1.recorded_tx) == 1
    assert recorder1.recorded_tx[0].hash == txs_from_peer2[0].hash
    assert recorder1.send_count == 1
async def test_syncer_requests_new_collations(request, event_loop):
    """Announce an unknown collation hash to b and check that b requests it."""
    # a-b topology; a subscribes to the messages it receives from b
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(
        request, event_loop)
    messages_at_a = asyncio.Queue()
    peer_a_b.add_subscriber(messages_at_a)

    # node b: fresh shard db plus a syncer over its single-peer pool
    shard = Shard(ShardDB(MemoryDB()), 0)
    syncer = ShardSyncer(shard, MockPeerPoolWithConnectedPeers([peer_b_a]))
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # a announces a hash b doesn't know; b must ask a for the collation
    hashes_and_periods = ((b"\xaa" * 32, 0),)
    peer_a_b.sub_proto.send_new_collation_hashes(hashes_and_periods)
    peer, cmd, msg = await asyncio.wait_for(
        messages_at_a.get(),
        timeout=1,
    )
    assert peer == peer_a_b
    assert isinstance(cmd, GetCollations)
    assert msg["collation_hashes"] == (hashes_and_periods[0][0],)
async def test_shard_syncer(connections, request, event_loop):
    """Build the topology given by ``connections`` and check that every
    collation proposed at one node shows up at all the others.

    ``connections`` is an iterable of ``(server_id, server_id)`` edges; one
    directly linked peer pair is created per edge.
    """
    # group the per-edge peer objects by the server they belong to
    peers_by_server = {}
    for server_id1, server_id2 in connections:
        peer1, peer2 = await get_directly_linked_sharding_peers(
            request, event_loop)
        peers_by_server.setdefault(server_id1, []).append(peer1)
        peers_by_server.setdefault(server_id2, []).append(peer2)

    # one syncer (with its own shard db) per server, in a stable order
    syncers = []
    for _, peers in sorted(peers_by_server.items()):
        peer_pool = MockPeerPoolWithConnectedPeers(peers)
        shard_db = ShardDB(MemoryDB())
        syncer = ShardSyncer(Shard(shard_db, 0), peer_pool)
        syncers.append(syncer)
        asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(*[syncer.cancel() for syncer in syncers]))
    request.addfinalizer(finalizer)

    # let each node propose and check that the collation appears at all
    # other nodes
    for proposer in syncers:
        # Fix: propose() is a coroutine (it is awaited in
        # test_syncer_proposing); without the await, `collation` would be a
        # bare coroutine object and the proposal would never actually run.
        collation = await proposer.propose()
        await asyncio.wait_for(
            asyncio.gather(*[
                syncer.collations_received_event.wait()
                for syncer in syncers
                if syncer != proposer
            ]),
            timeout=2,
        )
        for syncer in syncers:
            assert syncer.shard.get_collation_by_hash(
                collation.hash) == collation
async def bootstrap_test_setup(monkeypatch, request, event_loop, chain, tx_validator):
    """Wire two ETH peers into a running TxPool.

    Returns ``(peer1, peer1_txs_recorder, peer2, peer2_txs_recorder, pool)``.
    """
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
        peer1_class=ETHPeer,
        peer2_class=ETHPeer,
    )
    # Patch sub_proto.send_transactions on both peers so tests can inspect
    # which peer received what and how often the hook was invoked.
    peer1_txs_recorder = create_tx_recorder(monkeypatch, peer1)
    peer2_txs_recorder = create_tx_recorder(monkeypatch, peer2)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)
    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())
    request.addfinalizer(finalizer)

    return peer1, peer1_txs_recorder, peer2, peer2_txs_recorder, pool
async def test_new_collations_notification(request, event_loop):
    """Check that b forwards new-collation announcements from a to c, but
    never announces the same collation twice."""
    # a-b-c topology; c subscribes to the messages it receives from b
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(
        request, event_loop)
    peer_b_c, peer_c_b = await get_directly_linked_sharding_peers(
        request, event_loop)
    messages_at_c = asyncio.Queue()
    peer_c_b.add_subscriber(messages_at_c)

    # node b runs the syncer over both of its connections
    shard = Shard(ShardDB(MemoryDB()), 0)
    syncer = ShardSyncer(
        shard, MockPeerPoolWithConnectedPeers([peer_b_a, peer_b_c]))
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # a sends collation c1 to b; b must notify c about it
    c1 = next(collations)
    peer_a_b.sub_proto.send_collations(0, [c1])
    peer, cmd, msg = await asyncio.wait_for(
        messages_at_c.get(),
        timeout=1,
    )
    assert peer == peer_c_b
    assert isinstance(cmd, NewCollationHashes)
    assert msg["collation_hashes_and_periods"] == ((c1.hash, c1.period),)

    # re-sending c1 alongside a new collation c2 must only announce c2
    c2 = next(collations)
    peer_a_b.sub_proto.send_collations(0, [c1, c2])
    peer, cmd, msg = await asyncio.wait_for(
        messages_at_c.get(),
        timeout=1,
    )
    assert peer == peer_c_b
    assert isinstance(cmd, NewCollationHashes)
    assert msg["collation_hashes_and_periods"] == ((c2.hash, c2.period),)
async def test_collation_requests(request, event_loop):
    """Check GetCollations handling: known, unknown and mixed hash sets."""
    sender, receiver = await get_directly_linked_sharding_peers(
        request, event_loop)

    # shard db for the request-receiving node
    receiver_shard = Shard(ShardDB(MemoryDB()), 0)

    # three collations sharing one body (avoids recomputing chunk roots);
    # only the first two are added to the receiver's shard
    body = zpad_right(b"body", COLLATION_SIZE)
    chunk_root = calc_chunk_root(body)
    c1, c2, c3 = (
        Collation(
            CollationHeader(0, chunk_root, period, zpad_right(proposer, 20)),
            body,
        )
        for period, proposer in enumerate(
            [b"proposer1", b"proposer2", b"proposer3"])
    )
    receiver_shard.add_collation(c1)
    receiver_shard.add_collation(c2)

    receiver_syncer = ShardSyncer(
        receiver_shard, MockPeerPoolWithConnectedPeers([receiver]))
    asyncio.ensure_future(receiver_syncer.run())

    def finalizer():
        event_loop.run_until_complete(receiver_syncer.cancel())
    request.addfinalizer(finalizer)

    cancel_token = CancelToken("test")

    async def request_collations(hashes):
        # helper: issue one GetCollations request with a 1s deadline
        return await asyncio.wait_for(
            sender.get_collations(hashes, cancel_token),
            timeout=1,
        )

    # single known collation
    assert await request_collations([c1.hash]) == set([c1])
    # multiple known collations
    assert await request_collations([c1.hash, c2.hash]) == set([c1, c2])
    # empty request
    assert await request_collations([]) == set()
    # unknown collation only
    assert await request_collations([c3.hash]) == set()
    # mix of known and unknown hashes
    assert await request_collations([c1.hash, c2.hash, c3.hash]) == set([c1, c2])