async def test_reorg_sync(request, event_loop, event_bus, genesis_state):
    """Alice must reorg onto bob's conflicting, longer branch during sync."""
    base = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root)
    # Two branches forking at genesis; distinct state roots keep them disjoint.
    short_fork = (base,) + SignedBeaconBlockFactory.create_branch(
        length=49, root=base, message__state_root=b"\x11" * 32)
    long_fork = (base,) + SignedBeaconBlockFactory.create_branch(
        length=99, root=base, message__state_root=b"\x22" * 32)
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, short_fork, long_fork
    ) as (alice, bob):
        # Bob's longer fork wins: alice must end up on it.
        assert_synced(alice, bob, long_fork)
async def test_sync_from_old_head(request, event_loop, event_bus, genesis_state):
    """Bob's chain strictly extends alice's: alice only needs the new tail."""
    genesis = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root)
    # Shared history, then bob builds 50 more blocks on top of alice's head.
    shared_history = (genesis,) + SignedBeaconBlockFactory.create_branch(
        length=49, root=genesis)
    extended_chain = shared_history + SignedBeaconBlockFactory.create_branch(
        length=50, root=shared_history[-1])
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, shared_history, extended_chain
    ) as (alice, bob):
        assert_synced(alice, bob, extended_chain)
async def test_request_beacon_blocks_by_root(monkeypatch):
    """Requesting blocks by root returns only the roots the peer knows about."""
    async with ConnectionPairFactory() as (alice, bob):
        blocks = SignedBeaconBlockFactory.create_branch(5)
        known_blocks = {block.signing_root: block for block in blocks}

        # Stand-in for bob's chain lookup, backed by the dict above.
        def get_block_by_root(root):
            validate_word(root)
            if root not in known_blocks:
                raise BlockNotFound
            return known_blocks[root]

        monkeypatch.setattr(bob.chain, "get_block_by_root", get_block_by_root)

        # Mix of known roots and roots bob has never seen.
        queried_roots = [
            blocks[0].signing_root,
            b"\x12" * 32,  # Unknown block root
            blocks[1].signing_root,
            b"\x23" * 32,  # Unknown block root
            blocks[3].signing_root,
        ]
        received = await alice.request_beacon_blocks_by_root(
            peer_id=bob.peer_id,
            block_roots=queried_roots,
        )

        # Unknown roots are silently dropped; the known ones all come back.
        expected = [blocks[0], blocks[1], blocks[3]]
        assert len(received) == len(expected)
        assert set(received) == set(expected)
async def test_bcc_receive_server_process_received_block(receive_server, monkeypatch):
    """Exercise the three outcomes of `_process_received_block`:

    1. an orphan block is parked in the orphan pool,
    2. a block failing validation is not persisted,
    3. a successfully imported block triggers `_try_import_orphan_blocks`.
    """
    block_not_orphan, block_orphan = SignedBeaconBlockFactory.create_branch(2)

    # test: if the block is an orphan, puts it in the orphan pool
    receive_server._process_received_block(block_orphan)
    assert (
        receive_server.orphan_block_pool.get(block_orphan.message.hash_tree_root)
        == block_orphan
    )

    # test: a block whose import raises `ValidationError` must not be persisted.
    # Fix: the keyword parameter was misspelled `performa_validation`; the real
    # `import_block` signature uses `perform_validation`, so a keyword call to
    # the stub would have raised `TypeError` instead of `ValidationError`.
    def import_block_raises_validation_error(block, perform_validation=True):
        raise ValidationError

    with monkeypatch.context() as m:
        m.setattr(
            receive_server.chain, "import_block", import_block_raises_validation_error
        )
        receive_server._process_received_block(block_not_orphan)
        assert not receive_server._is_block_root_in_db(
            block_not_orphan.message.hash_tree_root
        )

    # test: successfully imported the block, calls `self._try_import_orphan_blocks`
    event = asyncio.Event()

    def _try_import_orphan_blocks(parent_root):
        event.set()

    with monkeypatch.context() as m:
        m.setattr(
            receive_server, "_try_import_orphan_blocks", _try_import_orphan_blocks
        )
        receive_server._process_received_block(block_not_orphan)
        assert event.is_set()
async def test_sync_when_already_at_best_head(request, event_loop, event_bus, genesis_state):
    """Alice already owns the longer branch; syncing with bob must not change it."""
    genesis = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root)
    longer_branch = (genesis,) + SignedBeaconBlockFactory.create_branch(
        length=99, root=genesis, message__state_root=b"\x11" * 32)
    shorter_branch = (genesis,) + SignedBeaconBlockFactory.create_branch(
        length=50, root=genesis, message__state_root=b"\x22" * 32)
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, longer_branch, shorter_branch
    ) as (alice, bob):
        # Alice's head is untouched by the sync with the shorter peer.
        head = alice.chain.get_canonical_head()
        assert head.slot == 99
        # Every block of alice's original branch is still canonical.
        for expected_block in longer_branch:
            canonical = alice.chain.get_canonical_block_by_slot(expected_block.slot)
            assert canonical == expected_block
async def test_sync_skipped_slots(request, event_loop, event_bus, genesis_state):
    """Syncing works across a gap of skipped slots (bob jumps slot 0 -> 4)."""
    genesis = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root)
    # Alice only has genesis.
    alice_branch = (genesis,) + SignedBeaconBlockFactory.create_branch(
        length=0, root=genesis)
    # Bob's branch skips slots 1-3 entirely.
    bob_branch = (genesis,) + SignedBeaconBlockFactory.create_branch_by_slots(
        slots=tuple(range(4, 99)), root=genesis)
    assert bob_branch[0].slot == 0
    assert bob_branch[1].slot == 4
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, alice_branch, bob_branch
    ) as (alice, bob):
        assert_synced(alice, bob, bob_branch)
async def test_bcc_receive_server_try_import_orphan_blocks(receive_server):
    """`_try_import_orphan_blocks` imports pooled orphans only once their
    parent is in the db, and then cascades recursively to their descendants.

    Uses a 4-block chain: blocks[0] -> blocks[1] -> blocks[2] -> blocks[3].
    """
    blocks = SignedBeaconBlockFactory.create_branch(4)
    # Sanity: nothing is in the db until we import it.
    assert not receive_server._is_block_root_in_db(blocks[0].message.hash_tree_root)
    receive_server.chain.import_block(blocks[0])
    assert receive_server._is_block_root_in_db(blocks[0].message.hash_tree_root)

    # test: block without its parent in db should not be imported, and it should be put in the
    # `orphan_block_pool`.
    receive_server.orphan_block_pool.add(blocks[2])
    # test: No effect when calling `_try_import_orphan_blocks`
    # if the `parent_root` is not in db.
    assert blocks[2].parent_root == blocks[1].message.hash_tree_root
    receive_server._try_import_orphan_blocks(blocks[2].parent_root)
    # blocks[2] stays out of the db and remains parked in the orphan pool.
    assert not receive_server._is_block_root_in_db(blocks[2].parent_root)
    assert not receive_server._is_block_root_in_db(blocks[2].message.hash_tree_root)
    assert receive_server._is_block_root_in_orphan_block_pool(
        blocks[2].message.hash_tree_root
    )

    receive_server.orphan_block_pool.add(blocks[3])
    # test: No effect when calling `_try_import_orphan_blocks` if `parent_root` is in the pool
    # but not in db.
    assert blocks[3].parent_root == blocks[2].message.hash_tree_root
    receive_server._try_import_orphan_blocks(blocks[2].message.hash_tree_root)
    assert not receive_server._is_block_root_in_db(blocks[2].message.hash_tree_root)
    assert not receive_server._is_block_root_in_db(blocks[3].message.hash_tree_root)
    assert receive_server._is_block_root_in_orphan_block_pool(
        blocks[3].message.hash_tree_root
    )

    # test: a successfully imported parent is present, its children should be processed
    # recursively.
    receive_server.chain.import_block(blocks[1])
    receive_server._try_import_orphan_blocks(blocks[1].message.hash_tree_root)
    # blocks[2] and blocks[3] are imported in cascade and leave the pool.
    assert receive_server._is_block_root_in_db(blocks[1].message.hash_tree_root)
    assert receive_server._is_block_root_in_db(blocks[2].message.hash_tree_root)
    assert receive_server._is_block_root_in_db(blocks[3].message.hash_tree_root)
    assert not receive_server._is_block_root_in_orphan_block_pool(
        blocks[2].message.hash_tree_root
    )
    assert not receive_server._is_block_root_in_orphan_block_pool(
        blocks[3].message.hash_tree_root
    )
async def test_bcc_receive_server_handle_orphan_block_loop(
    receive_server_with_mock_process_orphan_blocks_period, monkeypatch
):
    """The orphan-block loop asks handshaked peers for the missing ancestors of
    pooled orphans and drains the pool once the full chain can be imported.
    """
    receive_server = receive_server_with_mock_process_orphan_blocks_period
    # block dependency graph
    # block 1 -- block 2 -- block 3 -- block 4 -- block 5
    #         |           \
    #         |             block 3'
    #          block 3''
    #
    # block 5, 3' and 3'' are orphan blocks
    #
    # First iteration will request block 4 and block 2 and import block 2, block 3' and block 3'',
    # second iteration will request block 3 and import block 3, block 4 and block 5.
    blocks = SignedBeaconBlockFactory.create_branch(5)
    # Two forks of blocks[2], differing only in state root.
    fork_blocks = (
        blocks[2].transform(("message", "state_root"), b"\x01" * 32),
        blocks[2].transform(("message", "state_root"), b"\x12" * 32),
    )
    # Each mock peer serves a disjoint slice of the chain.
    mock_peer_1_db = {block.message.hash_tree_root: block for block in blocks[3:]}
    mock_peer_2_db = {block.message.hash_tree_root: block for block in blocks[:3]}
    receive_server.chain.import_block(blocks[0])

    peer1 = PeerFactory(node=receive_server.p2p_node)
    peer2 = PeerFactory(node=receive_server.p2p_node)
    # Events recording that each peer was actually queried by the loop.
    peer_1_called_event = asyncio.Event()
    peer_2_called_event = asyncio.Event()

    async def request_beacon_blocks_by_root(peer_id, block_roots):
        # Stub for the p2p request: answer from the matching peer's db and
        # record that the peer was contacted.
        requested_blocks = []
        db = {}
        if peer_id == peer1._id:
            db = mock_peer_1_db
            peer_1_called_event.set()
        elif peer_id == peer2._id:
            db = mock_peer_2_db
            peer_2_called_event.set()
        for block_root in block_roots:
            if block_root in db:
                requested_blocks.append(db[block_root])
        return requested_blocks

    with monkeypatch.context() as m:
        # Seed the pool with the orphans (block 5, 3', 3'').
        for orphan_block in (blocks[4],) + fork_blocks:
            receive_server.orphan_block_pool.add(orphan_block)
        await wait_until_true(
            lambda: len(receive_server.orphan_block_pool) != 0, timeout=4
        )
        for peer in (peer1, peer2):
            receive_server.p2p_node.handshaked_peers.add(peer)
        m.setattr(
            receive_server.p2p_node,
            "request_beacon_blocks_by_root",
            request_beacon_blocks_by_root,
        )
        # Wait for receive server to process the orphan blocks
        await wait_until_true(
            lambda: len(receive_server.orphan_block_pool) == 0, timeout=4
        )
        # Check that both peers were requested for blocks
        assert peer_1_called_event.is_set()
        assert peer_2_called_event.is_set()
        # Check that all blocks are processed and no more orphan blocks
        for block in blocks + fork_blocks:
            assert receive_server._is_block_root_in_db(block.message.hash_tree_root)