def test_orphan_block_pool():
    """Exercise OrphanBlockPool: add, dedup, ``__contains__``, get, pop_children.

    BUGFIX: removed a leftover debug ``print(b2 not in pool)`` statement that
    polluted test output.
    """
    pool = OrphanBlockPool()
    b0 = SignedBeaconBlockFactory()
    b1 = SignedBeaconBlockFactory(parent=b0)
    # Same parent as b1 but a different state root, so a distinct block.
    b2 = SignedBeaconBlockFactory(parent=b0, message__state_root=b"\x11" * 32)

    # test: add
    pool.add(b1)
    assert b1 in pool._pool
    assert len(pool._pool) == 1

    # test: add: no side effect for adding twice
    pool.add(b1)
    assert len(pool._pool) == 1

    # test: `__contains__` accepts both a block and its hash tree root
    assert b1 in pool
    assert b1.message.hash_tree_root in pool
    assert b2 not in pool
    assert b2.message.hash_tree_root not in pool

    # test: add: two blocks
    pool.add(b2)
    assert len(pool._pool) == 2

    # test: get
    assert pool.get(b1.message.hash_tree_root) == b1
    assert pool.get(b2.message.hash_tree_root) == b2

    # test: pop_children
    b2_children = pool.pop_children(b2.message.hash_tree_root)
    assert len(b2_children) == 0
    assert len(pool._pool) == 2
    b0_children = pool.pop_children(b0.message.hash_tree_root)
    assert len(b0_children) == 2 and (b1 in b0_children) and (b2 in b0_children)
    assert len(pool._pool) == 0
async def test_reorg_sync(request, event_loop, event_bus, genesis_state):
    """Alice should reorg onto Bob's longer competing branch."""
    genesis_block = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root
    )
    # Two branches forking off the same genesis, distinguished by state root.
    alice_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch(
        length=49,
        root=genesis_block,
        message__state_root=b"\x11" * 32,
    )
    bob_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch(
        length=99,
        root=genesis_block,
        message__state_root=b"\x22" * 32,
    )
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, alice_branch, bob_branch
    ) as (alice, bob):
        # Bob's branch is longer, so both nodes end up on it.
        assert_synced(alice, bob, bob_branch)
async def test_sync_from_old_head(request, event_loop, event_bus, genesis_state):
    """Alice, 50 blocks behind on the same chain, catches up to Bob's head."""
    genesis_block = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root
    )
    alice_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch(
        length=49, root=genesis_block
    )
    # Bob's branch strictly extends Alice's branch by another 50 blocks.
    bob_branch = alice_branch + SignedBeaconBlockFactory.create_branch(
        length=50, root=alice_branch[-1]
    )
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, alice_branch, bob_branch
    ) as (alice, bob):
        assert_synced(alice, bob, bob_branch)
async def test_sync_skipped_slots(request, event_loop, event_bus, genesis_state):
    """Sync must cope with a peer chain that skips slots (gap after genesis)."""
    genesis_block = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root
    )
    alice_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch(
        length=0, root=genesis_block
    )
    # Bob's chain jumps from slot 0 straight to slot 4 (slots 1-3 skipped).
    bob_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch_by_slots(
        slots=tuple(range(4, 99)), root=genesis_block
    )
    assert bob_branch[0].slot == 0
    assert bob_branch[1].slot == 4
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, alice_branch, bob_branch
    ) as (alice, bob):
        assert_synced(alice, bob, bob_branch)
async def test_request_beacon_blocks_by_root(monkeypatch):
    """Alice requests blocks by root; unknown roots are silently skipped."""
    async with ConnectionPairFactory() as (alice, bob):
        blocks = SignedBeaconBlockFactory.create_branch(5)
        root_to_block = {block.signing_root: block for block in blocks}

        def get_block_by_root(root):
            validate_word(root)
            try:
                return root_to_block[root]
            except KeyError:
                raise BlockNotFound

        monkeypatch.setattr(bob.chain, "get_block_by_root", get_block_by_root)

        # Interleave roots Bob knows with roots he has never seen.
        requesting_block_roots = [
            blocks[0].signing_root,
            b"\x12" * 32,  # Unknown block root
            blocks[1].signing_root,
            b"\x23" * 32,  # Unknown block root
            blocks[3].signing_root,
        ]
        requested_blocks = await alice.request_beacon_blocks_by_root(
            peer_id=bob.peer_id, block_roots=requesting_block_roots
        )

        expected_blocks = [blocks[0], blocks[1], blocks[3]]
        assert len(requested_blocks) == len(expected_blocks)
        assert set(requested_blocks) == set(expected_blocks)
async def test_get_blocks_from_fork_chain_by_root(
    fork_chain_block_slots, slot_of_requested_blocks, expected_block_slots
):
    """Only blocks whose slots fall in the requested range are returned."""
    fork_chain_blocks = SignedBeaconBlockFactory.create_branch_by_slots(
        fork_chain_block_slots
    )
    root_to_block = {
        block.message.hash_tree_root: block for block in fork_chain_blocks
    }

    class Chain:
        # Minimal chain stand-in exposing only the lookup the helper needs.
        def get_block_by_root(self, root):
            try:
                return root_to_block[root]
            except KeyError:
                raise BlockNotFound

    requested_blocks = get_blocks_from_fork_chain_by_root(
        chain=Chain(),
        start_slot=slot_of_requested_blocks[0],
        peer_head_block=fork_chain_blocks[-1],
        slot_of_requested_blocks=slot_of_requested_blocks,
    )

    expected_blocks = [
        block
        for block in fork_chain_blocks
        if block.slot in expected_block_slots
    ]
    assert len(requested_blocks) == len(expected_blocks)
    assert set(requested_blocks) == set(expected_blocks)
async def test_bcc_receive_server_process_received_block(receive_server, monkeypatch):
    """Cover the three outcomes of ``_process_received_block``:
    orphan pooling, failed validation, and successful import.

    BUGFIX: the mock replacing ``chain.import_block`` had its keyword
    misspelled as ``performa_validation``; renamed to ``perform_validation``
    so a keyword call through the patched attribute cannot TypeError.
    """
    block_not_orphan, block_orphan = SignedBeaconBlockFactory.create_branch(2)

    # test: if the block is an orphan, puts it in the orphan pool
    receive_server._process_received_block(block_orphan)
    assert (
        receive_server.orphan_block_pool.get(block_orphan.message.hash_tree_root)
        == block_orphan
    )

    # test: should return `False` if `ValidationError` occurs.
    def import_block_raises_validation_error(block, perform_validation=True):
        raise ValidationError

    with monkeypatch.context() as m:
        m.setattr(
            receive_server.chain, "import_block", import_block_raises_validation_error
        )
        receive_server._process_received_block(block_not_orphan)
        # Import failed, so the block must not have made it into the db.
        assert not receive_server._is_block_root_in_db(
            block_not_orphan.message.hash_tree_root
        )

    # test: successfully imported the block, calls `self._try_import_orphan_blocks`
    event = asyncio.Event()

    def _try_import_orphan_blocks(parent_root):
        event.set()

    with monkeypatch.context() as m:
        m.setattr(
            receive_server, "_try_import_orphan_blocks", _try_import_orphan_blocks
        )
        receive_server._process_received_block(block_not_orphan)
        assert event.is_set()
async def get_fake_chain() -> FakeChain:
    """Build a ``FakeChain`` over a fresh DB seeded with only a genesis block."""
    genesis_config = Eth2GenesisConfig(MINIMAL_SERENITY_CONFIG)
    chain_db = AsyncBeaconChainDBFactory(genesis_config=genesis_config)
    genesis_block = SignedBeaconBlockFactory()
    chain_db.persist_block(
        genesis_block, SerenitySignedBeaconBlock, HigherSlotScoring()
    )
    return FakeChain(base_db=chain_db.db, genesis_config=genesis_config)
async def test_sync_when_already_at_best_head(request, event_loop, event_bus, genesis_state):
    """Alice already holds the longest chain; syncing with Bob must not change it."""
    genesis_block = SignedBeaconBlockFactory(
        message__state_root=genesis_state.hash_tree_root
    )
    alice_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch(
        length=99, root=genesis_block, message__state_root=b"\x11" * 32
    )
    # Bob's competing branch is shorter, so it should never win.
    bob_branch = (genesis_block,) + SignedBeaconBlockFactory.create_branch(
        length=50, root=genesis_block, message__state_root=b"\x22" * 32
    )
    async with get_sync_setup(
        request, event_loop, event_bus, genesis_state, alice_branch, bob_branch
    ) as (alice, bob):
        alice_head = alice.chain.get_canonical_head()
        assert alice_head.slot == 99
        # Every slot of Alice's canonical chain still matches her own branch.
        for correct_block in alice_branch:
            canonical_block = alice.chain.get_canonical_block_by_slot(
                correct_block.slot
            )
            assert canonical_block == correct_block
async def test_bcc_receive_server_handle_beacon_attestations(receive_server):
    """A gossiped attestation enters the pool, then leaves it once on-chain."""
    attestation = Attestation.create()
    attestation_msg = rpc_pb2.Message(
        from_id=b"my_id",
        seqno=b"\x00" * 8,
        data=ssz.encode(attestation),
        topicIDs=[PUBSUB_TOPIC_BEACON_ATTESTATION],
    )
    assert attestation not in receive_server.unaggregated_attestation_pool

    attestation_queue = receive_server.topic_msg_queues[
        PUBSUB_TOPIC_BEACON_ATTESTATION]
    # Wait for receive server to process the new attestation
    await attestation_queue.put(attestation_msg)
    await wait_all_messages_processed(attestation_queue)
    # Check that attestation is put to attestation pool
    assert attestation in receive_server.unaggregated_attestation_pool

    # Put the attestation in the next block
    block = SignedBeaconBlockFactory(
        parent=receive_server.chain.get_canonical_head())
    block = block.transform(("message", "body", "attestations"), (attestation,))
    block_msg = rpc_pb2.Message(
        from_id=b"my_id",
        seqno=b"\x00" * 8,
        data=ssz.encode(block, SignedBeaconBlock),
        topicIDs=[PUBSUB_TOPIC_BEACON_BLOCK],
    )
    block_queue = receive_server.topic_msg_queues[PUBSUB_TOPIC_BEACON_BLOCK]
    # Wait for receive server to process the new block
    await block_queue.put(block_msg)
    await wait_all_messages_processed(block_queue)
    # Once included in a block, the attestation is dropped from the pool.
    assert attestation not in receive_server.unaggregated_attestation_pool
async def test_bcc_receive_server_handle_beacon_blocks(receive_server):
    """A block gossiped on the block topic becomes the new canonical head."""
    current_head = receive_server.chain.get_canonical_head()
    block = SignedBeaconBlockFactory(parent=current_head)
    msg = rpc_pb2.Message(
        from_id=b"my_id",
        seqno=b"\x00" * 8,
        data=ssz.encode(block, SignedBeaconBlock),
        topicIDs=[PUBSUB_TOPIC_BEACON_BLOCK],
    )
    assert receive_server.chain.get_canonical_head() != block

    block_queue = receive_server.topic_msg_queues[PUBSUB_TOPIC_BEACON_BLOCK]
    await block_queue.put(msg)
    await wait_all_messages_processed(block_queue)
    assert receive_server.chain.get_canonical_head() == block
async def test_bcc_receive_server_try_import_orphan_blocks(receive_server):
    """``_try_import_orphan_blocks`` imports pooled descendants recursively,
    but only once their parent is actually present in the db."""
    blocks = SignedBeaconBlockFactory.create_branch(4)
    assert not receive_server._is_block_root_in_db(blocks[0].message.hash_tree_root)
    receive_server.chain.import_block(blocks[0])
    assert receive_server._is_block_root_in_db(blocks[0].message.hash_tree_root)

    # A block whose parent is missing from the db stays in the orphan pool.
    receive_server.orphan_block_pool.add(blocks[2])
    # No effect when calling `_try_import_orphan_blocks`
    # if the `parent_root` is not in db.
    assert blocks[2].parent_root == blocks[1].message.hash_tree_root
    receive_server._try_import_orphan_blocks(blocks[2].parent_root)
    assert not receive_server._is_block_root_in_db(blocks[2].parent_root)
    assert not receive_server._is_block_root_in_db(blocks[2].message.hash_tree_root)
    assert receive_server._is_block_root_in_orphan_block_pool(
        blocks[2].message.hash_tree_root
    )

    receive_server.orphan_block_pool.add(blocks[3])
    # Still no effect when the `parent_root` is only in the pool, not in db.
    assert blocks[3].parent_root == blocks[2].message.hash_tree_root
    receive_server._try_import_orphan_blocks(blocks[2].message.hash_tree_root)
    assert not receive_server._is_block_root_in_db(blocks[2].message.hash_tree_root)
    assert not receive_server._is_block_root_in_db(blocks[3].message.hash_tree_root)
    assert receive_server._is_block_root_in_orphan_block_pool(
        blocks[3].message.hash_tree_root
    )

    # Once the parent is imported, its pooled descendants are imported
    # recursively and removed from the orphan pool.
    receive_server.chain.import_block(blocks[1])
    receive_server._try_import_orphan_blocks(blocks[1].message.hash_tree_root)
    for imported in (blocks[1], blocks[2], blocks[3]):
        assert receive_server._is_block_root_in_db(imported.message.hash_tree_root)
    for former_orphan in (blocks[2], blocks[3]):
        assert not receive_server._is_block_root_in_orphan_block_pool(
            former_orphan.message.hash_tree_root
        )
async def test_get_blocks_from_canonical_chain_by_slot(
        db_block_slots, slot_of_requested_blocks, expected_block_slots):
    """Requested slots present in the canonical chain are returned; gaps skipped."""
    canonical_chain = SignedBeaconBlockFactory.create_branch_by_slots(db_block_slots)
    # Mock up block database keyed by slot.
    slot_to_block = {block.slot: block for block in canonical_chain}

    class Chain:
        def get_canonical_block_by_slot(self, slot):
            try:
                return slot_to_block[slot]
            except KeyError:
                raise BlockNotFound

    result_blocks = get_blocks_from_canonical_chain_by_slot(
        chain=Chain(), slot_of_requested_blocks=slot_of_requested_blocks
    )

    expected_blocks = [slot_to_block[slot] for slot in expected_block_slots]
    assert len(result_blocks) == len(expected_blocks)
    assert set(result_blocks) == set(expected_blocks)
async def test_request_beacon_blocks_by_range_invalid_request(monkeypatch):
    """Two invalid by-range requests must fail: start_slot past the peer's head,
    and start_slot below the peer's latest finalized slot."""
    async with ConnectionPairFactory() as (alice, bob):
        head_slot = 1
        request_head_block_root = b"\x56" * 32
        head_block = SignedBeaconBlockFactory(message__slot=head_slot)

        # TEST: Can not request blocks with `start_slot` greater than head block slot
        start_slot = 2

        def get_block_by_root(root):
            return head_block

        monkeypatch.setattr(bob.chain, "get_block_by_root", get_block_by_root)

        count = 1
        step = 1
        with pytest.raises(RequestFailure):
            await alice.request_beacon_blocks_by_range(
                peer_id=bob.peer_id,
                head_block_root=request_head_block_root,
                start_slot=start_slot,
                count=count,
                step=step,
            )

        # TEST: Can not request fork chain blocks with `start_slot`
        # lower than peer's latest finalized slot
        start_slot = head_slot
        state_machine = bob.chain.get_state_machine()
        old_state = bob.chain.get_head_state()
        # Bump the finalized epoch so the requested slot falls below it.
        new_checkpoint = old_state.finalized_checkpoint.set(
            "epoch", old_state.finalized_checkpoint.epoch + 1
        )

        def get_canonical_block_by_slot(slot):
            raise BlockNotFound

        monkeypatch.setattr(
            bob.chain, "get_canonical_block_by_slot", get_canonical_block_by_slot
        )

        def get_state_machine(at_slot=None):
            class MockStateMachine:
                state = old_state.set("finalized_checkpoint", new_checkpoint)
                config = state_machine.config

            return MockStateMachine()

        def get_head_state():
            return old_state.set("finalized_checkpoint", new_checkpoint)

        monkeypatch.setattr(bob.chain, "get_state_machine", get_state_machine)
        monkeypatch.setattr(bob.chain, "get_head_state", get_head_state)

        with pytest.raises(RequestFailure):
            await alice.request_beacon_blocks_by_range(
                peer_id=bob.peer_id,
                head_block_root=request_head_block_root,
                start_slot=start_slot,
                count=count,
                step=step,
            )
async def test_bcc_receive_server_handle_orphan_block_loop(
    receive_server_with_mock_process_orphan_blocks_period, monkeypatch
):
    """The orphan-block loop asks peers for missing parents until the pool drains.

    Block dependency graph:

        block 1 -- block 2 -- block 3 -- block 4 -- block 5
                   |       \
                   |        block 3'
                   block 3''

    block 5, 3' and 3'' start as orphan blocks.

    First iteration requests block 4 and block 2 and imports block 2,
    block 3' and block 3''; the second iteration requests block 3 and
    imports block 3, block 4 and block 5.
    """
    receive_server = receive_server_with_mock_process_orphan_blocks_period
    blocks = SignedBeaconBlockFactory.create_branch(5)
    # Two forks of block 3, distinguished only by state root.
    fork_blocks = (
        blocks[2].transform(("message", "state_root"), b"\x01" * 32),
        blocks[2].transform(("message", "state_root"), b"\x12" * 32),
    )
    # Peer 1 serves the tail of the chain, peer 2 the beginning.
    mock_peer_1_db = {block.message.hash_tree_root: block for block in blocks[3:]}
    mock_peer_2_db = {block.message.hash_tree_root: block for block in blocks[:3]}
    receive_server.chain.import_block(blocks[0])

    peer1 = PeerFactory(node=receive_server.p2p_node)
    peer2 = PeerFactory(node=receive_server.p2p_node)
    peer_1_called_event = asyncio.Event()
    peer_2_called_event = asyncio.Event()

    async def request_beacon_blocks_by_root(peer_id, block_roots):
        db = {}
        if peer_id == peer1._id:
            db = mock_peer_1_db
            peer_1_called_event.set()
        elif peer_id == peer2._id:
            db = mock_peer_2_db
            peer_2_called_event.set()
        return [db[root] for root in block_roots if root in db]

    with monkeypatch.context() as m:
        for orphan_block in (blocks[4],) + fork_blocks:
            receive_server.orphan_block_pool.add(orphan_block)
        await wait_until_true(
            lambda: len(receive_server.orphan_block_pool) != 0, timeout=4
        )
        for peer in (peer1, peer2):
            receive_server.p2p_node.handshaked_peers.add(peer)
        m.setattr(
            receive_server.p2p_node,
            "request_beacon_blocks_by_root",
            request_beacon_blocks_by_root,
        )
        # Wait for receive server to process the orphan blocks
        await wait_until_true(
            lambda: len(receive_server.orphan_block_pool) == 0, timeout=4
        )
        # Check that both peers were requested for blocks
        assert peer_1_called_event.is_set()
        assert peer_2_called_event.is_set()
        # Check that all blocks are processed and no more orphan blocks
        for block in blocks + fork_blocks:
            assert receive_server._is_block_root_in_db(block.message.hash_tree_root)