async def test_persisting_and_looking_up():
    """A block's witness hashes round-trip through the DB; unknown blocks raise."""
    wit_db = AsyncWitnessDB(AtomicDB())
    block_hash = Hash32Factory()

    # Before anything is persisted, the lookup must fail loudly.
    with pytest.raises(WitnessHashesUnavailable):
        await wit_db.coro_get_witness_hashes(block_hash)

    witnesses = tuple(Hash32Factory.create_batch(5))
    await wit_db.coro_persist_witness_hashes(block_hash, witnesses)

    # The stored hashes come back exactly as persisted.
    assert await wit_db.coro_get_witness_hashes(block_hash) == witnesses
async def test_witness_eviction_on_repeat_blocks():
    """
    After witnesses are persisted twice for the same block, make sure that
    eviction does not cause any failures.
    """
    wit_db = AsyncWitnessDB(MemoryDB())

    # Persist twice for one block, leaving a duplicate entry in the history.
    repeated_hash = Hash32Factory()
    for _ in range(2):
        await wit_db.coro_persist_witness_hashes(
            repeated_hash, Hash32Factory.create_batch(2))

    # Flood the history with fresh blocks so the duplicated entry gets evicted.
    for _ in range(wit_db._max_witness_history):
        await wit_db.coro_persist_witness_hashes(
            Hash32Factory(), Hash32Factory.create_batch(2))
async def test_witness_union():
    """Persisting witnesses twice for one block stores the union of both sets."""
    wit_db = AsyncWitnessDB(MemoryDB())
    block_hash = Hash32Factory()

    # Two overlapping witness sets: each persist call shares `shared` but
    # also contributes its own unique hashes.
    only_first = set(Hash32Factory.create_batch(3))
    only_second = set(Hash32Factory.create_batch(3))
    shared = set(Hash32Factory.create_batch(2))

    await wit_db.coro_persist_witness_hashes(block_hash, tuple(only_first | shared))
    await wit_db.coro_persist_witness_hashes(block_hash, tuple(only_second | shared))

    stored = await wit_db.coro_get_witness_hashes(block_hash)
    assert set(stored) == only_first | only_second | shared
async def test_witness_history_on_repeat_blocks():
    """
    Repeated blocks should not consume more slots in the limited history
    of block witnesses.
    """
    wit_db = AsyncWitnessDB(MemoryDB())

    first_hash = Hash32Factory()
    first_witnesses = tuple(Hash32Factory.create_batch(5))
    await wit_db.coro_persist_witness_hashes(first_hash, first_witnesses)

    second_hash = Hash32Factory()
    await wit_db.coro_persist_witness_hashes(
        second_hash, tuple(Hash32Factory.create_batch(5)))

    # *almost* push the first witness out of history
    for _ in range(wit_db._max_witness_history - 2):
        await wit_db.coro_persist_witness_hashes(
            Hash32Factory(), Hash32Factory.create_batch(2))

    # It should still be there...
    assert set(await wit_db.coro_get_witness_hashes(first_hash)) == set(first_witnesses)

    # Add one more new witness, for an existing block
    await wit_db.coro_persist_witness_hashes(second_hash, Hash32Factory.create_batch(2))

    # That new witness should *not* consume a block slot in history, so the
    # first hash's witness should still be available.
    assert set(await wit_db.coro_get_witness_hashes(first_hash)) == set(first_witnesses)
async def test_witness_for_recent_blocks():
    """Witnesses survive exactly ``_max_witness_history`` blocks, then get flushed."""
    wit_db = AsyncWitnessDB(AtomicDB())
    tracked_hash = Hash32Factory()
    tracked_witnesses = tuple(Hash32Factory.create_batch(5))
    await wit_db.coro_persist_witness_hashes(tracked_hash, tracked_witnesses)

    # *almost* push the first witness out of history
    for _ in range(wit_db._max_witness_history - 1):
        await wit_db.coro_persist_witness_hashes(
            Hash32Factory(), Hash32Factory.create_batch(2))

    # It should still be there...
    assert await wit_db.coro_get_witness_hashes(tracked_hash) == tracked_witnesses

    # Until one more new witness is added.
    await wit_db.coro_persist_witness_hashes(
        Hash32Factory(), Hash32Factory.create_batch(2))

    # Now the old witness has been flushed out of the history
    with pytest.raises(WitnessHashesUnavailable):
        await wit_db.coro_get_witness_hashes(tracked_hash)

    # The history never grows past the configured maximum.
    assert len(wit_db._get_recent_blocks_with_witnesses()) == wit_db._max_witness_history
async def test_proxy_peer_requests(request,
                                   event_bus,
                                   other_event_bus,
                                   event_loop,
                                   chaindb_20,
                                   client_and_server):
    """
    End-to-end exercise of proxy-peer requests.

    Wires up event servers for the client and server peer pools, runs the ETH
    and Witness request servers over the server's chain DB, then issues
    header / receipt / block-body / node-data / witness-hash requests through
    a client-side proxy peer and checks each reply against the genesis header.
    """
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        # Peer-pool event servers route request events on both buses.
        await stack.enter_async_context(run_peer_pool_event_server(
            client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(run_peer_pool_event_server(
            server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer))

        base_db = chaindb_20.db
        # The server side answers ETH protocol requests from the chain DB...
        await stack.enter_async_context(background_asyncio_service(
            ETHRequestServer(
                server_event_bus,
                TO_NETWORKING_BROADCAST_CONFIG,
                AsyncChainDB(base_db),
            )))
        # ...and Witness protocol requests from the raw DB.
        await stack.enter_async_context(background_asyncio_service(
            WitRequestServer(
                server_event_bus,
                TO_NETWORKING_BROADCAST_CONFIG,
                base_db,
            )))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(
            server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        # Genesis header.
        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)
        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        # Receipts bundle: (receipts, (receipt_root, trie)).
        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        # Block-body bundle: second element carries the transaction root.
        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data(
            (block_header.state_root,))
        assert node_data[0][0] == block_header.state_root

        block_hash = block_header.hash
        node_hashes = tuple(Hash32Factory.create_batch(5))
        # Populate the server's witness DB so that it can reply to our request.
        wit_db = AsyncWitnessDB(base_db)
        # FIX: use the awaited coroutine API, consistent with the other tests
        # in this module (the original called `persist_witness_hashes` without
        # awaiting; if that name is the coroutine variant, the hashes would
        # never actually be persisted before the request below).
        await wit_db.coro_persist_witness_hashes(block_hash, node_hashes)

        response = await proxy_peer.wit_api.get_block_witness_hashes(block_hash)
        assert set(response) == set(node_hashes)