async def test_fast_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    """Fast-sync an empty client chain from a 20-block server peer, then
    download the state trie for the resulting canonical head.
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(
        FrontierTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    server = RegularChainSyncer(
        FrontierTestChain(chaindb_20.db), chaindb_20, server_peer_pool)
    asyncio.ensure_future(server.run())

    def tear_down():
        event_loop.run_until_complete(server.cancel())
        # Yield control so that server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(tear_down)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=2)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_20.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(
        chaindb_fresh, chaindb_fresh.db, head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=2)

    assert head.state_root in chaindb_fresh.db
async def test_regular_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    """Regular-sync a fresh client chain from a 20-block server peer and
    check that the client's canonical head (and its state root) arrive in
    the client's database.
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(
        FrontierTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]))
    server = RegularChainSyncer(
        FrontierTestChain(chaindb_20.db),
        chaindb_20,
        MockPeerPoolWithConnectedPeers([server_peer]))
    asyncio.ensure_future(server.run())

    def finalizer():
        # NOTE: asyncio.gather()'s explicit ``loop`` argument was deprecated
        # in Python 3.8 and removed in 3.10; gather() derives the loop from
        # the coroutines themselves, so it must not be passed.
        event_loop.run_until_complete(asyncio.gather(
            client.cancel(),
            server.cancel(),
        ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))
    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    # Wait until the client has imported up to the server's head.
    await wait_for_head(client.db, server.db.get_canonical_head())
    head = client.db.get_canonical_head()
    assert head.state_root in client.db.db
# Example #3 (snippet-site separator: "Beispiel #3", score 0)
async def _test_sync_with_fixed_sync_parameters(
        request,
        event_loop,
        client_db,
        server_db,
        timestamp_to_sync_to,
        sync_stage_id,
        validation_function,
        blocks_to_import=None,
        blocks_to_import_from_rpc=False):
    """Sync a client node to a server node using a fixed set of sync
    parameters, then run ``validation_function(server_db, client_db)``.

    :param timestamp_to_sync_to: historical root-hash timestamp to sync to
    :param sync_stage_id: sync stage forced through the mocked consensus service
    :param validation_function: callback asserting the two databases agree
    :param blocks_to_import: optional blocks pushed onto the client's
        new-block import queue before waiting for sync
    :param blocks_to_import_from_rpc: mark queued blocks as originating from RPC
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, alice_db=client_db, bob_db=server_db)

    client_node = FakeMainnetFullNode(
        base_db=client_peer.context.base_db,
        priv_key=client_peer.context.chains[0].private_key,
    )

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])

    # Fast sync targets the root hash at the requested timestamp: the server's
    # hash is what we expect to end up with, the client's is the start state.
    expected_root_hash = server_peer.chain_head_db.get_historical_root_hash(
        timestamp_to_sync_to)
    existing_root_hash = client_peer.chain_head_db.get_historical_root_hash(
        timestamp_to_sync_to)

    client_sync_parameters = SyncParameters(timestamp_to_sync_to,
                                            existing_root_hash,
                                            expected_root_hash, [client_peer],
                                            sync_stage_id)

    client_consensus = MockConsensusService(
        sync_parameters=client_sync_parameters)

    client = RegularChainSyncer(
        context=client_peer.context,
        peer_pool=client_peer_pool,
        consensus=client_consensus,
        node=client_node,
    )

    server_node = FakeMainnetFullNode(
        base_db=server_peer.context.base_db,
        priv_key=server_peer.context.chains[0].private_key,
    )

    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])

    # The server believes it is fully synced and only serves requests.
    server_consensus = MockConsensusService(sync_parameters="fully-synced")

    server_context = server_peer.context
    # node_type = 4 presumably marks the server as a bootnode/full node —
    # TODO confirm against chain_config definitions.
    server_context.chain_config.node_type = 4
    server_context.chain_config.network_startup_node = True
    server = RegularChainSyncer(
        context=server_peer.context,
        peer_pool=server_peer_pool,
        consensus=server_consensus,
        node=server_node,
    )

    asyncio.ensure_future(server.run())

    def finalizer():
        # NOTE: asyncio.gather()'s ``loop`` argument was deprecated in
        # Python 3.8 and removed in 3.10, so it must not be passed here.
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
            ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    if blocks_to_import is not None:
        for block in blocks_to_import:
            new_block_queue_item = NewBlockQueueItem(
                block, from_rpc=blocks_to_import_from_rpc)
            client._new_blocks_to_import.put_nowait(new_block_queue_item)

    await wait_for_chain_head_hash(client.chain_head_db, expected_root_hash,
                                   timestamp_to_sync_to)

    # Give the nodes a moment to finish; they might still be writing to the database.
    await asyncio.sleep(0.2)

    validation_function(server_db, client_db)
# Example #4 (snippet-site separator: "Beispiel #4", score 0)
async def _test_sync_with_variable_sync_parameters(
        request,
        event_loop,
        client_db,
        server_db,
        validation_function,
        sync_stage_id_override=None,
        waiting_function=None,
        blocks_to_import=None,
        blocks_to_import_from_rpc=False):
    """Sync a client node to a server node, letting the mocked consensus
    service derive the sync parameters, then run
    ``validation_function(server_db, client_db)``.

    :param sync_stage_id_override: optional sync stage forced on the client's
        consensus service instead of the derived one
    :param waiting_function: optional ``await waiting_function(client, server)``
        used instead of waiting for both chain-head DBs to match
    :param blocks_to_import: optional blocks pushed onto the client's
        new-block import queue before waiting for sync
    :param blocks_to_import_from_rpc: mark queued blocks as originating from RPC
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, alice_db=client_db, bob_db=server_db)

    client_node = FakeMainnetFullNode(
        base_db=client_peer.context.base_db,
        priv_key=client_peer.context.chains[0].private_key,
    )

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])

    # The newest historical root hash on the server is the sync target.
    expected_root_hash_timestamp = server_peer.chain_head_db.get_historical_root_hashes(
    )[-1]

    client_consensus = MockConsensusService(
        client_peer.chain_head_db,
        client_peer_pool,
        chain_to_sync_to=server_peer.context.chains[0],
        sync_stage_override=sync_stage_id_override)

    client = RegularChainSyncer(
        context=client_peer.context,
        peer_pool=client_peer_pool,
        consensus=client_consensus,
        node=client_node,
    )

    server_node = FakeMainnetFullNode(
        base_db=server_peer.context.base_db,
        priv_key=server_peer.context.chains[0].private_key,
    )

    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])

    # The server believes it is fully synced and only serves requests.
    server_consensus = MockConsensusService(sync_parameters="fully-synced",
                                            peer_pool=server_peer_pool,
                                            is_server=True)

    server_context = server_peer.context
    # node_type = 4 presumably marks the server as a bootnode/full node —
    # TODO confirm against chain_config definitions.
    server_context.chain_config.node_type = 4
    server_context.chain_config.network_startup_node = True
    server = RegularChainSyncer(
        context=server_peer.context,
        peer_pool=server_peer_pool,
        consensus=server_consensus,
        node=server_node,
    )

    # Silence the server's log output to keep the test log readable.
    server.logger = logging.getLogger('dummy')

    asyncio.ensure_future(server.run())

    def finalizer():
        # NOTE: asyncio.gather()'s ``loop`` argument was deprecated in
        # Python 3.8 and removed in 3.10, so it must not be passed here.
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
            ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    if blocks_to_import is not None:
        for block in blocks_to_import:
            new_block_queue_item = NewBlockQueueItem(
                block, from_rpc=blocks_to_import_from_rpc)
            client._new_blocks_to_import.put_nowait(new_block_queue_item)

    if waiting_function is None:
        await wait_for_both_nodes_to_be_synced(client.chain_head_db,
                                               server.chain_head_db)
    else:
        await waiting_function(client, server)

    # Give the nodes a moment to finish; they might still be writing to the database.
    await asyncio.sleep(0.2)

    validation_function(server_db, client_db)