Example #1
async def test_fast_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, ETHPeer, FakeAsyncHeaderDB(chaindb_fresh.db),
        ETHPeer, FakeAsyncHeaderDB(chaindb_20.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(FrontierTestChain(chaindb_fresh.db),
                             chaindb_fresh, client_peer_pool)
    server = RegularChainSyncer(FrontierTestChain(chaindb_20.db), chaindb_20,
                                MockPeerPoolWithConnectedPeers([server_peer]))
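    # The serving side runs in the background; the finalizer below cancels it
    # once the test finishes.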
    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(server.cancel())
        # Yield control so that server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=2)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_20.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(chaindb_fresh, chaindb_fresh.db,
                                       head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=2)

    assert head.state_root in chaindb_fresh.db
Example #2
async def test_regular_syncer(request, event_loop, event_bus, chaindb_fresh,
                              chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(ByzantiumTestChain(chaindb_fresh.db),
                                chaindb_fresh,
                                MockPeerPoolWithConnectedPeers([client_peer]))
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                      event_bus=event_bus)

    async with run_peer_pool_event_server(
            event_bus, server_peer_pool,
            handler_type=ETHPeerPoolEventServer), run_request_server(
                event_bus, FakeAsyncChainDB(chaindb_20.db)):
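        # The request server answers the client's header and body requests from
        # the server's 20-block chain database.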

        server_peer.logger.info("%s is serving 20 blocks", server_peer)
        client_peer.logger.info("%s is syncing up 20", client_peer)

        def finalizer():
            event_loop.run_until_complete(client.cancel())
            # Yield control so that client/server.run() returns, otherwise asyncio will complain.
            event_loop.run_until_complete(asyncio.sleep(0.1))

        request.addfinalizer(finalizer)

        asyncio.ensure_future(client.run())

        await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
        head = chaindb_fresh.get_canonical_head()
        assert head.state_root in chaindb_fresh.db
Example #3
async def test_regular_syncer(request, event_loop, event_bus, chaindb_fresh,
                              chaindb_20):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
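    # Entering the factory yields a directly connected (client, server) peer pair
    # backed by the two databases above.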

    async with peer_pair as (client_peer, server_peer):

        client = RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_20.db))):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(chaindb_fresh,
                                    chaindb_20.get_canonical_head())
                head = chaindb_fresh.get_canonical_head()
                assert head.state_root in chaindb_fresh.db
Example #4
async def test_skeleton_syncer(request, event_loop, chaindb_fresh,
                               chaindb_1000):
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_1000.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(ByzantiumTestChain(chaindb_fresh.db),
                             chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    server = RegularChainSyncer(
        ByzantiumTestChain(chaindb_1000.db),
        chaindb_1000,
        server_peer_pool,
    )
    asyncio.ensure_future(server.run())
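    # The request server answers the client's data requests (e.g. headers and
    # block bodies) from the 1000-block chain database.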
    server_request_handler = ETHRequestServer(
        FakeAsyncChainDB(chaindb_1000.db), server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    server_peer.logger.info("%s is serving 1000 blocks", server_peer)
    client_peer.logger.info("%s is syncing up 1000 blocks", client_peer)

    def finalizer():
        event_loop.run_until_complete(server.cancel())
        # Yield control so that server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=20)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_1000.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(chaindb_fresh, chaindb_fresh.db,
                                       head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=20)

    assert head.state_root in chaindb_fresh.db
Example #5
async def test_regular_syncer(request, event_loop, event_bus, chaindb_fresh,
                              chaindb_20):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )

    async with peer_pair as (client_peer, server_peer):

        client = RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_20.db))):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            def finalizer():
                event_loop.run_until_complete(client.cancel())
                # Yield control so that client/server.run() returns, otherwise
                # asyncio will complain.
                event_loop.run_until_complete(asyncio.sleep(0.1))

            request.addfinalizer(finalizer)

            asyncio.ensure_future(client.run())
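            # The client syncs in the background; wait_for_head polls until its
            # canonical head matches the server's.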

            await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
            head = chaindb_fresh.get_canonical_head()
            assert head.state_root in chaindb_fresh.db
Example #6
async def test_regular_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, ETHPeer, FakeAsyncHeaderDB(chaindb_fresh.db),
        ETHPeer, FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(FrontierTestChain(chaindb_fresh.db),
                                chaindb_fresh,
                                MockPeerPoolWithConnectedPeers([client_peer]))
    server = RegularChainSyncer(FrontierTestChain(chaindb_20.db), chaindb_20,
                                MockPeerPoolWithConnectedPeers([server_peer]))
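    # Both sides run a RegularChainSyncer; the client starts from an empty
    # database and syncs up to the server's 20-block chain.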
    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
                loop=event_loop,
            ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    await wait_for_head(client.db, server.db.get_canonical_head())
    head = client.db.get_canonical_head()
    assert head.state_root in client.db.db
Example #7
async def _main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = AsyncHeaderDB(AtomicDB(base_db))
    chaindb = AsyncChainDB(AtomicDB(base_db))
    try:
        genesis = chaindb.get_canonical_block_header_by_number(BlockNumber(0))
    except HeaderNotFound:
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    chain_class: Union[Type[AsyncRopstenChain], Type[AsyncMainnetChain]]
    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        chain_id = RopstenChain.chain_id
        vm_config = ROPSTEN_VM_CONFIGURATION
        chain_class = AsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        chain_id = MainnetChain.chain_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = AsyncMainnetChain
    else:
        raise RuntimeError(f"Unknown genesis: {genesis}")

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=chain_id,
        vm_configuration=vm_config,
        client_version_string=construct_trinity_client_identifier(),
        listen_port=30309,
        p2p_version=DEVP2P_V5,
    )

    peer_pool = peer_pool_class(privkey=privkey, context=context)

    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[chain_id]

    async with background_asyncio_service(peer_pool) as manager:
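        # Keep trying to connect to the selected nodes while the peer pool runs.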
        manager.run_task(connect_to_peers_loop(peer_pool,
                                               nodes))  # type: ignore
        chain = chain_class(base_db)
        syncer: Service = None
        if args.light:
            syncer = LightChainSyncer(chain, headerdb,
                                      cast(LESPeerPool, peer_pool))
        else:
            syncer = RegularChainSyncer(chain, chaindb,
                                        cast(ETHPeerPool, peer_pool))
        logging.getLogger().setLevel(log_level)

        sigint_received = asyncio.Event()
        for sig in [signal.SIGINT, signal.SIGTERM]:
            loop.add_signal_handler(sig, sigint_received.set)

        async def exit_on_sigint() -> None:
            await sigint_received.wait()
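            # Cancelling the syncer's manager lets the service below exit cleanly.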
            syncer.get_manager().cancel()

        asyncio.ensure_future(exit_on_sigint())

        async with background_asyncio_service(syncer) as syncer_manager:
            await syncer_manager.wait_finished()
Example #8
def _test() -> None:
    import argparse
    from pathlib import Path
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncMainnetChain, FakeAsyncRopstenChain,
        FakeAsyncHeaderDB, connect_to_peers_loop)
    from trinity.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from trinity._utils.chains import load_nodekey

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = FakeAsyncChainDB(base_db)
    try:
        genesis = chaindb.get_canonical_block_header_by_number(0)
    except HeaderNotFound:
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncMainnetChain
    else:
        raise RuntimeError(f"Unknown genesis: {genesis}")

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=network_id,
        vm_configuration=vm_config,
    )

    peer_pool = peer_pool_class(privkey=privkey, context=context)

    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    chain = chain_class(base_db)
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb,
                                  cast(LESPeerPool, peer_pool))
    else:
        syncer = RegularChainSyncer(chain, chaindb,
                                    cast(ETHPeerPool, peer_pool))
    syncer.logger.setLevel(log_level)
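    # Allow syncing to start as soon as a single peer is connected.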
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
Example #9
async def test_sync_integration(request, caplog, geth_ipc_path, enode,
                                geth_process):
    """Test a regular chain sync against a running geth instance.

    In order to run this manually, you can use `tox -e py37-sync_integration` or:

        pytest --integration --capture=no tests/integration/test_sync.py

    The fixture for this test was generated with:

        geth --ropsten --syncmode full

    Only the first 11 blocks are needed for this test to succeed.
    """
    if not request.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    # make sure geth has been launched
    wait_for_socket(geth_ipc_path)

    remote = Node.from_uri(enode)
    base_db = AtomicDB()
    chaindb = AsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = AsyncHeaderDB(base_db)
    chain_config = Eth1ChainConfig.from_preconfigured_network(
        ROPSTEN_NETWORK_ID)
    chain = chain_config.initialize_chain(base_db)
    context = ChainContext(
        headerdb=headerdb,
        network_id=ROPSTEN_NETWORK_ID,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
        client_version_string='trinity-test',
        listen_port=30303,
        p2p_version=DEVP2P_V5,
    )
    peer_pool = ETHPeerPool(privkey=ecies.generate_privkey(), context=context)
    syncer = RegularChainSyncer(chain, chaindb, peer_pool)

    async with background_asyncio_service(peer_pool) as manager:
        await manager.wait_started()
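        # Connect directly to the geth node launched by the geth_process fixture.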
        await peer_pool.connect_to_nodes([remote])
        assert len(peer_pool) == 1

        async with background_asyncio_service(syncer) as syncer_manager:
            await syncer_manager.wait_started()

            n = 11

            manager.logger.info(f"Waiting for the chain to sync {n} blocks")

            async def wait_for_header_sync(block_number):
                while chaindb.get_canonical_head().block_number < block_number:
                    await asyncio.sleep(0.1)

            await asyncio.wait_for(wait_for_header_sync(n), 5)

            # https://ropsten.etherscan.io/block/11
            header = chaindb.get_canonical_block_header_by_number(n)
            transactions = chaindb.get_block_transactions(
                header, BaseTransactionFields)
            assert len(transactions) == 15

            receipts = chaindb.get_receipts(header, Receipt)
            assert len(receipts) == 15
            assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
                '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860'
            )