Example #1
async def test_fast_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(FrontierTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
    server = RegularChainSyncer(
        FrontierTestChain(chaindb_20.db),
        chaindb_20,
        MockPeerPoolWithConnectedPeers([server_peer]))
    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(server.cancel())
        # Yield control so that server.run() returns; otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))
    request.addfinalizer(finalizer)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=2)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_20.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(
        chaindb_fresh, chaindb_fresh.db, head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=2)

    assert head.state_root in chaindb_fresh.db
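These tests rely on MockPeerPoolWithConnectedPeers from the project's test helpers. A minimal sketch of such a mock, assuming a PeerPool base class with a connected_nodes mapping (the constructor arguments here are assumptions, not the verified implementation):

from p2p.peer import PeerPool

class MockPeerPoolWithConnectedPeers(PeerPool):
    # A peer pool pre-populated with already-connected peers; it is a plain
    # container and must never be run as a service.
    def __init__(self, peers):
        super().__init__(privkey=None, context=None)
        for peer in peers:
            self.connected_nodes[peer.remote] = peer

    async def _run(self):
        raise NotImplementedError("This is a mock; it must not be run")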
Example #2
async def test_regular_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(
        FrontierTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]))
    server = RegularChainSyncer(
        FrontierTestChain(chaindb_20.db),
        chaindb_20,
        MockPeerPoolWithConnectedPeers([server_peer]))
    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(asyncio.gather(
            client.cancel(),
            server.cancel(),
        ))
        # Yield control so that client/server.run() returns; otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))
    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    await wait_for_head(client.db, server.db.get_canonical_head())
    head = client.db.get_canonical_head()
    assert head.state_root in client.db.db
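wait_for_head is not shown in these snippets. A plausible sketch, assuming it simply polls the header DB until the canonical head matches (the polling interval and timeout are illustrative):

import asyncio

async def wait_for_head(headerdb, header, timeout=3):
    # Poll until the canonical head equals the expected header.
    async def _wait_loop():
        while headerdb.get_canonical_head() != header:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(_wait_loop(), timeout=timeout)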
Example #3
async def test_tx_sending(request, event_loop, chain_with_block_validation, tx_validator):
    # This test covers the communication end to end, whereas the previous one
    # focuses on the transaction pool's rules for when to send txs to whom.
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
    )

    peer2_subscriber = SamplePeerSubscriber()
    peer2.add_subscriber(peer2_subscriber)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)

    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())
    request.addfinalizer(finalizer)

    txs = [create_random_tx(chain_with_block_validation)]

    peer1.sub_proto.send_transactions(txs)

    # Ensure that peer2 gets the transactions
    peer, cmd, msg = await asyncio.wait_for(
        peer2_subscriber.msg_queue.get(),
        timeout=0.1,
    )

    assert peer == peer2
    assert isinstance(cmd, Transactions)
    assert msg[0].hash == txs[0].hash
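SamplePeerSubscriber is a small test helper. A minimal sketch, assuming the PeerSubscriber interface from p2p.peer (the subscription type and queue size below are assumptions):

import logging
from p2p.peer import PeerSubscriber
from p2p.protocol import Command

class SamplePeerSubscriber(PeerSubscriber):
    # Subscribe to every command type and expose a bounded msg_queue.
    logger = logging.getLogger('tests.SamplePeerSubscriber')
    subscription_msg_types = {Command}

    @property
    def msg_queue_maxsize(self):
        return 100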
Example #4
async def bootstrap_test_setup(monkeypatch, request, event_loop, chain, tx_validator):
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
    )

    # We intercept sub_proto.send_transactions to record detailed information
    # about which peer received what and how often it was invoked.
    peer1_txs_recorder = create_tx_recorder(monkeypatch, peer1)
    peer2_txs_recorder = create_tx_recorder(monkeypatch, peer2)

    pool = TxPool(
        MockPeerPoolWithConnectedPeers([peer1, peer2]),
        tx_validator
    )

    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())
    request.addfinalizer(finalizer)

    return peer1, peer1_txs_recorder, peer2, peer2_txs_recorder, pool
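create_tx_recorder is referenced but not shown. A hypothetical version matching the comment above: it monkeypatches sub_proto.send_transactions and records every call (TxRecorder and its attribute names are made up for illustration):

class TxRecorder:
    def __init__(self):
        self.send_count = 0
        self.recorded_txs = []

def create_tx_recorder(monkeypatch, peer):
    recorder = TxRecorder()

    def record_send_transactions(txs):
        # Count each invocation and remember which transactions were sent.
        recorder.send_count += 1
        recorder.recorded_txs.extend(txs)

    monkeypatch.setattr(peer.sub_proto, 'send_transactions', record_send_transactions)
    return recorder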
Example #5
async def _test_sync_with_fixed_sync_parameters(
        request,
        event_loop,
        client_db,
        server_db,
        timestamp_to_sync_to,
        sync_stage_id,
        validation_function,
        blocks_to_import=None,
        blocks_to_import_from_rpc=False):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, alice_db=client_db, bob_db=server_db)

    client_node = FakeMainnetFullNode(
        base_db=client_peer.context.base_db,
        priv_key=client_peer.context.chains[0].private_key,
    )

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])

    # Let's do a fast sync to the newest root hash timestamp.
    expected_root_hash = server_peer.chain_head_db.get_historical_root_hash(
        timestamp_to_sync_to)
    existing_root_hash = client_peer.chain_head_db.get_historical_root_hash(
        timestamp_to_sync_to)

    client_sync_parameters = SyncParameters(timestamp_to_sync_to,
                                            existing_root_hash,
                                            expected_root_hash, [client_peer],
                                            sync_stage_id)

    client_consensus = MockConsensusService(
        sync_parameters=client_sync_parameters)

    client = RegularChainSyncer(
        context=client_peer.context,
        peer_pool=client_peer_pool,
        consensus=client_consensus,
        node=client_node,
    )

    server_node = FakeMainnetFullNode(
        base_db=server_peer.context.base_db,
        priv_key=server_peer.context.chains[0].private_key,
    )

    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])

    server_consensus = MockConsensusService(sync_parameters="fully-synced")

    server_context = server_peer.context
    server_context.chain_config.node_type = 4
    server_context.chain_config.network_startup_node = True
    server = RegularChainSyncer(
        context=server_peer.context,
        peer_pool=server_peer_pool,
        consensus=server_consensus,
        node=server_node,
    )

    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
            ))
        # Yield control so that client/server.run() returns; otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    if blocks_to_import is not None:
        for block in blocks_to_import:
            new_block_queue_item = NewBlockQueueItem(
                block, from_rpc=blocks_to_import_from_rpc)
            client._new_blocks_to_import.put_nowait(new_block_queue_item)

    await wait_for_chain_head_hash(client.chain_head_db, expected_root_hash,
                                   timestamp_to_sync_to)

    # Give the nodes a moment to finish; they might still be writing to the database.
    await asyncio.sleep(0.2)

    validation_function(server_db, client_db)
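wait_for_chain_head_hash is another polling helper that is not shown. A plausible sketch, assuming get_historical_root_hash(timestamp) returns the client's root hash for that timestamp:

import asyncio

async def wait_for_chain_head_hash(chain_head_db, expected_root_hash, timestamp, timeout=10):
    # Poll until the client's root hash for the timestamp matches the expected one.
    async def _wait_loop():
        while chain_head_db.get_historical_root_hash(timestamp) != expected_root_hash:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(_wait_loop(), timeout=timeout)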
Example #6
async def _test_sync_with_variable_sync_parameters(
        request,
        event_loop,
        client_db,
        server_db,
        validation_function,
        sync_stage_id_override=None,
        waiting_function=None,
        blocks_to_import=None,
        blocks_to_import_from_rpc=False):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, alice_db=client_db, bob_db=server_db)

    client_node = FakeMainnetFullNode(
        base_db=client_peer.context.base_db,
        priv_key=client_peer.context.chains[0].private_key,
    )

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])

    expected_root_hash_timestamp = server_peer.chain_head_db.get_historical_root_hashes()[-1]

    client_consensus = MockConsensusService(
        client_peer.chain_head_db,
        client_peer_pool,
        chain_to_sync_to=server_peer.context.chains[0],
        sync_stage_override=sync_stage_id_override)

    client = RegularChainSyncer(
        context=client_peer.context,
        peer_pool=client_peer_pool,
        consensus=client_consensus,
        node=client_node,
    )

    server_node = FakeMainnetFullNode(
        base_db=server_peer.context.base_db,
        priv_key=server_peer.context.chains[0].private_key,
    )

    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])

    server_consensus = MockConsensusService(sync_parameters="fully-synced",
                                            peer_pool=server_peer_pool,
                                            is_server=True)

    server_context = server_peer.context
    server_context.chain_config.node_type = 4
    server_context.chain_config.network_startup_node = True
    server = RegularChainSyncer(
        context=server_peer.context,
        peer_pool=server_peer_pool,
        consensus=server_consensus,
        node=server_node,
    )

    server.logger = logging.getLogger('dummy')

    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
            ))
        # Yield control so that client/server.run() returns; otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    if blocks_to_import is not None:
        for block in blocks_to_import:
            new_block_queue_item = NewBlockQueueItem(
                block, from_rpc=blocks_to_import_from_rpc)
            client._new_blocks_to_import.put_nowait(new_block_queue_item)

    if waiting_function is None:
        await wait_for_both_nodes_to_be_synced(client.chain_head_db,
                                               server.chain_head_db)
    else:
        await waiting_function(client, server)

    # Give the nodes a moment to finish; they might still be writing to the database.
    await asyncio.sleep(0.2)

    validation_function(server_db, client_db)
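The default waiting_function, wait_for_both_nodes_to_be_synced, can be sketched the same way, assuming it compares the newest entries of both chain head DBs' historical root hashes (as the get_historical_root_hashes()[-1] lookup above suggests):

import asyncio

async def wait_for_both_nodes_to_be_synced(client_chain_head_db, server_chain_head_db, timeout=10):
    # Poll until both nodes report the same newest historical root hash.
    async def _wait_loop():
        while (client_chain_head_db.get_historical_root_hashes()[-1]
               != server_chain_head_db.get_historical_root_hashes()[-1]):
            await asyncio.sleep(0.1)

    await asyncio.wait_for(_wait_loop(), timeout=timeout)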
Example #7
async def _test_consensus_swarm(request, event_loop, bootnode_db, client_db,
                                peer_swarm, validation_function):

    # 0 = bootnode, 1 = client, 2 .... n = peers in swarm
    dbs_for_linking = [bootnode_db, client_db, *peer_swarm]

    # Initialize an n-by-n matrix to hold each linked peer pair.
    linked_peer_array = [
        [None] * len(dbs_for_linking) for _ in range(len(dbs_for_linking))
    ]

    private_helios_keys = [
        TESTNET_GENESIS_PRIVATE_KEY,
        keys.PrivateKey(random_private_keys[0]), *[
            keys.PrivateKey(random_private_keys[i + 1])
            for i in range(len(peer_swarm))
        ]
    ]

    # Create all of the linked peers
    for i in range(len(dbs_for_linking)):
        # Use fresh local names so we don't shadow the client_db parameter.
        node_db = dbs_for_linking[i]
        node_private_helios_key = private_helios_keys[i]
        for j in range(len(dbs_for_linking)):
            # Don't link a node with itself
            if i == j:
                continue

            # Only link each pair of nodes once.
            if linked_peer_array[i][j] is None and linked_peer_array[j][i] is None:
                peer_db = dbs_for_linking[j]
                peer_private_helios_key = private_helios_keys[j]

                client_peer, server_peer = await get_directly_linked_peers(
                    request,
                    event_loop,
                    alice_db=node_db,
                    bob_db=peer_db,
                    alice_private_helios_key=node_private_helios_key,
                    bob_private_helios_key=peer_private_helios_key)

                linked_peer_array[i][j] = client_peer
                linked_peer_array[j][i] = server_peer

    node_index_to_listen_with_logger = 7
    consensus_services = []
    for i in range(len(dbs_for_linking)):
        if i == 0:
            context = linked_peer_array[i][1].context
            context.chain_config.node_type = 4
            context.chain_config.network_startup_node = True
            bootstrap_nodes = []
        else:
            context = linked_peer_array[i][0].context
            bootstrap_nodes = [linked_peer_array[i][0].remote]

        peer_pool = MockPeerPoolWithConnectedPeers(
            [x for x in linked_peer_array[i] if x is not None])

        node = FakeTestnetFullNode(dbs_for_linking[i], private_helios_keys[i])

        consensus = Consensus(context=context,
                              peer_pool=peer_pool,
                              bootstrap_nodes=bootstrap_nodes,
                              node=node)

        if i != node_index_to_listen_with_logger:
            # Disable this logger by renaming it to one we aren't listening to.
            consensus.logger = logging.getLogger('dummy')

        consensus_services.append(consensus)

    asyncio.ensure_future(consensus_services[0].run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(
                *[x.cancel() for x in consensus_services],
                loop=event_loop,
            ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    # Start the swarm peers first, then the client last (the bootnode is already running).
    for i in range(2, len(consensus_services)):
        asyncio.ensure_future(consensus_services[i].run())

    asyncio.ensure_future(consensus_services[1].run())

    await wait_for_consensus_all(consensus_services)

    # Give the services a moment to finish before validating.
    await asyncio.sleep(1)
    await validation_function(consensus_services)
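wait_for_consensus_all is not shown either. One hypothetical shape, assuming each Consensus service exposes an asyncio.Event (coro_is_ready is an assumed attribute name) that is set once it has reached consensus:

import asyncio

async def wait_for_consensus_all(consensus_services, timeout=30):
    # Wait until every consensus service signals readiness.
    await asyncio.wait_for(
        asyncio.gather(*(service.coro_is_ready.wait()
                         for service in consensus_services)),
        timeout=timeout,
    )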