Example #1
async def test_fast_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, ETHPeer, FakeAsyncHeaderDB(chaindb_fresh.db),
        ETHPeer, FakeAsyncHeaderDB(chaindb_20.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(FrontierTestChain(chaindb_fresh.db),
                             chaindb_fresh, client_peer_pool)
    server = RegularChainSyncer(FrontierTestChain(chaindb_20.db), chaindb_20,
                                MockPeerPoolWithConnectedPeers([server_peer]))
    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(server.cancel())
        # Yield control so that server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=2)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_20.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(chaindb_fresh, chaindb_fresh.db,
                                       head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=2)

    assert head.state_root in chaindb_fresh.db
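
Most of the examples on this page repeat the same pattern: start a long-running service in the background with asyncio.ensure_future(service.run()), then register a pytest finalizer that cancels it and briefly yields control so the coroutine can finish. The fixture below is a hypothetical way to factor that pattern out; it is only a sketch, assuming the service exposes run() and cancel() coroutines and that the pytest-asyncio event_loop fixture is available.

import asyncio

import pytest


@pytest.fixture
def run_in_background(request, event_loop):
    # Hypothetical helper fixture, not part of the examples on this page:
    # starts a service in the background and cancels it when the test finishes.
    def _run(service):
        asyncio.ensure_future(service.run())

        def finalizer():
            event_loop.run_until_complete(service.cancel())
            # Yield control so that service.run() returns, otherwise asyncio
            # will complain about a pending task being destroyed.
            event_loop.run_until_complete(asyncio.sleep(0.1))

        request.addfinalizer(finalizer)
        return service

    return _run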
Example #2
async def test_regular_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop, ETHPeer, FakeAsyncHeaderDB(chaindb_fresh.db),
        ETHPeer, FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(FrontierTestChain(chaindb_fresh.db),
                                chaindb_fresh,
                                MockPeerPoolWithConnectedPeers([client_peer]))
    server = RegularChainSyncer(FrontierTestChain(chaindb_20.db), chaindb_20,
                                MockPeerPoolWithConnectedPeers([server_peer]))
    asyncio.ensure_future(server.run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
                loop=event_loop,
            ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    await wait_for_head(client.db, server.db.get_canonical_head())
    head = client.db.get_canonical_head()
    assert head.state_root in client.db.db
Example #3
async def test_tx_sending(request, event_loop, chain_with_block_validation,
                          tx_validator):
    # This test covers the communication end to end, whereas the previous one
    # focuses on the transaction pool's rules for when to send transactions to whom
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
    )

    peer2_subscriber = SamplePeerSubscriber()
    peer2.add_subscriber(peer2_subscriber)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)

    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())

    request.addfinalizer(finalizer)

    txs = [create_random_tx(chain_with_block_validation)]

    peer1.sub_proto.send_transactions(txs)

    # Ensure that peer2 gets the transactions
    peer, cmd, msg = await asyncio.wait_for(
        peer2_subscriber.msg_queue.get(),
        timeout=0.1,
    )

    assert peer == peer2
    assert isinstance(cmd, Transactions)
    assert msg[0].hash == txs[0].hash
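
The subscriber used above exposes a msg_queue of (peer, cmd, msg) tuples that gets filled as messages arrive. The real SamplePeerSubscriber in Trinity's test helpers derives from the framework's PeerSubscriber base class; the snippet below is only a bare-bones illustration of the idea and does not reproduce that API.

import asyncio


class BareSubscriber:
    # Hypothetical minimal subscriber: something on the peer side is assumed
    # to call add_msg() for every decoded message.
    def __init__(self, maxsize=100):
        self.msg_queue = asyncio.Queue(maxsize)

    def add_msg(self, peer, cmd, msg):
        # Drop the message rather than block if the queue is full.
        try:
            self.msg_queue.put_nowait((peer, cmd, msg))
        except asyncio.QueueFull:
            pass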
Example #4
async def test_syncer_proposing(request, event_loop):
    # setup a-b topology
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(request, event_loop)
    peer_a_b_subscriber = SamplePeerSubscriber()
    peer_a_b.add_subscriber(peer_a_b_subscriber)
    peer_pool_b = MockPeerPoolWithConnectedPeers([peer_b_a])

    # setup shard dbs at b
    shard_db = ShardDB(MemoryDB())
    shard = Shard(shard_db, 0)

    # start shard syncer
    syncer = ShardSyncer(shard, peer_pool_b)
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # propose at b and check that it announces its proposal
    await syncer.propose()
    peer, cmd, msg = await asyncio.wait_for(
        peer_a_b_subscriber.msg_queue.get(),
        timeout=1,
    )
    assert peer == peer_a_b
    assert isinstance(cmd, NewCollationHashes)
    assert len(msg["collation_hashes_and_periods"]) == 1
    proposed_hash = msg["collation_hashes_and_periods"][0][0]

    # test that the collation has been added to the shard
    shard.get_collation_by_hash(proposed_hash)
Example #5
async def test_syncer_requests_new_collations(request, event_loop):
    # setup a-b topology
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(request, event_loop)
    peer_a_b_subscriber = SamplePeerSubscriber()
    peer_a_b.add_subscriber(peer_a_b_subscriber)
    peer_pool_b = MockPeerPoolWithConnectedPeers([peer_b_a])

    # setup shard dbs at b
    shard_db = ShardDB(MemoryDB())
    shard = Shard(shard_db, 0)

    # start shard syncer
    syncer = ShardSyncer(shard, peer_pool_b)
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # notify b about new hashes at a and check that it requests them
    hashes_and_periods = ((b"\xaa" * 32, 0),)
    peer_a_b.sub_proto.send_new_collation_hashes(hashes_and_periods)
    peer, cmd, msg = await asyncio.wait_for(
        peer_a_b_subscriber.msg_queue.get(),
        timeout=1,
    )
    assert peer == peer_a_b
    assert isinstance(cmd, GetCollations)
    assert msg["collation_hashes"] == (hashes_and_periods[0][0],)
Example #6
async def test_skeleton_syncer(request, event_loop, chaindb_fresh,
                               chaindb_1000):
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_1000.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(ByzantiumTestChain(chaindb_fresh.db),
                             chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    server = RegularChainSyncer(
        ByzantiumTestChain(chaindb_1000.db),
        chaindb_1000,
        server_peer_pool,
    )
    asyncio.ensure_future(server.run())
    server_request_handler = ETHRequestServer(
        FakeAsyncChainDB(chaindb_1000.db), server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    client_peer.logger.info("%s is serving 1000 blocks", client_peer)
    server_peer.logger.info("%s is syncing up 1000 blocks", server_peer)

    def finalizer():
        event_loop.run_until_complete(server.cancel())
        # Yield control so that server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=20)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_1000.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(chaindb_fresh, chaindb_fresh.db,
                                       head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=20)

    assert head.state_root in chaindb_fresh.db
Example #7
async def test_regular_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(ByzantiumTestChain(chaindb_fresh.db),
                                chaindb_fresh,
                                MockPeerPoolWithConnectedPeers([client_peer]))
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    server = RegularChainSyncer(
        ByzantiumTestChain(chaindb_20.db),
        chaindb_20,
        server_peer_pool,
    )
    asyncio.ensure_future(server.run())
    server_request_handler = ETHRequestServer(FakeAsyncChainDB(chaindb_20.db),
                                              server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    server_peer.logger.info("%s is serving 20 blocks", server_peer)
    client_peer.logger.info("%s is syncing up 20", client_peer)

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(
                client.cancel(),
                server.cancel(),
                loop=event_loop,
            ))
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))

    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
    head = chaindb_fresh.get_canonical_head()
    assert head.state_root in chaindb_fresh.db
Example #8
async def test_new_collations_notification(request, event_loop):
    # setup a-b-c topology
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(request, event_loop)
    peer_b_c, peer_c_b = await get_directly_linked_sharding_peers(request, event_loop)
    peer_c_b_subscriber = SamplePeerSubscriber()
    peer_c_b.add_subscriber(peer_c_b_subscriber)
    peer_pool_b = MockPeerPoolWithConnectedPeers([peer_b_a, peer_b_c])

    # setup shard dbs at b
    shard_db = ShardDB(MemoryDB())
    shard = Shard(shard_db, 0)

    # start shard syncer
    syncer = ShardSyncer(shard, peer_pool_b)
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # send collation from a to b and check that c gets notified
    c1 = next(collations)
    peer_a_b.sub_proto.send_collations(0, [c1])
    peer, cmd, msg = await asyncio.wait_for(
        peer_c_b_subscriber.msg_queue.get(),
        timeout=1,
    )
    assert peer == peer_c_b
    assert isinstance(cmd, NewCollationHashes)
    assert msg["collation_hashes_and_periods"] == ((c1.hash, c1.period),)

    # check that c won't be notified about c1 again
    c2 = next(collations)
    peer_a_b.sub_proto.send_collations(0, [c1, c2])
    peer, cmd, msg = await asyncio.wait_for(
        peer_c_b_subscriber.msg_queue.get(),
        timeout=1,
    )
    assert peer == peer_c_b
    assert isinstance(cmd, NewCollationHashes)
    assert msg["collation_hashes_and_periods"] == ((c2.hash, c2.period),)
Example #9
async def test_peer_pool_iter(request, event_loop):
    peer1, _ = await get_directly_linked_peers(request, event_loop)
    peer2, _ = await get_directly_linked_peers(request, event_loop)
    peer3, _ = await get_directly_linked_peers(request, event_loop)
    pool = MockPeerPoolWithConnectedPeers([peer1, peer2, peer3])
    peers = [peer async for peer in pool]

    assert len(peers) == 3
    assert peer1 in peers
    assert peer2 in peers
    assert peer3 in peers

    peers = []
    asyncio.ensure_future(
        peer2.disconnect(DisconnectReason.disconnect_requested))
    async for peer in pool:
        peers.append(peer)

    assert len(peers) == 2
    assert peer1 in peers
    assert peer2 not in peers
    assert peer3 in peers
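
Example #9 depends on the pool's async iteration skipping peers that have since disconnected. The real MockPeerPoolWithConnectedPeers wraps the framework's PeerPool, so the class below is only a hypothetical stand-in showing the shape of that behaviour; the is_connected flag is an assumption made for the sketch.

class SketchPeerPool:
    # Hypothetical stand-in for a pool of already-connected peers.
    def __init__(self, peers):
        self.connected_nodes = list(peers)

    async def __aiter__(self):
        # Iterate over a copy so peers may disconnect while we iterate.
        for peer in list(self.connected_nodes):
            if peer.is_connected:
                yield peer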
Example #10
async def bootstrap_test_setup(monkeypatch, request, event_loop, chain,
                               tx_validator):
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
    )

    # We intercept sub_proto.send_transactions to record detailed information
    # about which peer received what and how often it was invoked.
    peer1_txs_recorder = create_tx_recorder(monkeypatch, peer1)
    peer2_txs_recorder = create_tx_recorder(monkeypatch, peer2)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)

    asyncio.ensure_future(pool.run())

    def finalizer():
        event_loop.run_until_complete(pool.cancel())

    request.addfinalizer(finalizer)

    return peer1, peer1_txs_recorder, peer2, peer2_txs_recorder, pool
Example #11
async def test_collation_requests(request, event_loop):
    # setup two peers
    sender, receiver = await get_directly_linked_sharding_peers(request, event_loop)
    receiver_peer_pool = MockPeerPoolWithConnectedPeers([receiver])

    # setup shard db for request receiving node
    receiver_db = ShardDB(MemoryDB())
    receiver_shard = Shard(receiver_db, 0)

    # create three collations and add two to the shard of the receiver
    # body is shared to avoid unnecessary chunk root calculation
    body = zpad_right(b"body", COLLATION_SIZE)
    chunk_root = calc_chunk_root(body)
    c1 = Collation(CollationHeader(0, chunk_root, 0, zpad_right(b"proposer1", 20)), body)
    c2 = Collation(CollationHeader(0, chunk_root, 1, zpad_right(b"proposer2", 20)), body)
    c3 = Collation(CollationHeader(0, chunk_root, 2, zpad_right(b"proposer3", 20)), body)
    for collation in [c1, c2]:
        receiver_shard.add_collation(collation)

    # start shard syncer
    receiver_syncer = ShardSyncer(receiver_shard, receiver_peer_pool)
    asyncio.ensure_future(receiver_syncer.run())

    def finalizer():
        event_loop.run_until_complete(receiver_syncer.cancel())
    request.addfinalizer(finalizer)

    cancel_token = CancelToken("test")

    # request single collation
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1])

    # request multiple collations
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash, c2.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1, c2])

    # request no collations
    received_collations = await asyncio.wait_for(
        sender.get_collations([], cancel_token),
        timeout=1,
    )
    assert received_collations == set()

    # request unknown collation
    received_collations = await asyncio.wait_for(
        sender.get_collations([c3.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set()

    # request multiple collations, including unknown one
    received_collations = await asyncio.wait_for(
        sender.get_collations([c1.hash, c2.hash, c3.hash], cancel_token),
        timeout=1,
    )
    assert received_collations == set([c1, c2])
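
The collation fixtures in Example #11 pad short byte strings to fixed sizes with zpad_right (b"body" to COLLATION_SIZE, the proposer names to 20 bytes). A right-padding helper of that kind is essentially bytes.ljust with zero bytes; the function below is an illustrative sketch rather than the project's own implementation.

def zero_pad_right(value, to_size):
    # Illustrative sketch: pad `value` with zero bytes on the right so the
    # result is exactly `to_size` bytes long.
    return value.ljust(to_size, b"\x00")


# e.g. zero_pad_right(b"proposer1", 20) == b"proposer1" + b"\x00" * 11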