def atomic_db(request, tmpdir):
    if request.param == 'atomic':
        return AtomicDB()
    elif request.param == 'level':
        return LevelDB(db_path=tmpdir.mkdir("level_db_path"))
    else:
        raise ValueError("Unexpected database type: {}".format(request.param))
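The branches above dispatch on `request.param`, so in the source project this fixture is presumably registered with a parametrized `@pytest.fixture` decorator. A minimal sketch of that registration, with the parameter values inferred from the branches rather than taken from the original file:

import pytest

@pytest.fixture(params=['atomic', 'level'])  # inferred parameters, not from the original source
def atomic_db(request, tmpdir):
    ...  # body as shown above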
Example #2
def prepare_computation(vm_class):

    message = Message(
        to=CANONICAL_ADDRESS_A,
        sender=CANONICAL_ADDRESS_B,
        value=100,
        data=b'',
        code=b'',
        gas=800,
    )

    tx_context = vm_class._state_class.transaction_context_class(
        gas_price=1,
        origin=CANONICAL_ADDRESS_B,
    )

    vm = vm_class(GENESIS_HEADER, ChainDB(AtomicDB()))

    computation = vm_class._state_class.computation_class(
        state=vm.state,
        message=message,
        transaction_context=tx_context,
    )

    computation.state.account_db.touch_account(
        decode_hex(EMPTY_ADDRESS_IN_STATE))
    computation.state.account_db.set_code(decode_hex(ADDRESS_WITH_CODE[0]),
                                          ADDRESS_WITH_CODE[1])

    return computation
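A hedged usage sketch for the helper above: a test would call it with a VM class that exposes `_state_class`. `FrontierVM` and the `computation.msg` attributes below follow the standard py-evm API and are assumptions, not part of the original example:

from eth.vm.forks import FrontierVM  # assumed import path; substitute the project's VM classes

computation = prepare_computation(FrontierVM)
assert computation.msg.gas == 800    # the Message constructed above carries 800 gas
assert computation.msg.value == 100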
Example #3
def new_chain_from_fixture(fixture, chain_cls=MainnetChain):
    base_db = AtomicDB()

    vm_config = chain_vm_configuration(fixture)

    ChainFromFixture = chain_cls.configure(
        'ChainFromFixture',
        vm_configuration=vm_config,
    )

    return ChainFromFixture.from_genesis(
        base_db,
        genesis_params=genesis_params_from_fixture(fixture),
        genesis_state=fixture['pre'],
    )
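For orientation, the `fixture` argument follows the blockchain-test JSON layout from the upstream ethereum/tests repository. A rough, non-authoritative sketch of the keys involved (the exact shape varies between test suites):

fixture = {
    'network': 'Frontier',        # read by chain_vm_configuration to pick the VM ruleset
    'genesisBlockHeader': {},     # read by genesis_params_from_fixture
    'pre': {},                    # genesis account state, passed through above
    'blocks': [],                 # applied later by the test harness, not by this helper
}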
Example #4
def get_server(privkey, address):
    base_db = AtomicDB()
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = ChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = RopstenChain(base_db)
    server = ParagonServer(
        privkey=privkey,
        port=address.tcp_port,
        chain=chain,
        chaindb=chaindb,
        headerdb=headerdb,
        base_db=base_db,
        network_id=NETWORK_ID,
    )
    return server
Example #5
def copy(chain):
    """
    Make a copy of the chain at the given state.  Actions performed on the
    resulting chain will not affect the original chain.
    """
    if not isinstance(chain, MiningChain):
        raise ValidationError("`copy` may only be used with `MiningChain` instances")
    base_db = chain.chaindb.db
    if not isinstance(base_db, AtomicDB):
        raise ValidationError("Unsupported database type: {0}".format(type(base_db)))

    if isinstance(base_db.wrapped_db, MemoryDB):
        db = AtomicDB(MemoryDB(base_db.wrapped_db.kv_store.copy()))
    else:
        raise ValidationError("Unsupported wrapped database: {0}".format(type(base_db.wrapped_db)))

    chain_copy = type(chain)(db, chain.header)
    return chain_copy
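A brief usage sketch, assuming `chain` is a `MiningChain` backed by `AtomicDB(MemoryDB())` as the checks above require, and that its VM configuration permits mining without a valid proof of work (as test chains typically do):

chain_copy = copy(chain)
chain_copy.mine_block()  # writes only to the copied MemoryDB
assert chain.get_canonical_head() != chain_copy.get_canonical_head()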
Example #6
def genesis(chain_class, db=None, params=None, state=None):
    """
    Initialize the given chain class with the given genesis header parameters
    and chain state.
    """
    if state is None:
        genesis_state = {}  # type: Dict[str, Union[int, bytes, Dict[int, int]]]
    else:
        genesis_state = _fill_and_normalize_state(state)

    genesis_params_defaults = _get_default_genesis_params(genesis_state)

    if params is None:
        genesis_params = genesis_params_defaults
    else:
        genesis_params = merge(genesis_params_defaults, params)

    if db is None:
        base_db = AtomicDB()
    else:
        base_db = db

    return chain_class.from_genesis(base_db, genesis_params, genesis_state)
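A hedged usage sketch: with every argument defaulted, the helper builds a chain on a fresh in-memory `AtomicDB` with an empty genesis state; `MiningChain` stands in for whatever configured chain class the project actually passes:

chain = genesis(MiningChain)                         # empty state, default genesis params
funded_chain = genesis(MiningChain, state=my_state)  # `my_state` is a hypothetical prefunded state dict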
Example #7
def database_server_ipc_path():
    core_db = AtomicDB()
    core_db[b'key-a'] = b'value-a'

    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID, max_peers=1, data_dir=temp_dir)

        manager = get_chaindb_manager(chain_config, core_db)
        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager,),
        )
        chaindb_server_process.start()

        wait_for_ipc(chain_config.database_ipc_path)

        try:
            yield chain_config.database_ipc_path
        finally:
            kill_process_gracefully(chaindb_server_process, logging.getLogger())
Example #8
def base_db():
    return AtomicDB()
def atomic_db():
    return AtomicDB()
async def test_lightchain_integration(request, event_loop, caplog,
                                      geth_ipc_path, enode, geth_process):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this manually, you can use `tox -e py36-lightchain_integration` or:

        pytest --integration --capture=no tests/helios/integration/test_lightchain_integration.py

    The fixture for this test was generated with:

        geth --testnet --syncmode full

    Only the first 11 blocks are needed for this test to succeed.
    """
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    # make sure geth has been launched
    wait_for_socket(geth_ipc_path)

    remote = Node.from_uri(enode)
    base_db = AtomicDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    context = ChainContext(
        headerdb=headerdb,
        network_id=ROPSTEN_NETWORK_ID,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
    )
    peer_pool = LESPeerPool(
        privkey=ecies.generate_privkey(),
        context=context,
    )
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(syncer.run())
    # Yield control to give the LightChainSyncer a chance to start.
    await asyncio.sleep(0)

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.coro_get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.coro_get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    peer = peer_pool.highest_td_peer
    head = await peer_chain.coro_get_block_header_by_hash(peer.head_hash)

    # In order to answer queries for contract code, geth needs the state trie entry for the
    # block we specify in the query, but because of fast sync we can only assume it has that
    # for recent blocks, so we use the current head to look up the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('0x8B09D9ac6A4F7778fCb22852e879C7F3B2bEeF81')
    contract_code = await peer_chain.coro_get_contract_code(
        head.hash, contract_addr)
    assert encode_hex(contract_code) == '0x600060006000600060006000356000f1'

    account = await peer_chain.coro_get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
def fork_chain(chain):
    # make a duplicate chain with no shared state
    fork_db = AtomicDB(MemoryDB(chain.chaindb.db.wrapped_db.kv_store.copy()))
    fork_chain = type(chain)(fork_db, chain.header)

    return fork_chain
def get_fresh_mainnet_headerdb():
    headerdb = FakeAsyncHeaderDB(AtomicDB())
    headerdb.persist_header(MAINNET_GENESIS_HEADER)
    return headerdb