def database_server_ipc_path():
    """Fixture-style generator: start a chaindb server in a child process and
    yield the IPC socket path it listens on.

    The server process is killed gracefully on teardown; the temporary data
    directory is removed when the ``with`` block exits.
    """
    core_db = MemoryDB()
    # Seed an entry so the served DB is non-empty; presumably read back by a
    # consuming test -- TODO confirm against callers.
    core_db[b'key-a'] = b'value-a'
    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID, data_dir=temp_dir)

        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(chain_config, core_db),
        )
        chaindb_server_process.start()

        # Block until the server has actually bound its IPC socket, so the
        # consumer never races the child process startup.
        wait_for_ipc(chain_config.database_ipc_path)

        try:
            yield chain_config.database_ipc_path
        finally:
            # Always tear the child process down, even if the test failed.
            kill_process_gracefully(chaindb_server_process, logging.getLogger())
def _test():
    """Manual smoke-test entry point: download state from Ropsten peers.

    Command line:
        -db         path for the LevelDB where downloaded state is stored
        -root-hash  hex encoded state root to download

    Runs the downloader until completion or Ctrl-C, then shuts everything down.
    """
    import argparse
    from evm.p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash', type=str, required=True, help='Hex encoded root hash')
    args = parser.parse_args()

    # A throwaway in-memory chaindb holding only the genesis header; the peer
    # pool needs it for the handshake.
    chaindb = ChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(downloader.run())
    except KeyboardInterrupt:
        # Ctrl-C interrupts the download; fall through to orderly shutdown.
        pass

    loop.run_until_complete(downloader.stop())
    loop.run_until_complete(peer_pool.stop())
    loop.close()
def is_database_initialized(chaindb: ChainDB) -> bool:
    """Return ``True`` if the chain database already has a canonical head."""
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        # An empty chain database has no canonical head yet.
        return False
    return True
def chaindb_mainnet_100():
    """Return a chaindb with mainnet headers numbered from 0 to 100.

    Headers are decoded from the RLP fixture file that ships alongside this
    module and persisted into a fresh in-memory ChainDB.
    """
    here = os.path.dirname(__file__)
    fixture_path = os.path.join(here, 'fixtures', 'sample_1000_headers_rlp')
    # Fix: the original called .read() on a bare open(), leaking the file
    # handle; it also opened in 'r+b' (read/write) although it only reads.
    with open(fixture_path, 'rb') as fixture_file:
        headers_rlp = fixture_file.read()
    headers = rlp.decode(headers_rlp, sedes=sedes.CountableList(BlockHeader))
    chaindb = ChainDB(MemoryDB())
    # Persist genesis plus the first 100 headers (0..100 inclusive).
    for i in range(101):
        chaindb.persist_header_to_db(headers[i])
    return chaindb
def initialize_database(chain_config: ChainConfig, chaindb: ChainDB) -> None:
    """Persist the genesis header when the chain database is empty.

    Only the Ropsten chain is supported so far; anything else raises
    ``NotImplementedError``.
    """
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        if chain_config.chain_identifier != ROPSTEN:
            # TODO: add genesis data to ChainConfig and if it's present, use it
            # here to initialize the chain.
            raise NotImplementedError("Not implemented for other chains yet")
        # We're starting with a fresh DB.
        # TODO: log that we initialized the chain
        chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
def create_block(cls, transaction_packages, prev_hashes, coinbase, parent_header):
    """
    Create a block with transaction witness.

    Each item of ``transaction_packages`` is a ``(transaction, witness)``
    pair; the witness is a dict of trie nodes sufficient to execute that
    transaction.  Transactions are applied one by one against a ChainDB
    built from the accumulated witness nodes, and the trie writes of each
    successful transaction are folded into ``recent_trie_nodes`` so later
    transactions (and finalization) can see them.
    """
    block = cls.generate_block_from_parent_header_and_coinbase(
        parent_header,
        coinbase,
    )
    recent_trie_nodes = {}  # trie nodes written so far in this block
    receipts = []
    for (transaction, transaction_witness) in transaction_packages:
        # Merge previously-written nodes into this transaction's witness so
        # its execution sees the effects of earlier transactions.
        transaction_witness.update(recent_trie_nodes)
        witness_db = ChainDB(MemoryDB(transaction_witness))
        execution_context = ExecutionContext.from_block_header(
            block.header, prev_hashes)
        vm_state = cls.get_state_class()(
            chaindb=witness_db,
            execution_context=execution_context,
            state_root=block.header.state_root,
            receipts=receipts,
        )
        computation, result_block, _ = vm_state.apply_transaction(
            transaction=transaction,
            block=block,
        )

        if not computation.is_error:
            # Adopt the updated block/receipts and remember the trie writes.
            block = result_block
            receipts = computation.vm_state.receipts
            recent_trie_nodes.update(computation.vm_state.access_logs.writes)
        else:
            # Errored transactions are dropped: the block and accumulated
            # state are left untouched.
            pass

    # Finalize
    witness_db = ChainDB(MemoryDB(recent_trie_nodes))
    execution_context = ExecutionContext.from_block_header(
        block.header, prev_hashes)
    vm_state = cls.get_state_class()(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block.header.state_root,
        receipts=receipts,
    )
    block = vm_state.finalize_block(block)

    return block
def vm():
    """Fixture: a ShardingVM over a collation header, with the helper
    contracts deployed and ``ACCOUNT_ADDRESS`` pre-funded.
    """
    header = CollationHeader(
        shard_id=0,
        expected_period_number=2,
        period_start_prevhash=decode_hex(
            "3c4cc7b99c7eb9281e9a8d15cd4b2f98c5df085e929f15388c699b41cdde78d7"
        ),
        parent_hash=ZERO_HASH32,
        transaction_root=EMPTY_SHA3,
        coinbase=to_canonical_address(
            "8888f1f195afa192cfee860698584c030f4c9db1"),
        state_root=EMPTY_SHA3,
        receipt_root=EMPTY_SHA3,
        number=10,
    )
    # Sharding uses the binary trie and its own account-state class.
    chaindb = ChainDB(
        get_db_backend(),
        account_state_class=ShardingAccountStateDB,
        trie_class=BinaryTrie,
    )
    vm = ShardingVM(header=header, chaindb=chaindb)
    vm_state = vm.state
    with vm_state.state_db() as statedb:
        # Deploy every helper contract and fund the test account.
        for address, code in HELPER_CONTRACTS.items():
            statedb.set_code(address, code)
        statedb.set_balance(ACCOUNT_ADDRESS, INITIAL_BALANCE)
    # Update state_root manually
    vm.block.header.state_root = vm_state.state_root
    return vm
def get_server(privkey, address, peer_class):
    """Build a Server over a fresh in-memory Ropsten chain (genesis only)."""
    base_db = MemoryDB()
    chaindb = ChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = HeaderDB(base_db)
    chain = RopstenChain(base_db)
    return Server(
        privkey,
        address.tcp_port,
        chain,
        chaindb,
        headerdb,
        base_db,
        network_id=NETWORK_ID,
        peer_class=peer_class,
    )
def get_server(privkey, address, peer_class):
    """Build a Server over a fresh in-memory Ropsten chain, network id 1."""
    backing_db = MemoryDB()
    chaindb = ChainDB(backing_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = HeaderDB(backing_db)
    chain = RopstenChain(chaindb)
    return Server(
        privkey,
        address,
        chain,
        chaindb,
        headerdb,
        backing_db,
        network_id=1,
        min_peers=1,
        peer_class=peer_class,
    )
def chaindb(request):
    """Parametrized fixture: a ChainDB whose trie class matches the
    requested account-state class (hexary for the main state DB,
    binary otherwise)."""
    uses_main_state = request.param is MainAccountStateDB
    return ChainDB(
        get_db_backend(),
        account_state_class=request.param,
        trie_class=HexaryTrie if uses_main_state else BinaryTrie,
    )
def make_trie_root_and_nodes(transactions, trie_class=HexaryTrie):
    """Build a trie mapping RLP-encoded indexes to RLP-encoded transactions.

    Returns a ``(root_hash, kv_store)`` pair, where ``kv_store`` is the raw
    key/value store underlying the trie.
    """
    chaindb = ChainDB(MemoryDB())
    transaction_db = trie_class(chaindb.db)
    for index, transaction in enumerate(transactions):
        encoded_index = rlp.encode(index, sedes=rlp.sedes.big_endian_int)
        transaction_db[encoded_index] = rlp.encode(transaction)
    return transaction_db.root_hash, transaction_db.db.wrapped_db.kv_store
def _test():
    """Manual smoke-test entry point: download state from Ropsten peers,
    shutting down cleanly on SIGINT/SIGTERM.

    Command line:
        -db         path for the LevelDB where downloaded state is stored
        -root-hash  hex encoded state root to download
    """
    import argparse
    import signal
    from evm.p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash', type=str, required=True, help='Hex encoded root hash')
    args = parser.parse_args()

    # A throwaway in-memory chaindb holding only the genesis header; the peer
    # pool needs it for the handshake.
    chaindb = ChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()
    # Trigger the downloader's cancel token on SIGINT/SIGTERM so run()
    # returns instead of the process being killed mid-write.
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
def setup_tester_chain():
    """Create a MainnetTesterChain from the default genesis parameters and
    default funded accounts.

    Returns a ``(account_keys, chain)`` tuple.
    """
    from evm.chains.tester import MainnetTesterChain
    from evm.db import get_db_backend
    from evm.db.chain import ChainDB

    account_keys = get_default_account_keys()
    genesis_state = generate_genesis_state(account_keys)
    genesis_params = get_default_genesis_params()

    db = ChainDB(get_db_backend())
    chain = MainnetTesterChain.from_genesis(db, genesis_params, genesis_state)
    return account_keys, chain
def serve_chaindb(db, ipc_path):
    """Expose *db* and a ChainDB view of it over a multiprocessing manager
    listening on the given IPC socket path.

    Blocks forever serving requests; intended to run in a child process.
    """
    chaindb = ChainDB(db)

    class DBManager(BaseManager):
        pass

    # Clients retrieve proxies to the same underlying objects via these keys.
    DBManager.register('get_db', callable=lambda: db, proxytype=DBProxy)
    DBManager.register('get_chaindb', callable=lambda: chaindb, proxytype=ChainDBProxy)

    manager = DBManager(address=ipc_path)
    server = manager.get_server()
    server.serve_forever()
def get_server(privkey, address, peer_class):
    """Build a Server over an empty in-memory ChainDB with no bootnodes."""
    chaindb = ChainDB(MemoryDB())
    return Server(
        privkey,
        address,
        chaindb,
        [],
        network_id=1,
        min_peers=1,
        peer_class=peer_class,
    )
def database_server_ipc_path():
    """Fixture-style generator: start a chaindb server in a child process and
    yield the path of the IPC socket it serves on.

    The server process is killed gracefully on teardown; the temporary
    directory holding the socket is removed when the ``with`` block exits.
    """
    core_db = MemoryDB()
    # Seed an entry so the served DB is non-empty; presumably read back by a
    # consuming test -- TODO confirm against callers.
    core_db[b'key-a'] = b'value-a'
    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    with tempfile.TemporaryDirectory() as temp_dir:
        ipc_path = os.path.join(temp_dir, 'chaindb.ipc')

        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(core_db, ipc_path),
        )
        chaindb_server_process.start()

        # Block until the server has actually bound its IPC socket.
        wait_for_ipc(ipc_path)

        try:
            yield ipc_path
        finally:
            # Always tear the child process down, even if the test failed.
            kill_process_gracefully(chaindb_server_process)
def get_server(privkey, address, bootstrap_nodes=None, peer_class=DumbPeer):
    """Build a Server over an empty in-memory ChainDB.

    ``bootstrap_nodes`` defaults to an empty list when not given.
    """
    chaindb = ChainDB(MemoryDB())
    return Server(
        privkey,
        address,
        chaindb,
        [] if bootstrap_nodes is None else bootstrap_nodes,
        1,
        min_peers=1,
        peer_class=peer_class,
    )
def serve_chaindb(db: BaseDB, ipc_path: str) -> None:
    """Expose *db* and a ChainDB view of it over a multiprocessing manager
    listening on the given IPC socket path.

    Blocks forever serving requests; intended to run in a child process.
    """
    chaindb = ChainDB(db)

    class DBManager(BaseManager):
        pass

    # Typeshed definitions for multiprocessing.managers is incomplete, so ignore them for now:
    # https://github.com/python/typeshed/blob/85a788dbcaa5e9e9a62e55f15d44530cd28ba830/stdlib/3/multiprocessing/managers.pyi#L3
    DBManager.register('get_db', callable=lambda: db, proxytype=DBProxy)  # type: ignore
    DBManager.register('get_chaindb', callable=lambda: chaindb, proxytype=ChainDBProxy)  # type: ignore

    manager = DBManager(address=ipc_path)  # type: ignore
    server = manager.get_server()  # type: ignore
    server.serve_forever()  # type: ignore
def new_chain_from_fixture(fixture):
    """Build a MainnetChain subclass configured from *fixture* and
    initialize it from the fixture's genesis parameters and pre-state."""
    ChainFromFixture = MainnetChain.configure(
        'ChainFromFixture',
        vm_configuration=chain_vm_configuration(fixture),
    )
    return ChainFromFixture.from_genesis(
        ChainDB(get_db_backend()),
        genesis_params=genesis_params_from_fixture(fixture),
        genesis_state=fixture['pre'],
    )
def initialize_database(chain_config: ChainConfig, chaindb: ChainDB) -> None:
    """Persist the appropriate genesis header when the database is empty.

    Supports the mainnet and ropsten networks; any other network id raises
    ``NotImplementedError``.
    """
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        # We're starting with a fresh DB.
        network_id = chain_config.network_id
        if network_id == ROPSTEN_NETWORK_ID:
            chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
        elif network_id == MAINNET_NETWORK_ID:
            chaindb.persist_header(MAINNET_GENESIS_HEADER)
        else:
            # TODO: add genesis data to ChainConfig and if it's present, use it
            # here to initialize the chain.
            raise NotImplementedError(
                "Only the mainnet and ropsten chains are currently supported")
def chain_without_block_validation(chaindb, funded_address, funded_address_initial_balance): """ Return a Chain object containing just the genesis block. This Chain does not perform any validation when importing new blocks. The Chain's state includes one funded account and a private key for it, which can be found in the funded_address and private_keys variables in the chain itself. """ # Disable block validation so that we don't need to construct finalized blocks. overrides = { 'import_block': import_block_without_validation, 'validate_block': lambda self, block: None, } klass = Chain.configure( name='TestChainWithoutBlockValidation', vm_configuration=((constants.GENESIS_BLOCK_NUMBER, FrontierVM), ), **overrides, ) genesis_params = { 'block_number': constants.GENESIS_BLOCK_NUMBER, 'difficulty': constants.GENESIS_DIFFICULTY, 'gas_limit': constants.GENESIS_GAS_LIMIT, 'parent_hash': constants.GENESIS_PARENT_HASH, 'coinbase': constants.GENESIS_COINBASE, 'nonce': constants.GENESIS_NONCE, 'mix_hash': constants.GENESIS_MIX_HASH, 'extra_data': constants.GENESIS_EXTRA_DATA, 'timestamp': 1501851927, } genesis_state = { funded_address: { 'balance': funded_address_initial_balance, 'nonce': 0, 'code': b'', 'storage': {}, } } chain = klass.from_genesis(ChainDB(get_db_backend()), genesis_params, genesis_state) return chain
def test_state_fixtures(fixture, fixture_vm_class):
    """Run a single GeneralState-style JSON fixture against the given VM class.

    Builds a header and VM from the fixture's ``env`` section, seeds the
    pre-state, applies the fixture transaction, then checks logs, output and
    the final state root against the fixture's ``post`` section.
    """
    header = BlockHeader(
        coinbase=fixture['env']['currentCoinbase'],
        difficulty=fixture['env']['currentDifficulty'],
        block_number=fixture['env']['currentNumber'],
        gas_limit=fixture['env']['currentGasLimit'],
        timestamp=fixture['env']['currentTimestamp'],
        parent_hash=fixture['env']['previousHash'],
    )
    chaindb = ChainDB(get_db_backend())
    vm = fixture_vm_class(header=header, chaindb=chaindb)

    vm_state = vm.state
    with vm_state.mutable_state_db() as state_db:
        setup_state_db(fixture['pre'], state_db)
    # Update state_root manually
    vm.block.header.state_root = vm_state.state_root

    # Fixtures supply the transaction either with a signing key or with a
    # pre-computed (v, r, s) signature.
    if 'secretKey' in fixture['transaction']:
        unsigned_transaction = vm.create_unsigned_transaction(
            nonce=fixture['transaction']['nonce'],
            gas_price=fixture['transaction']['gasPrice'],
            gas=fixture['transaction']['gasLimit'],
            to=fixture['transaction']['to'],
            value=fixture['transaction']['value'],
            data=fixture['transaction']['data'],
        )
        private_key = keys.PrivateKey(fixture['transaction']['secretKey'])
        transaction = unsigned_transaction.as_signed_transaction(private_key=private_key)
    elif 'vrs' in fixture['transaction']:
        v, r, s = (
            fixture['transaction']['v'],
            fixture['transaction']['r'],
            fixture['transaction']['s'],
        )
        transaction = vm.create_transaction(
            nonce=fixture['transaction']['nonce'],
            gas_price=fixture['transaction']['gasPrice'],
            gas=fixture['transaction']['gasLimit'],
            to=fixture['transaction']['to'],
            value=fixture['transaction']['value'],
            data=fixture['transaction']['data'],
            v=v,
            r=r,
            s=s,
        )

    try:
        computation, _ = vm.apply_transaction(transaction)
    except ValidationError as err:
        transaction_error = err
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        LOGGER.warning("Got transaction error:")
        LOGGER.warning(traceback.format_exc())
    else:
        transaction_error = False

    if not transaction_error:
        log_entries = computation.get_log_entries()
        actual_logs_hash = hash_log_entries(log_entries)
        if 'logs' in fixture['post']:
            expected_logs_hash = fixture['post']['logs']
            assert expected_logs_hash == actual_logs_hash
        elif log_entries:
            raise AssertionError("Got log {0} entries. hash:{1}".format(
                len(log_entries),
                actual_logs_hash,
            ))

        if 'out' in fixture:
            expected_output = fixture['out']
            if isinstance(expected_output, int):
                assert len(computation.output) == expected_output
            else:
                assert computation.output == expected_output

    # The post-state root must match regardless of whether the transaction
    # itself was valid.
    assert vm.block.header.state_root == fixture['post']['hash']
# CLI entry: serve an RPC demo app over a light chain backed by LevelDB.
parser = argparse.ArgumentParser()
parser.add_argument('-db', type=str, required=True)
parser.add_argument('-mainnet', action="store_true")
args = parser.parse_args()

# Default to Ropsten; the -mainnet flag switches genesis header and network id.
GENESIS_HEADER = ROPSTEN_GENESIS_HEADER
NETWORK_ID = ROPSTEN_NETWORK_ID
if args.mainnet:
    GENESIS_HEADER = MAINNET_GENESIS_HEADER
    NETWORK_ID = MAINNET_NETWORK_ID

DemoLightChain = LightChain.configure(
    'RPCDemoLightChain',
    vm_configuration=MAINNET_VM_CONFIGURATION,
    network_id=NETWORK_ID,
    privkey=ecies.generate_privkey(),
)

chaindb = ChainDB(LevelDB(args.db))
try:
    chaindb.get_canonical_head()
except CanonicalHeadNotFound:
    # We're starting with a fresh DB.
    chain = DemoLightChain.from_genesis_header(chaindb, GENESIS_HEADER)
else:
    # We're reusing an existing db.
    chain = DemoLightChain(chaindb)

app = App(chain)
web.run_app(app, port=8080)
def chaindb():
    """Fixture: a ChainDB over the default database backend."""
    backend = get_db_backend()
    return ChainDB(backend)
# NOTE(review): this chunk starts mid-script -- `parser`, `LOGFILE` and
# `LOGLEVEL` are defined earlier, outside the visible region.
parser.add_argument('-db', type=str, required=True)
parser.add_argument('-debug', action='store_true')
args = parser.parse_args()

print("Logging to", LOGFILE)
if args.debug:
    LOGLEVEL = logging.DEBUG
logging.basicConfig(level=LOGLEVEL, filename=LOGFILE)

DemoLightChain = LightChain.configure(
    name='Demo LightChain',
    vm_configuration=MAINNET_VM_CONFIGURATION,
    network_id=ROPSTEN_NETWORK_ID,
)

chaindb = ChainDB(LevelDB(args.db))
peer_pool = PeerPool(LESPeer, chaindb, ROPSTEN_NETWORK_ID, ecies.generate_privkey())
try:
    chaindb.get_canonical_head()
except CanonicalHeadNotFound:
    # We're starting with a fresh DB.
    chain = DemoLightChain.from_genesis_header(chaindb, ROPSTEN_GENESIS_HEADER, peer_pool)
else:
    # We're reusing an existing db.
    chain = DemoLightChain(chaindb, peer_pool)


async def run():
    # Kick off the peer pool in the background.
    # NOTE(review): the rest of this coroutine appears to continue beyond the
    # visible region of the file.
    asyncio.ensure_future(peer_pool.run())
def test_vm_fixtures(fixture, vm_class, computation_getter):
    """Run a single VMTests-style JSON fixture against the given VM class.

    Seeds the pre-state from the fixture, executes the code at
    ``fixture['exec']['address']`` via *computation_getter*, then verifies
    logs, output, remaining gas, child calls and the resulting state.
    A fixture without a ``post`` section is expected to error.
    """
    chaindb = ChainDB(get_db_backend())
    header = BlockHeader(
        coinbase=fixture['env']['currentCoinbase'],
        difficulty=fixture['env']['currentDifficulty'],
        block_number=fixture['env']['currentNumber'],
        gas_limit=fixture['env']['currentGasLimit'],
        timestamp=fixture['env']['currentTimestamp'],
    )
    vm = vm_class(header=header, chaindb=chaindb)
    vm_state = vm.state
    with vm_state.state_db() as state_db:
        setup_state_db(fixture['pre'], state_db)
        code = state_db.get_code(fixture['exec']['address'])
    # Update state_root manually
    vm.block.header.state_root = vm_state.state_root

    computation = computation_getter(fixture, code, vm)
    # Update state_root manually
    vm.block.header.state_root = computation.vm_state.state_root

    if 'post' in fixture:
        #
        # Success checks
        #
        assert not computation.is_error

        log_entries = computation.get_log_entries()
        if 'logs' in fixture:
            actual_logs_hash = hash_log_entries(log_entries)
            expected_logs_hash = fixture['logs']
            assert expected_logs_hash == actual_logs_hash
        elif log_entries:
            raise AssertionError("Got log entries: {0}".format(log_entries))

        expected_output = fixture['out']
        assert computation.output == expected_output

        gas_meter = computation.gas_meter
        expected_gas_remaining = fixture['gas']
        actual_gas_remaining = gas_meter.gas_remaining
        gas_delta = actual_gas_remaining - expected_gas_remaining
        assert gas_delta == 0, "Gas difference: {0}".format(gas_delta)

        # Fix: this line was duplicated in the original; fetch it once.
        call_creates = fixture.get('callcreates', [])
        assert len(computation.children) == len(call_creates)

        for child_computation, created_call in zip(computation.children, call_creates):
            to_address = created_call['destination']
            data = created_call['data']
            gas_limit = created_call['gasLimit']
            value = created_call['value']

            assert child_computation.msg.to == to_address
            # NOTE(review): due to operator precedence this passes whenever
            # msg.code is truthy, regardless of `data` -- presumably deliberate
            # for create-style calls, but worth confirming.
            assert data == child_computation.msg.data or child_computation.msg.code
            assert gas_limit == child_computation.msg.gas
            assert value == child_computation.msg.value
        post_state = fixture['post']
    else:
        #
        # Error checks
        #
        assert computation.is_error
        assert isinstance(computation._error, VMError)
        post_state = fixture['pre']

    with vm.state.state_db(read_only=True) as state_db:
        verify_state_db(post_state, state_db)
def chaindb(base_db):
    """Fixture: wrap the shared base database in a ChainDB."""
    wrapped = ChainDB(base_db)
    return wrapped
def test_apply_transaction(  # noqa: F811
        chain_without_block_validation,
        funded_address,
        funded_address_private_key):
    """Cross-check VM.apply_transaction against SpuriousDragonVMState.

    Applies the same two value transfers three ways -- via the VM, via a
    fresh VMState, and via VMStates built from access-log witnesses -- and
    asserts all paths produce identical block data.
    """
    chain = chain_without_block_validation  # noqa: F811

    # Don't change these variables
    vm = chain.get_vm()
    chaindb = copy.deepcopy(vm.chaindb)
    block0 = copy.deepcopy(vm.block)
    prev_block_hash = chain.get_canonical_block_by_number(0).hash
    initial_state_root = vm.block.header.state_root

    # (1) Get VM.apply_transaction(transaction) result for assertion
    # The first transaction
    chain1 = copy.deepcopy(chain)
    vm_example = chain1.get_vm()
    recipient1 = decode_hex('0x1111111111111111111111111111111111111111')
    amount = 100
    from_ = funded_address
    tx1 = new_transaction(
        vm_example,
        from_,
        recipient1,
        amount,
        private_key=funded_address_private_key,
    )
    computation, result_block = vm_example.apply_transaction(tx1)

    # The second transaction
    recipient2 = decode_hex('0x2222222222222222222222222222222222222222')
    tx2 = new_transaction(
        vm_example,
        from_,
        recipient2,
        amount,
        private_key=funded_address_private_key,
    )
    computation, result_block = vm_example.apply_transaction(tx2)
    assert len(result_block.transactions) == 2

    # (2) Test VMState.apply_transaction(...)
    # Use SpuriousDragonVMState to apply transaction
    chaindb1 = copy.deepcopy(chaindb)
    block1 = copy.deepcopy(block0)
    prev_hashes = vm.get_prev_hashes(
        last_block_hash=prev_block_hash,
        db=vm.chaindb,
    )
    execution_context = block1.header.create_execution_context(prev_hashes)
    vm_state1 = SpuriousDragonVMState(
        chaindb=chaindb1,
        execution_context=execution_context,
        state_root=block1.header.state_root,
        receipts=[],
    )
    parent_hash = copy.deepcopy(prev_hashes[0])
    computation, block, _ = vm_state1.apply_transaction(
        tx1,
        block1,
    )
    access_logs1 = computation.vm_state.access_logs

    # Check if prev_hashes hasn't been changed
    assert parent_hash == prev_hashes[0]
    # Make sure that block1 hasn't been changed
    assert block1.header.state_root == initial_state_root

    # Apply the second transaction on top of the result of the first.
    execution_context = block.header.create_execution_context(prev_hashes)
    vm_state1 = SpuriousDragonVMState(
        chaindb=chaindb1,
        execution_context=execution_context,
        state_root=block.header.state_root,
        receipts=computation.vm_state.receipts,
    )
    computation, block, _ = vm_state1.apply_transaction(
        tx2,
        block,
    )
    access_logs2 = computation.vm_state.access_logs
    post_vm_state = computation.vm_state

    # Check AccessLogs: the second transaction's writes must contain the
    # balance of recipient2 but not of recipient1 (touched only by tx1).
    witness_db = ChainDB(MemoryDB(access_logs2.writes))
    state_db = witness_db.get_state_db(block.header.state_root, read_only=True)
    assert state_db.get_balance(recipient2) == amount
    with pytest.raises(KeyError):
        _ = state_db.get_balance(recipient1)

    # Check block data are correct
    assert block.header.state_root == result_block.header.state_root
    assert block.header.gas_limit == result_block.header.gas_limit
    assert block.header.gas_used == result_block.header.gas_used
    assert block.header.transaction_root == result_block.header.transaction_root
    assert block.header.receipt_root == result_block.header.receipt_root

    # Make sure that vm_state1 hasn't been changed
    assert post_vm_state.state_root == result_block.header.state_root

    # (3) Testing using witness as db data
    # Witness_db
    block2 = copy.deepcopy(block0)
    witness_db = ChainDB(MemoryDB(access_logs1.reads))
    prev_hashes = vm.get_prev_hashes(
        last_block_hash=prev_block_hash,
        db=vm.chaindb,
    )
    execution_context = block2.header.create_execution_context(prev_hashes)
    # Apply the first transaction
    vm_state2 = SpuriousDragonVMState(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block2.header.state_root,
        receipts=[],
    )
    computation, block, _ = vm_state2.apply_transaction(
        tx1,
        block2,
    )

    # Update witness_db
    recent_trie_nodes = merge(access_logs2.reads, access_logs1.writes)
    witness_db = ChainDB(MemoryDB(recent_trie_nodes))
    execution_context = block.header.create_execution_context(prev_hashes)
    # Apply the second transaction
    vm_state2 = SpuriousDragonVMState(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block.header.state_root,
        receipts=computation.vm_state.receipts,
    )
    computation, block, _ = vm_state2.apply_transaction(
        tx2,
        block,
    )

    # After applying
    assert block.header.state_root == computation.vm_state.state_root
    assert block.header.transaction_root == result_block.header.transaction_root
    assert block.header.receipt_root == result_block.header.receipt_root
    assert block.hash == result_block.hash

    # (3) Testing using witness_db and block_header to reconstruct vm_state
    prev_hashes = vm.get_prev_hashes(
        last_block_hash=prev_block_hash,
        db=vm.chaindb,
    )
    execution_context = block.header.create_execution_context(prev_hashes)
    vm_state3 = SpuriousDragonVMState(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block.header.state_root,
    )
    assert vm_state3.state_root == post_vm_state.state_root
    assert vm_state3.state_root == result_block.header.state_root
def chain():
    """
    Return a Chain object containing just the genesis block.

    The Chain's state includes one funded account, which can be found in
    the funded_address in the chain itself.

    This Chain will perform all validations when importing new blocks, so
    only valid and finalized blocks can be used with it. If you want to
    test importing arbitrarily constructe, not finalized blocks, use the
    chain_without_block_validation fixture instead.
    """
    # Fixed genesis parameters so the chain is deterministic across runs.
    genesis_params = {
        "bloom": 0,
        "coinbase": to_canonical_address("8888f1f195afa192cfee860698584c030f4c9db1"),
        "difficulty": 131072,
        "extra_data": b"B",
        "gas_limit": 3141592,
        "gas_used": 0,
        "mix_hash": decode_hex(
            "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
        ),
        "nonce": decode_hex("0102030405060708"),
        "block_number": 0,
        "parent_hash": decode_hex(
            "0000000000000000000000000000000000000000000000000000000000000000"
        ),
        "receipt_root": decode_hex(
            "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
        ),
        "state_root": decode_hex(
            "cafd881ab193703b83816c49ff6c2bf6ba6f464a1be560c42106128c8dbc35e7"
        ),
        "timestamp": 1422494849,
        "transaction_root": decode_hex(
            "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
        ),
        "uncles_hash": decode_hex(
            "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")
    }
    funded_addr = to_canonical_address(
        "a94f5374fce5edbc8e2a8697c15331677e6ebf0b")
    initial_balance = 10000000000
    genesis_state = {
        funded_addr: {
            "balance": initial_balance,
            "nonce": 0,
            "code": b"",
            "storage": {}
        }
    }
    klass = Chain.configure(
        name='TestChain',
        vm_configuration=((constants.GENESIS_BLOCK_NUMBER, FrontierVM), ))
    chain = klass.from_genesis(ChainDB(get_db_backend()), genesis_params, genesis_state)
    # Expose the funded account to tests via attributes on the chain object.
    chain.funded_address = funded_addr
    chain.funded_address_initial_balance = initial_balance
    return chain
def chaindb(chain_config):
    """Fixture: a ChainDB backed by LevelDB at the configured database dir."""
    backend = LevelDB(db_path=chain_config.database_dir)
    return ChainDB(backend)
def chain_without_block_validation(): """ Return a Chain object containing just the genesis block. This Chain does not perform any validation when importing new blocks. The Chain's state includes one funded account and a private key for it, which can be found in the funded_address and private_keys variables in the chain itself. """ # Disable block validation so that we don't need to construct finalized blocks. overrides = { 'import_block': import_block_without_validation, 'validate_block': lambda self, block: None, } klass = Chain.configure( name='TestChainWithoutBlockValidation', vm_configuration=((constants.GENESIS_BLOCK_NUMBER, FrontierVM), ), **overrides, ) private_key = KeyAPI().PrivateKey( decode_hex( '0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8' )) funded_addr = private_key.public_key.to_canonical_address() initial_balance = 100000000 genesis_params = { 'block_number': constants.GENESIS_BLOCK_NUMBER, 'difficulty': constants.GENESIS_DIFFICULTY, 'gas_limit': constants.GENESIS_GAS_LIMIT, 'parent_hash': constants.GENESIS_PARENT_HASH, 'coinbase': constants.GENESIS_COINBASE, 'nonce': constants.GENESIS_NONCE, 'mix_hash': constants.GENESIS_MIX_HASH, 'extra_data': constants.GENESIS_EXTRA_DATA, 'timestamp': 1501851927, 'state_root': decode_hex( '0x9d354f9b5ba851a35eced279ef377111387197581429cfcc7f744ef89a30b5d4' ) } genesis_state = { funded_addr: { 'balance': initial_balance, 'nonce': 0, 'code': b'', 'storage': {}, } } chain = klass.from_genesis(ChainDB(get_db_backend()), genesis_params, genesis_state) chain.funded_address = funded_addr chain.funded_address_initial_balance = initial_balance chain.funded_address_private_key = private_key return chain
def test_apply_transaction(  # noqa: F811
        chain_without_block_validation,
        funded_address,
        funded_address_private_key):
    """Cross-check VM.apply_transaction against FrontierVMState.

    Applies the same two value transfers three ways -- via the VM, via a
    fresh VMState, and via VMStates built from access-log witnesses -- and
    asserts all paths produce identical block data.
    """
    chain = chain_without_block_validation  # noqa: F811

    # Don't change these variables
    vm = chain.get_vm()
    chaindb = copy.deepcopy(vm.chaindb)
    block0 = copy.deepcopy(vm.block)
    prev_block_hash = chain.get_canonical_block_by_number(0).hash
    initial_state_root = vm.block.header.state_root

    # (1) Get VM.apply_transaction(transaction) result for assertion
    # The first transaction
    chain1 = copy.deepcopy(chain)
    vm_example = chain1.get_vm()
    recipient1 = decode_hex('0x1111111111111111111111111111111111111111')
    amount = 100
    from_ = funded_address
    tx1 = new_transaction(
        vm_example,
        from_,
        recipient1,
        amount,
        private_key=funded_address_private_key,
    )
    computation, result_block = vm_example.apply_transaction(tx1)

    # The second transaction
    recipient2 = decode_hex('0x2222222222222222222222222222222222222222')
    tx2 = new_transaction(
        vm_example,
        from_,
        recipient2,
        amount,
        private_key=funded_address_private_key,
    )
    computation, result_block = vm_example.apply_transaction(tx2)
    assert len(result_block.transactions) == 2

    # (2) Test VMState.apply_transaction(...)
    # Use FrontierVMState to apply transaction
    chaindb1 = copy.deepcopy(chaindb)
    block1 = copy.deepcopy(block0)
    prev_hashes = vm.get_prev_hashes(
        last_block_hash=prev_block_hash,
        db=vm.chaindb,
    )
    execution_context = ExecutionContext.from_block_header(block1.header, prev_hashes)
    vm_state1 = FrontierVMState(
        chaindb=chaindb1,
        execution_context=execution_context,
        state_root=block1.header.state_root,
        receipts=[],
    )
    parent_hash = copy.deepcopy(prev_hashes[0])
    computation, block, _ = vm_state1.apply_transaction(
        tx1,
        block1,
    )
    access_logs1 = computation.vm_state.access_logs

    # Check if prev_hashes hasn't been changed
    assert parent_hash == prev_hashes[0]
    # Make sure that block1 hasn't been changed
    assert block1.header.state_root == initial_state_root

    # Apply the second transaction on top of the result of the first.
    execution_context = ExecutionContext.from_block_header(block.header, prev_hashes)
    vm_state1 = FrontierVMState(
        chaindb=chaindb1,
        execution_context=execution_context,
        state_root=block.header.state_root,
        receipts=computation.vm_state.receipts,
    )
    computation, block, _ = vm_state1.apply_transaction(
        tx2,
        block,
    )
    access_logs2 = computation.vm_state.access_logs
    post_vm_state = computation.vm_state

    # Check AccessLogs: the second transaction's writes must contain the
    # balance of recipient2 but not of recipient1 (touched only by tx1).
    witness_db = ChainDB(MemoryDB(access_logs2.writes))
    state_db = witness_db.get_state_db(block.header.state_root, read_only=True)
    assert state_db.get_balance(recipient2) == amount
    with pytest.raises(KeyError):
        _ = state_db.get_balance(recipient1)

    # Check block data are correct
    assert block.header.state_root == result_block.header.state_root
    assert block.header.gas_limit == result_block.header.gas_limit
    assert block.header.gas_used == result_block.header.gas_used
    assert block.header.transaction_root == result_block.header.transaction_root
    assert block.header.receipt_root == result_block.header.receipt_root

    # Make sure that vm_state1 hasn't been changed
    assert post_vm_state.state_root == result_block.header.state_root

    # (3) Testing using witness as db data
    # Witness_db
    block2 = copy.deepcopy(block0)
    witness_db = ChainDB(MemoryDB(access_logs1.reads))
    prev_hashes = vm.get_prev_hashes(
        last_block_hash=prev_block_hash,
        db=vm.chaindb,
    )
    execution_context = ExecutionContext.from_block_header(block2.header, prev_hashes)
    # Apply the first transaction
    vm_state2 = FrontierVMState(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block2.header.state_root,
        receipts=[],
    )
    computation, block, _ = vm_state2.apply_transaction(
        tx1,
        block2,
    )

    # Update witness_db
    recent_trie_nodes = merge(access_logs2.reads, access_logs1.writes)
    witness_db = ChainDB(MemoryDB(recent_trie_nodes))
    execution_context = ExecutionContext.from_block_header(block.header, prev_hashes)
    # Apply the second transaction
    vm_state2 = FrontierVMState(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block.header.state_root,
        receipts=computation.vm_state.receipts,
    )
    computation, block, _ = vm_state2.apply_transaction(
        tx2,
        block,
    )

    # After applying
    assert block.header.state_root == computation.vm_state.state_root
    assert block.header.transaction_root == result_block.header.transaction_root
    assert block.header.receipt_root == result_block.header.receipt_root
    assert block.hash == result_block.hash

    # (3) Testing using witness_db and block_header to reconstruct vm_state
    prev_hashes = vm.get_prev_hashes(
        last_block_hash=prev_block_hash,
        db=vm.chaindb,
    )
    execution_context = ExecutionContext.from_block_header(block.header, prev_hashes)
    vm_state3 = FrontierVMState(
        chaindb=witness_db,
        execution_context=execution_context,
        state_root=block.header.state_root,
    )
    assert vm_state3.state_root == post_vm_state.state_root
    assert vm_state3.state_root == result_block.header.state_root