def db_shell(use_ipython: bool, database_dir: Path, trinity_config: TrinityConfig) -> None:
    """Open an interactive (I)Python shell with chain objects preloaded.

    Opens the LevelDB at ``database_dir`` directly (no IPC attach in this
    variant) and exposes the database, ChainDB, configs and chain instance
    as shell variables.
    """
    db = LevelDB(database_dir)
    chaindb = ChainDB(db)
    head = chaindb.get_canonical_head()
    chain_config = trinity_config.get_chain_config()
    chain = chain_config.full_chain_class(db)
    # Shell banner; appended to DB_SHELL_BANNER below.  Literal kept as-is.
    greeter = f""" Head: #{head.block_number} Hash: {head.hex_hash} State Root: {encode_hex(head.state_root)} Available Context Variables: - `db`: base database object - `chaindb`: `ChainDB` instance - `trinity_config`: `TrinityConfig` instance - `chain_config`: `ChainConfig` instance - `chain`: `Chain` instance """
    # These names become top-level variables inside the shell session.
    namespace = {
        'db': db,
        'chaindb': chaindb,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
    }
    shell(use_ipython, namespace, DB_SHELL_BANNER + greeter)
def get_eth1_shell_context(database_dir: Path, trinity_config: TrinityConfig) -> Dict[str, Any]:
    """Assemble the variable namespace for the eth1 DB shell.

    If a trinity process is already serving the database over IPC, attach to
    it via ``DBClient``; otherwise open the LevelDB at ``database_dir``
    directly.
    """
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    ipc_path = trinity_config.database_ipc_path
    attached_to_running_node = ipc_path.exists()

    db: DatabaseAPI
    if attached_to_running_node:
        db = DBClient.connect(ipc_path)
    else:
        db = LevelDB(database_dir)

    chaindb = ChainDB(db)
    head = chaindb.get_canonical_head()
    chain_config = app_config.get_chain_config()
    chain = chain_config.full_chain_class(db)

    # A mining-capable twin of the regular chain, sharing its VM
    # configuration and chain id, for experimenting from the shell.
    mining_chain = MiningChain.configure(
        __name__=chain_config.full_chain_class.__name__,
        vm_configuration=chain.vm_configuration,
        chain_id=chain.chain_id,
    )(db)

    return {
        'db': db,
        'chaindb': chaindb,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
        'mining_chain': mining_chain,
        'block_number': head.block_number,
        'hex_hash': head.hex_hash,
        'state_root_hex': encode_hex(head.state_root),
        'trinity_already_running': attached_to_running_node,
    }
def database_server_ipc_path():
    """Yield the IPC path of a chaindb server running in a child process.

    Spins the server up over an in-memory database seeded with the Ropsten
    genesis header, waits for the socket, and tears the process down when
    the consumer is done.
    """
    base_db = MemoryDB()
    base_db[b'key-a'] = b'value-a'
    # TODO: use a custom chain class only for testing.
    ChainDB(base_db).persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID, data_dir=temp_dir)
        manager = get_chaindb_manager(chain_config, base_db)
        server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager, ),
        )
        server_process.start()
        wait_for_ipc(chain_config.database_ipc_path)
        try:
            yield chain_config.database_ipc_path
        finally:
            # Always reap the child process, even if the consumer raised.
            kill_process_gracefully(server_process, logging.getLogger())
def database_server_ipc_path():
    """Yield the IPC path of a chaindb server running in a child process.

    Uses a TrinityConfig with an Eth1AppConfig, initializes the data dir,
    and serves an AtomicDB seeded with the Ropsten genesis header.  The
    server process is killed when the consumer is done.
    """
    base_db = AtomicDB()
    base_db[b'key-a'] = b'value-a'
    # TODO: use a custom chain class only for testing.
    ChainDB(base_db).persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        trinity_config = TrinityConfig(
            network_id=ROPSTEN_NETWORK_ID,
            trinity_root_dir=temp_dir,
        )
        trinity_config.add_app_config(Eth1AppConfig(trinity_config, None))
        initialize_data_dir(trinity_config)
        manager = create_db_server_manager(trinity_config, base_db)
        server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager, ),
        )
        server_process.start()
        wait_for_ipc(trinity_config.database_ipc_path)
        try:
            yield trinity_config.database_ipc_path
        finally:
            # Always reap the child process, even if the consumer raised.
            kill_process_gracefully(server_process, logging.getLogger())
def get_eth1_shell_context(database_dir: Path, trinity_config: TrinityConfig) -> Dict[str, Any]:
    """Assemble the variable namespace for the eth1 DB shell.

    If a trinity process is already serving the database over IPC, attach
    via a DB consumer manager; otherwise open the LevelDB at
    ``database_dir`` directly.
    """
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    ipc_path = trinity_config.database_ipc_path
    trinity_already_running = ipc_path.exists()
    if trinity_already_running:
        db_manager = eth1.manager.create_db_consumer_manager(ipc_path)  # type: ignore
        db = db_manager.get_db()
    else:
        db = LevelDB(database_dir)
    chaindb = ChainDB(db)
    head = chaindb.get_canonical_head()
    chain_config = app_config.get_chain_config()
    chain = chain_config.full_chain_class(db)
    return {
        'db': db,
        'chaindb': chaindb,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
        'block_number': head.block_number,
        'hex_hash': head.hex_hash,
        'state_root_hex': encode_hex(head.state_root),
        'trinity_already_running': trinity_already_running,
    }
def get_eth1_shell_context(
        database_dir: Path, trinity_config: TrinityConfig) -> Iterator[Dict[str, Any]]:
    """Yield the variable namespace for the eth1 DB shell.

    The base database comes from ``_get_base_db`` and stays open for the
    lifetime of the yielded context.
    """
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    ipc_path = trinity_config.database_ipc_path
    already_running = ipc_path.exists()

    with _get_base_db(database_dir, ipc_path) as db:
        chaindb = ChainDB(db)
        head = chaindb.get_canonical_head()
        chain_config = app_config.get_chain_config()
        chain = chain_config.full_chain_class(db)

        # A mining-capable twin of the regular chain, sharing its VM
        # configuration, consensus context class and chain id.
        mining_chain = MiningChain.configure(
            __name__=chain_config.full_chain_class.__name__,
            vm_configuration=chain.vm_configuration,
            consensus_context_class=chain.consensus_context_class,
            chain_id=chain.chain_id,
        )(db)

        context = {
            'db': db,
            'chaindb': chaindb,
            'trinity_config': trinity_config,
            'chain_config': chain_config,
            'chain': chain,
            'mining_chain': mining_chain,
            'block_number': head.block_number,
            'hex_hash': head.hex_hash,
            'state_root_hex': encode_hex(head.state_root),
            'trinity_already_running': already_running,
        }
        yield context
def db_shell(use_ipython: bool, database_dir: Path) -> None:
    """Open an interactive (I)Python shell with a ChainDB over ``database_dir``.

    Only ``chaindb`` is exposed as a shell variable in this minimal variant.
    """
    chaindb = ChainDB(LevelDB(database_dir))
    head = chaindb.get_canonical_head()
    # Shell banner; appended to DB_SHELL_BANNER below.  Literal kept as-is.
    greeter = f""" Head: #{head.block_number} Hash: {head.hex_hash} State Root: {encode_hex(head.state_root)} """
    shell(use_ipython, {'chaindb': chaindb}, DB_SHELL_BANNER + greeter)
def get_server(privkey, address):
    """Return a ParagonServer backed by a fresh in-memory Ropsten chain."""
    db = AtomicDB()
    chaindb = ChainDB(db)
    # The chaindb needs the genesis header before the server can serve it.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    return ParagonServer(
        privkey=privkey,
        port=address.tcp_port,
        chain=RopstenChain(db),
        chaindb=chaindb,
        headerdb=FakeAsyncHeaderDB(db),
        base_db=db,
        network_id=NETWORK_ID,
    )
def prepare_computation(vm_class):
    """Build a computation for ``vm_class`` with two pre-seeded accounts.

    Touches ``EMPTY_ADDRESS_IN_STATE`` and installs code at
    ``ADDRESS_WITH_CODE`` so callers can distinguish empty, existing and
    code-bearing accounts.
    """
    msg = Message(
        to=CANONICAL_ADDRESS_A,
        sender=CANONICAL_ADDRESS_B,
        value=100,
        data=b'',
        code=b'',
        gas=800,
    )
    tx_ctx = vm_class._state_class.transaction_context_class(
        gas_price=1,
        origin=CANONICAL_ADDRESS_B,
    )
    vm = vm_class(GENESIS_HEADER, ChainDB(MemoryDB()))
    computation = vm_class._state_class.computation_class(
        state=vm.state,
        message=msg,
        transaction_context=tx_ctx,
    )
    account_db = computation.state.account_db
    account_db.touch_account(decode_hex(EMPTY_ADDRESS_IN_STATE))
    account_db.set_code(decode_hex(ADDRESS_WITH_CODE[0]), ADDRESS_WITH_CODE[1])
    return computation
def setup_computation(vm_class, create_address, code):
    """Return a computation for ``vm_class`` set up to execute ``code``."""
    tx_ctx = vm_class._state_class.transaction_context_class(
        gas_price=1,
        origin=CANONICAL_ADDRESS_B,
    )
    msg = Message(
        to=CANONICAL_ADDRESS_A,
        sender=CANONICAL_ADDRESS_B,
        create_address=create_address,
        value=0,
        data=b'',
        code=code,
        gas=1000000,
    )
    vm = vm_class(GENESIS_HEADER, ChainDB(AtomicDB()))
    return vm_class._state_class.computation_class(
        state=vm.state,
        message=msg,
        transaction_context=tx_ctx,
    )
def create_db_server_manager(trinity_config: TrinityConfig,
                             base_db: BaseAtomicDB) -> BaseManager:
    """Create a BaseManager serving db/chaindb/headerdb proxies over IPC.

    Initializes the database from the chain config first if it has not been
    initialized yet.
    """
    chain_config = trinity_config.get_chain_config()
    chaindb = ChainDB(base_db)
    if not is_database_initialized(chaindb):
        initialize_database(chain_config, chaindb, base_db)
    headerdb = HeaderDB(base_db)

    class DBManager(BaseManager):
        pass

    # (public name, served object, proxy type) triples; each served object
    # is wrapped in a TracebackRecorder when fetched.
    for api_name, obj, proxy_type in (
            ('get_db', base_db, AsyncDBProxy),
            ('get_chaindb', chaindb, AsyncChainDBProxy),
            ('get_headerdb', headerdb, AsyncHeaderDBProxy),
    ):
        # ``obj=obj`` binds the current object as a default, sidestepping
        # Python's late-binding closure behavior inside the loop.
        DBManager.register(
            api_name,
            callable=lambda obj=obj: TracebackRecorder(obj),
            proxytype=proxy_type,
        )

    return DBManager(address=str(trinity_config.database_ipc_path))  # type: ignore
def get_base_db(boot_info: BootInfo) -> LevelDB:
    """Open the node's LevelDB database, initializing it on first use."""
    app_config = boot_info.trinity_config.get_app_config(Eth1AppConfig)
    base_db = LevelDB(db_path=app_config.database_dir)
    chaindb = ChainDB(base_db)
    if not is_database_initialized(chaindb):
        initialize_database(app_config.get_chain_config(), chaindb, base_db)
    return base_db
def get_server(privkey, address, peer_class):
    """Return a Server backed by a fresh in-memory Ropsten chain."""
    db = MemoryDB()
    chaindb = ChainDB(db)
    # The chaindb needs the genesis header before the server can serve it.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    return Server(
        privkey,
        address.tcp_port,
        RopstenChain(db),
        chaindb,
        FakeAsyncHeaderDB(db),
        db,
        network_id=NETWORK_ID,
        peer_class=peer_class,
    )
async def test_generate_eth_cap_enr_field():
    """The generated eth-cap ENR field must round-trip to the Ropsten fork id."""
    base_db = AtomicDB()
    # Seed the header DB with the Ropsten genesis so a fork id can be derived.
    ChainDB(base_db).persist_header(ROPSTEN_GENESIS_HEADER)
    enr_field = await generate_eth_cap_enr_field(ROPSTEN_VM_CONFIGURATION, AsyncHeaderDB(base_db))
    # Build an ENR carrying only the generated key/value pair and extract
    # the fork id back out of it.
    enr = ENRFactory(custom_kv_pairs={enr_field[0]: enr_field[1]})
    assert extract_forkid(enr) == ForkID(hash=to_bytes(hexstr='0x30c7ddbc'), next=10)
def setup_vm(vm_class, chain_id=None):
    """Instantiate ``vm_class`` over a fresh AtomicDB with a generated genesis header."""
    base_db = AtomicDB()
    header = vm_class.create_genesis_header(
        difficulty=constants.GENESIS_DIFFICULTY,
        timestamp=0,
    )
    return vm_class(
        header,
        ChainDB(base_db),
        ChainContext(chain_id),
        ConsensusContext(base_db),
    )
def instantiate_vm(vm_class):
    """Instantiate ``vm_class`` over a fresh AtomicDB with a minimal genesis-like header."""
    genesis_header = BlockHeader(
        difficulty=17179869184,
        block_number=BlockNumber(0),
        gas_limit=5000,
    )
    base_db = AtomicDB()
    return vm_class(
        genesis_header,
        ChainDB(base_db),
        ChainContext(None),
        ConsensusContext(base_db),
    )
def __init__(self, base_db: AtomicDatabaseAPI, epoch_length: int = EPOCH_LENGTH) -> None:
    """Initialize over ``base_db``; ``epoch_length`` is forwarded to the SnapshotManager.

    Raises:
        ValueError: if ``base_db`` is None.
    """
    if base_db is None:
        raise ValueError("Can not instantiate without `base_db`")
    self._epoch_length = epoch_length
    # All chain data access goes through a ChainDB wrapper around base_db;
    # the header cache and snapshot manager share that same wrapper.
    self._chain_db = ChainDB(base_db)
    self._header_cache = HeaderCache(self._chain_db)
    self._snapshot_manager = SnapshotManager(
        self._chain_db,
        self._header_cache,
        self._epoch_length,
    )
def _run() -> None:
    """Parse CLI args, initialize the data dir/database if needed, and serve it over IPC.

    Blocks until the DB manager stops or the user hits Ctrl-C.
    """
    # Imports are deferred so merely importing this module stays cheap.
    from eth.db.backends.level import LevelDB
    from eth.db.chain import ChainDB
    from trinity.cli_parser import parser
    from trinity.config import Eth1AppConfig, TrinityConfig
    from trinity.constants import APP_IDENTIFIER_ETH1
    from trinity.initialization import (
        initialize_data_dir,
        is_data_dir_initialized,
        is_database_initialized,
        initialize_database,
        ensure_eth1_dirs,
    )

    # Require a root dir to be specified as we don't want to mess with the default one.
    for action in parser._actions:
        if action.dest == 'trinity_root_dir':
            action.required = True
            break

    args = parser.parse_args()
    # FIXME: Figure out a way to avoid having to set this.
    args.sync_mode = "full"

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%H:%M:%S',
    )
    # Apply per-logger levels requested on the command line.
    for name, level in args.log_levels.items():
        logging.getLogger(name).setLevel(level)

    trinity_config = TrinityConfig.from_parser_args(args, APP_IDENTIFIER_ETH1, (Eth1AppConfig, ))
    trinity_config.trinity_root_dir.mkdir(exist_ok=True)
    if not is_data_dir_initialized(trinity_config):
        initialize_data_dir(trinity_config)

    # Hold the 'database' pid file while serving so only one server runs.
    with trinity_config.process_id_file('database'):
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        ensure_eth1_dirs(app_config)
        base_db = LevelDB(db_path=app_config.database_dir)
        chaindb = ChainDB(base_db)
        if not is_database_initialized(chaindb):
            chain_config = app_config.get_chain_config()
            initialize_database(chain_config, chaindb, base_db)
        manager = DBManager(base_db)
        with manager.run(trinity_config.database_ipc_path):
            try:
                manager.wait_stopped()
            except KeyboardInterrupt:
                # Ctrl-C is the expected way to stop; exit quietly.
                pass
def db_shell(use_ipython: bool, database_dir: Path, trinity_config: TrinityConfig) -> None:
    """Open an interactive (I)Python shell over the chain database.

    Attaches to a running trinity process via the database IPC socket when
    one exists; otherwise opens the LevelDB at ``database_dir`` directly.
    """
    db_ipc_path = trinity_config.database_ipc_path
    trinity_already_running = db_ipc_path.exists()
    if trinity_already_running:
        db_manager = create_db_consumer_manager(db_ipc_path)
        db = db_manager.get_db()  # type: ignore
    else:
        db = LevelDB(database_dir)
    chaindb = ChainDB(db)
    head = chaindb.get_canonical_head()
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    chain = chain_config.full_chain_class(db)
    # Shell banner; appended to DB_SHELL_BANNER below.  Literal kept as-is.
    greeter = f""" Head: #{head.block_number} Hash: {head.hex_hash} State Root: {encode_hex(head.state_root)} Inspecting active Trinity? {trinity_already_running} Available Context Variables: - `db`: base database object - `chaindb`: `ChainDB` instance - `trinity_config`: `TrinityConfig` instance - `chain_config`: `ChainConfig` instance - `chain`: `Chain` instance """
    # These names become top-level variables inside the shell session.
    namespace = {
        'db': db,
        'chaindb': chaindb,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
    }
    shell(use_ipython, namespace, DB_SHELL_BANNER + greeter)
def __init__(self, vm_class):
    """Create a VM of ``vm_class`` over an in-memory ChainDB and an empty header."""
    # Field values for a minimal, empty block header (no transactions,
    # no receipts, no uncles).
    header_fields = dict(
        difficulty=1,
        gas_limit=8000000,
        gas_used=0,
        bloom=0,
        mix_hash=constants.ZERO_HASH32,
        nonce=constants.GENESIS_NONCE,
        block_number=0,
        parent_hash=constants.ZERO_HASH32,
        receipt_root=constants.BLANK_ROOT_HASH,
        uncles_hash=constants.EMPTY_UNCLE_HASH,
        timestamp=0,
        transaction_root=constants.BLANK_ROOT_HASH,
    )
    self.vm = vm_class(BlockHeader(**header_fields), ChainDB(AtomicDB()))
def run_database_process(trinity_config: TrinityConfig, db_class: Type[LevelDB]) -> None:
    """Serve the chain database over IPC until stopped or interrupted.

    Holds the 'database' process-id file while serving, and initializes the
    database from the chain config on first use.
    """
    with trinity_config.process_id_file('database'):
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        base_db = db_class(db_path=app_config.database_dir)
        chaindb = ChainDB(base_db)
        if not is_database_initialized(chaindb):
            initialize_database(app_config.get_chain_config(), chaindb, base_db)
        manager = DBManager(base_db)
        with manager.run(trinity_config.database_ipc_path):
            try:
                manager.wait_stopped()
            except KeyboardInterrupt:
                # Ctrl-C is the expected way to stop; exit quietly.
                pass
def create_db_server_manager(trinity_config: TrinityConfig,
                             base_db: BaseAtomicDB) -> BaseManager:
    """Create a BaseManager serving db/chaindb/headerdb proxies over IPC.

    Initializes the database from the eth1 chain config first if needed.
    """
    eth1_app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = eth1_app_config.get_chain_config()
    chaindb = ChainDB(base_db)
    if not is_database_initialized(chaindb):
        initialize_database(chain_config, chaindb, base_db)
    headerdb = HeaderDB(base_db)

    # This enables connection when clients launch from another process on the shell
    multiprocessing.current_process().authkey = AUTH_KEY

    class DBManager(BaseManager):
        pass

    # (public name, served object, proxy type) triples; each served object
    # is wrapped in a TracebackRecorder when fetched.
    for api_name, obj, proxy_type in (
            ('get_db', base_db, AsyncDBProxy),
            ('get_chaindb', chaindb, AsyncChainDBProxy),
            ('get_headerdb', headerdb, AsyncHeaderDBProxy),
    ):
        # ``obj=obj`` binds the current object as a default, sidestepping
        # Python's late-binding closure behavior inside the loop.
        DBManager.register(
            api_name,
            callable=lambda obj=obj: TracebackRecorder(obj),
            proxytype=proxy_type,
        )

    return DBManager(address=str(trinity_config.database_ipc_path))  # type: ignore
def setup_computation(vm_class,
                      create_address,
                      code,
                      chain_id=None,
                      gas=1000000,
                      to=CANONICAL_ADDRESS_A,
                      data=b''):
    """Return a computation for ``vm_class`` executing ``code`` with the given call parameters."""
    msg = Message(
        to=to,
        sender=CANONICAL_ADDRESS_B,
        create_address=create_address,
        value=0,
        data=data,
        code=code,
        gas=gas,
    )
    tx_ctx = vm_class._state_class.transaction_context_class(
        gas_price=1,
        origin=CANONICAL_ADDRESS_B,
    )
    base_db = AtomicDB()
    vm = vm_class(
        GENESIS_HEADER,
        ChainDB(base_db),
        ChainContext(chain_id),
        ConsensusContext(base_db),
    )
    return vm_class._state_class.computation_class(
        state=vm.state,
        message=msg,
        transaction_context=tx_ctx,
    )
def chaindb(base_db):
    """Return a ``ChainDB`` view over ``base_db``.

    NOTE(review): looks like a pytest fixture (decorator not visible in this
    chunk) — confirm.
    """
    return ChainDB(base_db)
def test_vm_fixtures(fixture, vm_class, computation_getter):
    """Run a VMTests fixture: execute its code and verify output, gas, logs,
    call-creates and the resulting account state.

    Fix: the original assigned ``call_creates = fixture.get('callcreates', [])``
    twice in a row; the redundant second assignment is removed.
    """
    chaindb = ChainDB(get_db_backend())
    header = BlockHeader(
        coinbase=fixture['env']['currentCoinbase'],
        difficulty=fixture['env']['currentDifficulty'],
        block_number=fixture['env']['currentNumber'],
        gas_limit=fixture['env']['currentGasLimit'],
        timestamp=fixture['env']['currentTimestamp'],
    )
    vm = vm_class(header=header, chaindb=chaindb)
    state = vm.state
    setup_state(fixture['pre'], state)
    code = state.get_code(fixture['exec']['address'])
    # Update state_root manually
    vm._block = vm.get_block().copy(header=vm.get_header().copy(
        state_root=state.state_root))

    message = Message(
        to=fixture['exec']['address'],
        sender=fixture['exec']['caller'],
        value=fixture['exec']['value'],
        data=fixture['exec']['data'],
        code=code,
        gas=fixture['exec']['gas'],
    )
    transaction_context = BaseTransactionContext(
        origin=fixture['exec']['origin'],
        gas_price=fixture['exec']['gasPrice'],
    )
    computation = vm.state.get_computation(
        message, transaction_context).apply_computation(
            vm.state,
            message,
            transaction_context,
        )
    # Update state_root manually
    vm._block = vm.get_block().copy(
        header=vm.get_header().copy(state_root=computation.state.state_root),
    )

    if 'post' in fixture:
        #
        # Success checks
        #
        assert not computation.is_error

        log_entries = computation.get_log_entries()
        if 'logs' in fixture:
            actual_logs_hash = hash_log_entries(log_entries)
            expected_logs_hash = fixture['logs']
            assert expected_logs_hash == actual_logs_hash
        elif log_entries:
            raise AssertionError("Got log entries: {0}".format(log_entries))

        expected_output = fixture['out']
        assert computation.output == expected_output

        gas_meter = computation._gas_meter
        expected_gas_remaining = fixture['gas']
        actual_gas_remaining = gas_meter.gas_remaining
        gas_delta = actual_gas_remaining - expected_gas_remaining
        assert gas_delta == 0, "Gas difference: {0}".format(gas_delta)

        call_creates = fixture.get('callcreates', [])
        assert len(computation.children) == len(call_creates)

        for child_computation, created_call in zip(computation.children, call_creates):
            to_address = created_call['destination']
            data = created_call['data']
            gas_limit = created_call['gasLimit']
            value = created_call['value']

            assert child_computation.msg.to == to_address
            assert data == child_computation.msg.data or child_computation.msg.code
            assert gas_limit == child_computation.msg.gas
            assert value == child_computation.msg.value
        expected_account_db = fixture['post']
    else:
        #
        # Error checks
        #
        assert computation.is_error
        assert isinstance(computation._error, VMError)
        expected_account_db = fixture['pre']

    verify_state(expected_account_db, vm.state)
def setup_vm(vm_class, chain_id=None):
    """Instantiate ``vm_class`` over a fresh AtomicDB using the module-level GENESIS_HEADER."""
    base_db = AtomicDB()
    return vm_class(
        GENESIS_HEADER,
        ChainDB(base_db),
        ChainContext(chain_id),
        ConsensusContext(base_db),
    )
def chaindb(chain_config):
    """Return a ``ChainDB`` over the LevelDB at ``chain_config.database_dir``.

    NOTE(review): looks like a pytest fixture (decorator not visible in this
    chunk) — confirm.
    """
    return ChainDB(LevelDB(db_path=chain_config.database_dir))
def test_state_fixtures(fixture, fixture_vm_class):
    """Run a GeneralStateTest fixture: apply the pre-state and the fixture's
    transaction, then check logs, output and the post-state root.

    Fix: ``logger.warn`` is a deprecated alias; replaced with
    ``logger.warning``.
    """
    header = BlockHeader(
        coinbase=fixture['env']['currentCoinbase'],
        difficulty=fixture['env']['currentDifficulty'],
        block_number=fixture['env']['currentNumber'],
        gas_limit=fixture['env']['currentGasLimit'],
        timestamp=fixture['env']['currentTimestamp'],
        parent_hash=fixture['env']['previousHash'],
    )
    chaindb = ChainDB(get_db_backend())
    vm = fixture_vm_class(header=header, chaindb=chaindb)
    state = vm.state
    apply_state_dict(state.account_db, fixture['pre'])
    state.account_db.persist()
    # Update state_root manually
    vm.block = vm.block.copy(header=vm.block.header.copy(
        state_root=state.state_root))

    # The fixture either carries a secret key (sign ourselves) or an
    # already-computed v/r/s signature.
    if 'secretKey' in fixture['transaction']:
        unsigned_transaction = vm.create_unsigned_transaction(
            nonce=fixture['transaction']['nonce'],
            gas_price=fixture['transaction']['gasPrice'],
            gas=fixture['transaction']['gasLimit'],
            to=fixture['transaction']['to'],
            value=fixture['transaction']['value'],
            data=fixture['transaction']['data'],
        )
        private_key = keys.PrivateKey(fixture['transaction']['secretKey'])
        transaction = unsigned_transaction.as_signed_transaction(private_key=private_key)
    elif 'vrs' in fixture['transaction']:
        v, r, s = (
            fixture['transaction']['v'],
            fixture['transaction']['r'],
            fixture['transaction']['s'],
        )
        transaction = vm.create_transaction(
            nonce=fixture['transaction']['nonce'],
            gas_price=fixture['transaction']['gasPrice'],
            gas=fixture['transaction']['gasLimit'],
            to=fixture['transaction']['to'],
            value=fixture['transaction']['value'],
            data=fixture['transaction']['data'],
            v=v,
            r=r,
            s=s,
        )

    try:
        header, receipt, computation = vm.apply_transaction(vm.block.header, transaction)
        transactions = vm.block.transactions + (transaction, )
        receipts = vm.block.get_receipts(chaindb) + (receipt, )
        block = vm.set_block_transactions(vm.block, header, transactions, receipts)
    except ValidationError as err:
        # An invalid transaction is an expected outcome for some fixtures;
        # keep the unmodified block and remember the error.
        block = vm.block
        transaction_error = err
        logger.warning("Got transaction error", exc_info=True)
    else:
        transaction_error = False

    if not transaction_error:
        log_entries = computation.get_log_entries()
        actual_logs_hash = hash_log_entries(log_entries)
        if 'logs' in fixture['post']:
            expected_logs_hash = fixture['post']['logs']
            assert expected_logs_hash == actual_logs_hash
        elif log_entries:
            raise AssertionError("Got log {0} entries. hash:{1}".format(
                len(log_entries),
                actual_logs_hash,
            ))

        if 'out' in fixture:
            expected_output = fixture['out']
            if isinstance(expected_output, int):
                assert len(computation.output) == expected_output
            else:
                assert computation.output == expected_output

    assert block.header.state_root == fixture['post']['hash']
def test_vm_fixtures(fixture, vm_class, computation_getter):
    """Run a VMTests fixture: execute its code and verify output, gas, logs,
    call-creates and the resulting account state.

    Fix: the original assigned ``call_creates = fixture.get('callcreates', [])``
    twice in a row; the redundant second assignment is removed.
    """
    db = get_db_backend()
    chaindb = ChainDB(db)
    consensus_context = ConsensusContext(db)
    header = BlockHeader(
        coinbase=fixture['env']['currentCoinbase'],
        difficulty=fixture['env']['currentDifficulty'],
        block_number=fixture['env']['currentNumber'],
        gas_limit=fixture['env']['currentGasLimit'],
        timestamp=fixture['env']['currentTimestamp'],
    )
    # None of the VM tests (currently) test chain ID, so the setting doesn't matter here.
    # When they *do* start testing ID, they will have to supply it as part of the environment.
    # For now, just hard-code it to something not used in practice:
    chain_context = ChainContext(chain_id=0)
    vm = vm_class(header=header,
                  chaindb=chaindb,
                  chain_context=chain_context,
                  consensus_context=consensus_context)
    state = vm.state
    setup_state(fixture['pre'], state)
    code = state.get_code(fixture['exec']['address'])
    # Update state_root manually
    vm._block = vm.get_block().copy(header=vm.get_header().copy(
        state_root=state.state_root))

    message = Message(
        to=fixture['exec']['address'],
        sender=fixture['exec']['caller'],
        value=fixture['exec']['value'],
        data=fixture['exec']['data'],
        code=code,
        gas=fixture['exec']['gas'],
    )
    transaction_context = BaseTransactionContext(
        origin=fixture['exec']['origin'],
        gas_price=fixture['exec']['gasPrice'],
    )
    computation = vm.state.get_computation(
        message, transaction_context).apply_computation(
            vm.state,
            message,
            transaction_context,
        )
    # Update state_root manually
    vm._block = vm.get_block().copy(
        header=vm.get_header().copy(state_root=computation.state.state_root),
    )

    if 'post' in fixture:
        #
        # Success checks
        #
        assert not computation.is_error

        log_entries = computation.get_log_entries()
        if 'logs' in fixture:
            actual_logs_hash = hash_log_entries(log_entries)
            expected_logs_hash = fixture['logs']
            assert expected_logs_hash == actual_logs_hash
        elif log_entries:
            raise AssertionError(f"Got log entries: {log_entries}")

        expected_output = fixture['out']
        assert computation.output == expected_output

        gas_meter = computation._gas_meter
        expected_gas_remaining = fixture['gas']
        actual_gas_remaining = gas_meter.gas_remaining
        gas_delta = actual_gas_remaining - expected_gas_remaining
        assert gas_delta == 0, f"Gas difference: {gas_delta}"

        call_creates = fixture.get('callcreates', [])
        assert len(computation.children) == len(call_creates)

        for child_computation, created_call in zip(computation.children, call_creates):
            to_address = created_call['destination']
            data = created_call['data']
            gas_limit = created_call['gasLimit']
            value = created_call['value']

            assert child_computation.msg.to == to_address
            assert data == child_computation.msg.data or child_computation.msg.code
            assert gas_limit == child_computation.msg.gas
            assert value == child_computation.msg.value
        expected_account_db = fixture['post']
    else:
        #
        # Error checks
        #
        assert computation.is_error
        assert isinstance(computation._error, VMError)
        expected_account_db = fixture['pre']

    verify_state(expected_account_db, vm.state)
"""Print the canonical head of a local chain database.

Flags: ``-ropsten`` selects the Ropsten network (default mainnet);
``-light`` selects light sync mode (default full).
"""
import argparse

from eth_utils import encode_hex

from eth.chains.mainnet import MAINNET_NETWORK_ID
from eth.chains.ropsten import ROPSTEN_NETWORK_ID
from eth.db.chain import ChainDB
from eth.db.backends.level import LevelDB

from trinity.config import ChainConfig
from trinity.constants import SYNC_FULL, SYNC_LIGHT

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-ropsten', action='store_true')
    parser.add_argument('-light', action='store_true')
    args = parser.parse_args()

    # Network and sync mode default to mainnet / full sync.
    network_id = MAINNET_NETWORK_ID
    if args.ropsten:
        network_id = ROPSTEN_NETWORK_ID
    sync_mode = SYNC_FULL
    if args.light:
        sync_mode = SYNC_LIGHT

    cfg = ChainConfig(network_id, sync_mode=sync_mode)
    # Open the on-disk database read path and report the canonical head.
    chaindb = ChainDB(LevelDB(cfg.database_dir))
    head = chaindb.get_canonical_head()
    print("Head #%d; hash: %s, state_root: %s" %
          (head.block_number, head.hex_hash, encode_hex(head.state_root)))