async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Run the peer-discovery service until it exits.

    Connects to the node database over IPC, then runs either a static
    discovery service (when ``--disable-discovery`` was passed) or the
    full UDP-based ``PreferredNodeDiscoveryService``.  If the service
    dies for any reason, a node-wide shutdown is requested before the
    exception propagates.
    """
    config = boot_info.trinity_config
    db = DBClient.connect(config.database_ipc_path)
    if boot_info.args.disable_discovery:
        # Discovery disabled: only advertise the statically configured peers.
        discovery_service: async_service.Service = StaticDiscoveryService(
            event_bus,
            config.preferred_nodes,
        )
    else:
        vm_config = config.get_app_config(Eth1AppConfig).get_chain_config().vm_configuration
        headerdb = TrioHeaderDB(db)
        # Provider that computes the "eth" capability field for our ENR.
        eth_cap_provider = functools.partial(generate_eth_cap_enr_field, vm_config, headerdb)
        socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
        await socket.bind(("0.0.0.0", config.port))
        base_db = LevelDB(config.node_db_dir)
        node_db = NodeDB(default_identity_scheme_registry, base_db)
        discovery_service = PreferredNodeDiscoveryService(
            config.nodekey,
            # NOTE(review): config.port is passed twice — presumably UDP and
            # TCP ports; confirm against PreferredNodeDiscoveryService's signature.
            config.port,
            config.port,
            config.bootstrap_nodes,
            config.preferred_nodes,
            event_bus,
            socket,
            node_db,
            (eth_cap_provider,),
        )
    try:
        with db:
            await async_service.run_trio_service(discovery_service)
    except Exception:
        # Discovery crashing is treated as fatal: ask the main process to shut down.
        event_bus.broadcast_nowait(ShutdownRequest("Discovery ended unexpectedly"))
        raise
def db_shell(use_ipython: bool, database_dir: Path, trinity_config: TrinityConfig) -> None:
    """Open an interactive shell preloaded with chain database objects.

    Opens the LevelDB at ``database_dir`` directly and exposes the db,
    chaindb, configs and chain instance in the REPL namespace.
    """
    base_db = LevelDB(database_dir)
    chain_db = ChainDB(base_db)
    canonical_head = chain_db.get_canonical_head()
    chain_config = trinity_config.get_chain_config()
    chain = chain_config.full_chain_class(base_db)
    greeter = f"""
    Head: #{canonical_head.block_number}
    Hash: {canonical_head.hex_hash}
    State Root: {encode_hex(canonical_head.state_root)}

    Available Context Variables:
      - `db`: base database object
      - `chaindb`: `ChainDB` instance
      - `trinity_config`: `TrinityConfig` instance
      - `chain_config`: `ChainConfig` instance
      - `chain`: `Chain` instance
    """
    repl_namespace = {
        'db': base_db,
        'chaindb': chain_db,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
    }
    shell(use_ipython, repl_namespace, DB_SHELL_BANNER + greeter)
async def do_run(self, event_bus: EndpointAPI) -> None:
    """Run the peer-discovery service until it exits.

    Uses a no-op discovery service when ``--disable-discovery`` was
    passed; otherwise runs the full UDP-based
    ``PreferredNodeDiscoveryService`` backed by an ENR database.
    """
    boot_info = self._boot_info
    config = boot_info.trinity_config
    db = DBClient.connect(config.database_ipc_path)
    if boot_info.args.disable_discovery:
        # Discovery disabled: a no-op stand-in keeps the component contract.
        discovery_service: async_service.Service = NoopDiscoveryService(
            event_bus,
        )
    else:
        vm_config = config.get_app_config(
            Eth1AppConfig).get_chain_config().vm_configuration
        headerdb = TrioHeaderDB(db)
        # Provider that computes the "eth" capability field for our ENR.
        eth_cap_provider = functools.partial(generate_eth_cap_enr_field, vm_config, headerdb)
        socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
        await socket.bind(("0.0.0.0", config.port))
        base_db = LevelDB(config.enr_db_dir)
        enr_db = ENRDB(base_db)
        discovery_service = PreferredNodeDiscoveryService(
            config.nodekey,
            # NOTE(review): config.port is passed twice — presumably UDP and
            # TCP ports; confirm against PreferredNodeDiscoveryService's signature.
            config.port,
            config.port,
            config.bootstrap_nodes,
            config.preferred_nodes,
            event_bus,
            socket,
            enr_db,
            (eth_cap_provider, ),
        )
    with db:
        await async_service.run_trio_service(discovery_service)
def atomic_db(request, tmpdir):
    """Pytest fixture: build the parametrized database backend.

    ``request.param`` selects the backend:
      - ``'atomic'``: in-memory ``AtomicDB``
      - ``'level'``: ``LevelDB`` stored under a fresh temp directory

    Raises:
        ValueError: for any unrecognized parameter value.
    """
    if request.param == 'atomic':
        return AtomicDB()
    elif request.param == 'level':
        return LevelDB(db_path=tmpdir.mkdir("level_db_path"))
    else:
        # f-string replaces the dated str.format call; message text unchanged.
        raise ValueError(f"Unexpected database type: {request.param}")
def __init__(self,
             chaindb: AsyncChainDB,
             account_db: AsyncBaseDB,
             root_hash: Hash32,
             peer_pool: ETHPeerPool,
             token: CancelToken = None) -> None:
    """Set up a state downloader targeting the state trie at ``root_hash``.

    :param chaindb: chain database used for header lookups.
    :param account_db: database the downloaded account state is written to.
    :param root_hash: state root whose trie will be downloaded.
    :param peer_pool: pool of ETH peers to request trie nodes from.
    :param token: optional cancel token chained into this service's token.
    """
    super().__init__(token)
    self.chaindb = chaindb
    self.peer_pool = peer_pool
    self.root_hash = root_hash
    # We use a LevelDB instance for the nodes cache because a full state download, if run
    # uninterrupted will visit more than 180M nodes, making an in-memory cache unfeasible.
    self._nodes_cache_dir = tempfile.TemporaryDirectory(
        prefix="pyevm-state-sync-cache")
    # Allow the LevelDB instance to consume half of the entire file descriptor limit that
    # the OS permits. Let the other half be reserved for other db access, networking etc.
    max_open_files = get_open_fd_limit() // 2
    self.scheduler = StateSync(
        root_hash, account_db, LevelDB(Path(self._nodes_cache_dir.name), max_open_files),
        self.logger)
    self.request_tracker = TrieNodeRequestTracker(self._reply_timeout, self.logger)
    # Tracks, per peer, the trie nodes that peer reported it does not have.
    self._peer_missing_nodes: Dict[
        ETHPeer, Set[Hash32]] = collections.defaultdict(set)
def get_eth1_shell_context(database_dir: Path,
                           trinity_config: TrinityConfig) -> Dict[str, Any]:
    """Assemble the variable namespace for an eth1 DB shell session.

    Attaches to a running Trinity's database over IPC when one is up;
    otherwise opens the LevelDB directory directly.
    """
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    ipc_path = trinity_config.database_ipc_path
    trinity_already_running = ipc_path.exists()
    if trinity_already_running:
        # A live Trinity owns the db; go through its consumer manager.
        db_manager = eth1.manager.create_db_consumer_manager(ipc_path)  # type: ignore
        base_db = db_manager.get_db()
    else:
        base_db = LevelDB(database_dir)
    chain_db = ChainDB(base_db)
    canonical_head = chain_db.get_canonical_head()
    chain_config = app_config.get_chain_config()
    chain = chain_config.full_chain_class(base_db)
    return {
        'db': base_db,
        'chaindb': chain_db,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
        'block_number': canonical_head.block_number,
        'hex_hash': canonical_head.hex_hash,
        'state_root_hex': encode_hex(canonical_head.state_root),
        'trinity_already_running': trinity_already_running,
    }
def get_eth1_shell_context(database_dir: Path,
                           trinity_config: TrinityConfig) -> Dict[str, Any]:
    """Assemble the variable namespace for an eth1 DB shell session.

    Connects to a running Trinity over IPC when one is up; otherwise
    opens the LevelDB directory directly.  Also exposes a ``MiningChain``
    configured from the full chain class for interactive block building.
    """
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    ipc_path = trinity_config.database_ipc_path
    db: DatabaseAPI
    trinity_already_running = ipc_path.exists()
    db = DBClient.connect(ipc_path) if trinity_already_running else LevelDB(database_dir)
    chain_db = ChainDB(db)
    canonical_head = chain_db.get_canonical_head()
    chain_config = app_config.get_chain_config()
    chain = chain_config.full_chain_class(db)
    # Derive a mining-capable chain class mirroring the full chain's config.
    mining_chain_class = MiningChain.configure(
        __name__=chain_config.full_chain_class.__name__,
        vm_configuration=chain.vm_configuration,
        chain_id=chain.chain_id,
    )
    mining_chain = mining_chain_class(db)
    return {
        'db': db,
        'chaindb': chain_db,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
        'mining_chain': mining_chain,
        'block_number': canonical_head.block_number,
        'hex_hash': canonical_head.hex_hash,
        'state_root_hex': encode_hex(canonical_head.state_root),
        'trinity_already_running': trinity_already_running,
    }
def get_beacon_shell_context(database_dir: Path,
                             trinity_config: TrinityConfig) -> Dict[str, Any]:
    """Assemble the variable namespace for a beacon-chain DB shell session.

    Attaches to a running Trinity's database over IPC when one is up;
    otherwise opens the LevelDB directory directly.
    """
    app_config = trinity_config.get_app_config(BeaconAppConfig)
    ipc_path = trinity_config.database_ipc_path
    trinity_already_running = ipc_path.exists()
    if trinity_already_running:
        db_manager = beacon.manager.create_db_consumer_manager(ipc_path)  # type: ignore
        db = db_manager.get_db()
    else:
        db = LevelDB(database_dir)
    chain_config = app_config.get_chain_config()
    attestation_pool = AttestationPool()
    # Fix: the original assigned the bare class to `chain` first, a dead
    # statement immediately overwritten by the constructed instance below.
    chain = chain_config.beacon_chain_class(db, attestation_pool, chain_config.genesis_config)
    chaindb = BeaconChainDB(db, chain_config.genesis_config)
    head = chaindb.get_canonical_head(BeaconBlock)
    return {
        'db': db,
        'chaindb': chaindb,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
        'block_number': head.slot,
        'hex_hash': head.hash_tree_root.hex(),
        'state_root_hex': encode_hex(head.state_root),
        'trinity_already_running': trinity_already_running
    }
def get_beacon_shell_context(database_dir: Path,
                             trinity_config: TrinityConfig) -> Dict[str, Any]:
    """Assemble the variable namespace for a beacon-chain DB shell session.

    Connects to a running Trinity over IPC when one is up; otherwise
    opens the LevelDB directory directly.
    """
    app_config = trinity_config.get_app_config(BeaconAppConfig)
    ipc_path = trinity_config.database_ipc_path
    db: DatabaseAPI
    trinity_already_running = ipc_path.exists()
    db = DBClient.connect(ipc_path) if trinity_already_running else LevelDB(database_dir)
    chain_config = app_config.get_chain_config()
    chain = chain_config.beacon_chain_class(db, chain_config.genesis_config)
    chain_db = BeaconChainDB(db, chain_config.genesis_config)
    canonical_head = chain_db.get_canonical_head(BeaconBlock)
    return {
        'db': db,
        'chaindb': chain_db,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
        'block_number': canonical_head.slot,
        'hex_hash': canonical_head.hash_tree_root.hex(),
        'state_root_hex': encode_hex(canonical_head.state_root),
        'trinity_already_running': trinity_already_running
    }
def get_base_db(boot_info: BootInfo) -> LevelDB:
    """Open the eth1 LevelDB database, initializing it on first use."""
    app_config = boot_info.trinity_config.get_app_config(Eth1AppConfig)
    leveldb = LevelDB(db_path=app_config.database_dir)
    chain_db = ChainDB(leveldb)
    if not is_database_initialized(chain_db):
        # Fresh database: write the genesis/chain bootstrap data.
        initialize_database(app_config.get_chain_config(), chain_db, leveldb)
    return leveldb
def get_base_db(boot_info: BootInfo) -> LevelDB:
    """Open the beacon LevelDB database, initializing it on first use."""
    app_config = boot_info.trinity_config.get_app_config(BeaconAppConfig)
    chain_config = app_config.get_chain_config()
    leveldb = LevelDB(db_path=app_config.database_dir)
    beacon_db = BeaconChainDB(leveldb, chain_config.genesis_config)
    if not is_beacon_database_initialized(beacon_db):
        # Fresh database: write the genesis/bootstrap beacon state.
        initialize_beacon_database(chain_config, beacon_db, leveldb)
    return leveldb
def _get_base_db(database_dir: Path, ipc_path: Path) -> Iterator[AtomicDatabaseAPI]:
    """Yield a database handle.

    If a Trinity process is already serving the database (the IPC socket
    exists), yield a connected ``DBClient`` and close it on exit;
    otherwise yield a directly-opened ``LevelDB``.
    """
    if ipc_path.exists():
        client = DBClient.connect(ipc_path)
        with client:
            yield client
    else:
        yield LevelDB(database_dir)
def _test() -> None:
    """Manual integration script: run a full-node sync against Ropsten.

    Connects to the given enode (or the default preferred nodes) and
    drives ``FullNodeSyncer`` until SIGINT/SIGTERM is received.
    """
    import argparse
    import asyncio
    import signal

    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from p2p import ecies
    from p2p.kademlia import Node
    from trinity.protocol.common.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    context = ChainContext(headerdb=chaindb,
                           network_id=network_id,
                           vm_configuration=ROPSTEN_VM_CONFIGURATION)
    peer_pool = ETHPeerPool(privkey=privkey, context=context)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    loop = asyncio.get_event_loop()

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    # Shut down cleanly on SIGINT/SIGTERM instead of killing mid-sync.
    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
def _initialize_beacon_filesystem_and_db(boot_info: BootInfo) -> None:
    """Create the beacon data directories and initialize the DB if fresh."""
    app_config = boot_info.trinity_config.get_app_config(BeaconAppConfig)
    ensure_beacon_dirs(app_config)
    leveldb = LevelDB(db_path=app_config.database_dir)
    chain_config = app_config.get_chain_config()
    beacon_db = BeaconChainDB(leveldb)
    if not is_beacon_database_initialized(beacon_db):
        initialize_beacon_database(chain_config, beacon_db, leveldb)
def _test() -> None:
    """Manual integration script: download the Ropsten head state trie.

    Runs ``StateDownloader`` against the default preferred nodes and
    exits on SIGINT/SIGTERM or when the download finishes.
    """
    import argparse
    import signal

    from p2p import ecies
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, connect_to_peers_loop
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.state.StateDownloader').setLevel(log_level)

    db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(
        ETHPeer, chaindb, network_id, ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    # Download the state trie rooted at the current canonical head.
    head = chaindb.get_canonical_head()
    downloader = StateDownloader(chaindb, db, head.state_root, peer_pool)
    loop = asyncio.get_event_loop()

    # Shut down cleanly on SIGINT/SIGTERM instead of killing mid-download.
    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    async def run() -> None:
        await downloader.run()
        downloader.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
def get_chain(vm: Type[VirtualMachineAPI], genesis_state: GenesisState) -> Iterable[MiningChain]:
    """Yield a PoW-check-disabled ``MiningChain`` over a temporary LevelDB.

    The backing database lives in a temporary directory that is removed
    when the generator is exhausted or closed.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        backing_db = LevelDB(Path(temp_dir))
        chain = build(
            MiningChain,
            fork_at(vm, constants.GENESIS_BLOCK_NUMBER),
            disable_pow_check(),
            genesis(db=backing_db, params=GENESIS_PARAMS, state=genesis_state),
        )
        yield chain
def get_chain(trinity_config: TrinityConfig) -> ChainAPI:
    """Build the full eth1 chain from config, initializing the DB as needed."""
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    ensure_eth1_dirs(app_config)
    raw_db = LevelDB(db_path=app_config.database_dir)
    chain_config = app_config.get_chain_config()
    # The chain itself operates on an atomic wrapper over the raw LevelDB.
    full_chain = chain_config.full_chain_class(AtomicDB(raw_db))
    initialize_database(chain_config, full_chain.chaindb, raw_db)
    return full_chain
def db_shell(use_ipython: bool, database_dir: Path) -> None:
    """Open an interactive shell with a ``ChainDB`` over ``database_dir``."""
    chain_db = ChainDB(LevelDB(database_dir))
    canonical_head = chain_db.get_canonical_head()
    greeter = f"""
    Head: #{canonical_head.block_number}
    Hash: {canonical_head.hex_hash}
    State Root: {encode_hex(canonical_head.state_root)}
    """
    shell(use_ipython, {'chaindb': chain_db}, DB_SHELL_BANNER + greeter)
def run_server(ipc_path):
    """Serve a throwaway LevelDB over IPC until interrupted.

    The database lives in a temporary directory that is removed on exit.
    The IPC socket file is always cleaned up, even if the manager raises.
    """
    with tempfile.TemporaryDirectory() as db_path:
        db = LevelDB(db_path=db_path)
        manager = DBManager(db)
        try:
            with manager.run(ipc_path):
                try:
                    manager.wait_stopped()
                except KeyboardInterrupt:
                    pass
        finally:
            # Fix: previously the socket file leaked if wait_stopped() raised
            # anything other than KeyboardInterrupt.
            ipc_path.unlink()
def _run() -> None:
    """Manual script: initialize an eth1 data dir and serve its DB over IPC.

    Parses Trinity CLI args (requiring an explicit root dir), initializes
    the data directory and database if fresh, then blocks serving the
    database until interrupted.
    """
    from eth.db.backends.level import LevelDB
    from eth.db.chain import ChainDB
    from trinity.cli_parser import parser
    from trinity.config import Eth1AppConfig, TrinityConfig
    from trinity.constants import APP_IDENTIFIER_ETH1
    from trinity.initialization import (
        initialize_data_dir,
        is_data_dir_initialized,
        is_database_initialized,
        initialize_database,
        ensure_eth1_dirs,
    )

    # Require a root dir to be specified as we don't want to mess with the default one.
    for action in parser._actions:
        if action.dest == 'trinity_root_dir':
            action.required = True
            break

    args = parser.parse_args()
    # FIXME: Figure out a way to avoid having to set this.
    args.sync_mode = "full"

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    for name, level in args.log_levels.items():
        logging.getLogger(name).setLevel(level)

    trinity_config = TrinityConfig.from_parser_args(args, APP_IDENTIFIER_ETH1, (Eth1AppConfig, ))
    trinity_config.trinity_root_dir.mkdir(exist_ok=True)
    if not is_data_dir_initialized(trinity_config):
        initialize_data_dir(trinity_config)

    # Hold the pid file for the 'database' process while serving.
    with trinity_config.process_id_file('database'):
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        ensure_eth1_dirs(app_config)
        base_db = LevelDB(db_path=app_config.database_dir)
        chaindb = ChainDB(base_db)
        if not is_database_initialized(chaindb):
            chain_config = app_config.get_chain_config()
            initialize_database(chain_config, chaindb, base_db)
        manager = DBManager(base_db)
        with manager.run(trinity_config.database_ipc_path):
            try:
                manager.wait_stopped()
            except KeyboardInterrupt:
                pass
def __init__(self,
             chaindb: AsyncChainDB,
             account_db: AsyncBaseDB,
             root_hash: Hash32,
             peer_pool: ETHPeerPool,
             token: CancelToken = None) -> None:
    """Set up a state downloader targeting the state trie at ``root_hash``.

    :param chaindb: chain database used for header lookups.
    :param account_db: database the downloaded account state is written to.
    :param root_hash: state root whose trie will be downloaded.
    :param peer_pool: pool of ETH peers to request trie nodes from.
    :param token: optional cancel token chained into this service's token.
    """
    super().__init__(token)
    self.chaindb = chaindb
    self.peer_pool = peer_pool
    self.root_hash = root_hash
    # We use a LevelDB instance for the nodes cache because a full state download, if run
    # uninterrupted will visit more than 180M nodes, making an in-memory cache unfeasible.
    self._nodes_cache_dir = tempfile.TemporaryDirectory(prefix="pyevm-state-sync-cache")
    self.scheduler = StateSync(
        # cast: TemporaryDirectory.name is a str, but LevelDB expects a Path-like.
        root_hash, account_db, LevelDB(cast(Path, self._nodes_cache_dir.name)), self.logger)
    self.request_tracker = TrieNodeRequestTracker(self._reply_timeout, self.logger)
    # Tracks, per peer, the trie nodes that peer reported it does not have.
    self._peer_missing_nodes: Dict[ETHPeer, Set[Hash32]] = collections.defaultdict(set)
def from_config(
        cls,
        config: BeaconNodeConfig,
        time_provider: TimeProvider = get_unix_time) -> "BeaconNode":
    """Construct a ``BeaconNode`` from configuration.

    If the database holds no genesis block, the genesis state is loaded
    from ``config.genesis_state_ssz`` and the chain is bootstrapped from
    it; otherwise the existing chain and fork choice are restored.

    Raises:
        Exception: when there is neither a stored genesis block nor a
            genesis state file to bootstrap from.
    """
    base_db = LevelDB(db_path=config.database_dir)
    chain_db = config.chain_db_class(base_db)
    genesis_block = chain_db.get_block_by_slot(GENESIS_SLOT, BeaconBlock)
    if genesis_block is None and config.genesis_state_ssz is None:
        # Fix: message previously read "with out genesis state".
        raise Exception("node cannot proceed without genesis state")
    if genesis_block is None:
        with open(config.genesis_state_ssz, "rb") as genesis_state_ssz_file:
            genesis_state_ssz = genesis_state_ssz_file.read()
        genesis_state = ssz.decode(genesis_state_ssz, BeaconState)
        chain = config.chain_class.from_genesis(base_db, genesis_state)
        genesis_time = genesis_state.genesis_time
    else:
        # TODO fix slot polymorphism...
        # NOTE: accessing private property, ignoring type for now...
        sm = config.chain_class._sm_configuration[0][1]  # type: ignore
        fork_choice_class = sm.fork_choice_class
        block_sink = ChainDBBlockSink(chain_db)
        fork_choice = fork_choice_class.from_db(chain_db, config, block_sink)
        chain = config.chain_class(chain_db, fork_choice)
        genesis_time = chain_db.genesis_time
    clock = _mk_clock(config.eth2_config, genesis_time, time_provider)
    return cls(
        config.local_node_key,
        config.eth2_config,
        clock,
        chain,
        config.validator_api_port,
        config.client_identifier,
        config.p2p_maddr,
        config.preferred_nodes,
        config.bootstrap_nodes,
    )
def db_shell(use_ipython: bool, database_dir: Path, trinity_config: TrinityConfig) -> None:
    """Open an interactive shell preloaded with chain database objects.

    Attaches to a running Trinity's database over IPC when one is up;
    otherwise opens the LevelDB directory directly.
    """
    db_ipc_path = trinity_config.database_ipc_path
    trinity_already_running = db_ipc_path.exists()
    if trinity_already_running:
        db_manager = create_db_consumer_manager(db_ipc_path)
        base_db = db_manager.get_db()  # type: ignore
    else:
        base_db = LevelDB(database_dir)
    chain_db = ChainDB(base_db)
    canonical_head = chain_db.get_canonical_head()
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    chain = chain_config.full_chain_class(base_db)
    greeter = f"""
    Head: #{canonical_head.block_number}
    Hash: {canonical_head.hex_hash}
    State Root: {encode_hex(canonical_head.state_root)}
    Inspecting active Trinity? {trinity_already_running}

    Available Context Variables:
      - `db`: base database object
      - `chaindb`: `ChainDB` instance
      - `trinity_config`: `TrinityConfig` instance
      - `chain_config`: `ChainConfig` instance
      - `chain`: `Chain` instance
    """
    repl_namespace = {
        'db': base_db,
        'chaindb': chain_db,
        'trinity_config': trinity_config,
        'chain_config': chain_config,
        'chain': chain,
    }
    shell(use_ipython, repl_namespace, DB_SHELL_BANNER + greeter)
def __init__(
    self,
    local_node_key: PrivateKey,
    eth2_config: Eth2Config,
    chain_config: BeaconChainConfig,
    database_dir: Path,
    chain_class: Type[BaseBeaconChain],
    time_provider: TimeProvider = get_unix_time,
) -> None:
    """Set up the beacon node: key pair, clock, database and chain.

    Initializes the beacon database from ``chain_config`` if it has not
    been initialized yet.

    :param local_node_key: private key for the libp2p identity.
    :param eth2_config: protocol configuration constants.
    :param chain_config: chain genesis/bootstrap configuration.
    :param database_dir: directory backing the LevelDB instance.
    :param chain_class: concrete beacon chain class to instantiate.
    :param time_provider: wall-clock source, injectable for tests.
    """
    self._local_key_pair = create_new_key_pair(local_node_key.to_bytes())
    self._eth2_config = eth2_config
    self._clock = _mk_clock(eth2_config, chain_config.genesis_time, time_provider)
    self._base_db = LevelDB(db_path=database_dir)
    self._chain_db = BeaconChainDB(self._base_db, eth2_config)
    if not is_beacon_database_initialized(self._chain_db):
        initialize_beacon_database(chain_config, self._chain_db, self._base_db)
    self._chain = chain_class(self._base_db, eth2_config)
async def do_run(self, event_bus: EndpointAPI) -> None:
    """Run the discovery v5 stack until cancelled.

    Wires up the datagram/packet/message pipeline as a set of services
    connected by zero-capacity trio memory channels, then runs them all
    in one nursery over a shared UDP socket.  Channel index 0 is the
    send end, index 1 the receive end.
    """
    boot_info = self._boot_info

    identity_scheme_registry = default_identity_scheme_registry
    message_type_registry = default_message_type_registry

    nodedb_dir = get_nodedb_dir(boot_info)
    nodedb_dir.mkdir(exist_ok=True)
    node_db = NodeDB(default_identity_scheme_registry, LevelDB(nodedb_dir))

    local_private_key = get_local_private_key(boot_info)
    local_enr = await get_local_enr(boot_info, node_db, local_private_key)
    local_node_id = local_enr.node_id
    routing_table = KademliaRoutingTable(local_node_id, NUM_ROUTING_TABLE_BUCKETS)

    node_db.set_enr(local_enr)
    # Seed the routing table with the bootstrap ENRs from the CLI, if any.
    for enr_repr in boot_info.args.discovery_boot_enrs or ():
        enr = ENR.from_repr(enr_repr)
        node_db.set_enr(enr)
        routing_table.update(enr.node_id)

    port = boot_info.args.discovery_port
    socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)

    # Zero-capacity channels: producers block until a consumer picks up.
    outgoing_datagram_channels = trio.open_memory_channel[OutgoingDatagram](0)
    incoming_datagram_channels = trio.open_memory_channel[IncomingDatagram](0)
    outgoing_packet_channels = trio.open_memory_channel[OutgoingPacket](0)
    incoming_packet_channels = trio.open_memory_channel[IncomingPacket](0)
    outgoing_message_channels = trio.open_memory_channel[OutgoingMessage](0)
    incoming_message_channels = trio.open_memory_channel[IncomingMessage](0)
    endpoint_vote_channels = trio.open_memory_channel[EndpointVote](0)

    # types ignored due to https://github.com/ethereum/async-service/issues/5
    datagram_sender = DatagramSender(  # type: ignore
        outgoing_datagram_channels[1], socket)
    datagram_receiver = DatagramReceiver(  # type: ignore
        socket, incoming_datagram_channels[0])
    packet_encoder = PacketEncoder(  # type: ignore
        outgoing_packet_channels[1], outgoing_datagram_channels[0])
    packet_decoder = PacketDecoder(  # type: ignore
        incoming_datagram_channels[1], incoming_packet_channels[0])
    packer = Packer(
        local_private_key=local_private_key.to_bytes(),
        local_node_id=local_node_id,
        node_db=node_db,
        message_type_registry=message_type_registry,
        incoming_packet_receive_channel=incoming_packet_channels[1],
        incoming_message_send_channel=incoming_message_channels[0],
        outgoing_message_receive_channel=outgoing_message_channels[1],
        outgoing_packet_send_channel=outgoing_packet_channels[0],
    )
    message_dispatcher = MessageDispatcher(
        node_db=node_db,
        incoming_message_receive_channel=incoming_message_channels[1],
        outgoing_message_send_channel=outgoing_message_channels[0],
    )
    endpoint_tracker = EndpointTracker(
        local_private_key=local_private_key.to_bytes(),
        local_node_id=local_node_id,
        node_db=node_db,
        identity_scheme_registry=identity_scheme_registry,
        vote_receive_channel=endpoint_vote_channels[1],
    )
    routing_table_manager = RoutingTableManager(
        local_node_id=local_node_id,
        routing_table=routing_table,
        message_dispatcher=message_dispatcher,
        node_db=node_db,
        outgoing_message_send_channel=outgoing_message_channels[0],
        endpoint_vote_send_channel=endpoint_vote_channels[0],
    )

    logger.info(f"Starting discovery, listening on port {port}")
    logger.info(f"Local Node ID: {encode_hex(local_enr.node_id)}")
    logger.info(f"Local ENR: {local_enr}")

    services = (
        datagram_sender,
        datagram_receiver,
        packet_encoder,
        packet_decoder,
        packer,
        message_dispatcher,
        endpoint_tracker,
        routing_table_manager,
    )
    await socket.bind(("0.0.0.0", port))
    with socket:
        async with trio.open_nursery() as nursery:
            for service in services:
                nursery.start_soon(async_service.TrioManager.run_service, service)
def _test() -> None:
    """Manual integration script: sync headers/blocks against Ropsten.

    Selects fast, light, or regular sync via CLI flags, connects to the
    given enode (or defaults) and runs until SIGINT/SIGTERM.
    """
    import argparse
    import signal

    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, FakeAsyncHeaderDB, connect_to_peers_loop)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)

    peer_class: Type[HeaderRequestingPeer] = ETHPeer
    if args.light:
        peer_class = LESPeer
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(peer_class, headerdb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    chain = FakeAsyncRopstenChain(base_db)

    # Pick the syncer implementation from the CLI flags.
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, peer_pool)
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, peer_pool)
    else:
        syncer = RegularChainSyncer(chain, chaindb, peer_pool)
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    # Shut down cleanly on SIGINT/SIGTERM instead of killing mid-sync.
    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
def chaindb(chain_config):
    """Pytest fixture: a ``ChainDB`` over the configured LevelDB directory."""
    base_db = LevelDB(db_path=chain_config.database_dir)
    return ChainDB(base_db)
def _test() -> None:
    """Manual integration script: sync against Ropsten or mainnet.

    Detects the network from the stored genesis header, selects fast,
    light, or regular sync via CLI flags, and runs until SIGINT/SIGTERM.
    """
    import argparse
    from pathlib import Path
    import signal

    from p2p import ecies
    from p2p.kademlia import Node
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncMainnetChain, FakeAsyncRopstenChain, FakeAsyncHeaderDB,
        connect_to_peers_loop)
    from trinity.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from trinity._utils.chains import load_nodekey

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = FakeAsyncChainDB(base_db)
    try:
        genesis = chaindb.get_canonical_block_header_by_number(0)
    except HeaderNotFound:
        # Empty database: assume Ropsten and bootstrap it with its genesis.
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    # Identify the network by its genesis hash.
    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncMainnetChain
    else:
        # Fix: the original passed (fmt, arg) as two RuntimeError args, so the
        # message was never %-formatted.
        raise RuntimeError("Unknown genesis: %s" % genesis)

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=network_id,
        vm_configuration=vm_config,
    )
    peer_pool = peer_pool_class(privkey=privkey, context=context)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    chain = chain_class(base_db)

    # Pick the syncer implementation from the CLI flags.
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, cast(LESPeerPool, peer_pool))
    else:
        syncer = RegularChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    # Shut down cleanly on SIGINT/SIGTERM instead of killing mid-sync.
    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
async def _main() -> None:
    """Manual async script: sync against Ropsten or mainnet.

    Detects the network from the stored genesis header, runs a light or
    regular syncer under ``background_asyncio_service``, and cancels the
    syncer on SIGINT/SIGTERM.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = AsyncHeaderDB(AtomicDB(base_db))
    chaindb = AsyncChainDB(AtomicDB(base_db))
    try:
        genesis = chaindb.get_canonical_block_header_by_number(BlockNumber(0))
    except HeaderNotFound:
        # Empty database: assume Ropsten and bootstrap it with its genesis.
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    # Identify the network by its genesis hash.
    chain_class: Union[Type[AsyncRopstenChain], Type[AsyncMainnetChain]]
    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        chain_id = RopstenChain.chain_id
        vm_config = ROPSTEN_VM_CONFIGURATION
        chain_class = AsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        chain_id = MainnetChain.chain_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = AsyncMainnetChain
    else:
        # Fix: the original passed (fmt, arg) as two RuntimeError args, so the
        # message was never %-formatted.
        raise RuntimeError("Unknown genesis: %s" % genesis)

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=chain_id,
        vm_configuration=vm_config,
        client_version_string=construct_trinity_client_identifier(),
        listen_port=30309,
        p2p_version=DEVP2P_V5,
    )
    peer_pool = peer_pool_class(privkey=privkey, context=context)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[chain_id]

    async with background_asyncio_service(peer_pool) as manager:
        manager.run_task(connect_to_peers_loop(peer_pool, nodes))  # type: ignore
        chain = chain_class(base_db)
        syncer: Service = None
        if args.light:
            syncer = LightChainSyncer(chain, headerdb, cast(LESPeerPool, peer_pool))
        else:
            syncer = RegularChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
        logging.getLogger().setLevel(log_level)

        # Cancel the syncer's manager on SIGINT/SIGTERM for a clean shutdown.
        sigint_received = asyncio.Event()
        for sig in [signal.SIGINT, signal.SIGTERM]:
            loop.add_signal_handler(sig, sigint_received.set)

        async def exit_on_sigint() -> None:
            await sigint_received.wait()
            syncer.get_manager().cancel()

        asyncio.ensure_future(exit_on_sigint())
        async with background_asyncio_service(syncer) as syncer_manager:
            await syncer_manager.wait_finished()
def base_db(eth1_app_config):
    """Pytest fixture: a LevelDB over the eth1 app's database directory."""
    database_dir = eth1_app_config.database_dir
    return LevelDB(db_path=database_dir)