Example #1
def _test():
    # BaseChainDB, PeerPool, ETHPeer and StateDownloader are module-level names
    # in the full source (not shown in this excerpt).
    import argparse
    import asyncio
    import logging
    from eth_utils import decode_hex
    from evm.p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash',
                        type=str,
                        required=True,
                        help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = BaseChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id,
                         ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(downloader.run())
    except KeyboardInterrupt:
        pass

    loop.run_until_complete(downloader.stop())
    loop.run_until_complete(peer_pool.stop())
    loop.close()
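
These `_test()` helpers are written to run as standalone scripts. A minimal entry point plus a hypothetical invocation (the module path, DB path and root hash below are placeholders, not taken from the source):

if __name__ == "__main__":
    _test()

# Hypothetical invocation:
#   python -m evm.p2p.state -db /path/to/state-db -root-hash 0x<hex-encoded-root>
# argparse maps '-root-hash' to args.root_hash, so the attribute accesses above work.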
Example #2
def _test():
    # ChainDB, PeerPool, ETHPeer and StateDownloader are module-level names in
    # the full source (not shown in this excerpt).
    import argparse
    import asyncio
    import logging
    import signal
    from eth_utils import decode_hex
    from evm.p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash',
                        type=str,
                        required=True,
                        help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = ChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id,
                         ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
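
The SIGINT/SIGTERM handlers above call `downloader.cancel_token.trigger` so that `downloader.run()` can unwind cooperatively. A minimal stand-in for such a token, built on `asyncio.Event` (a sketch of the pattern only, not the token class the snippet actually uses):

import asyncio

class CancelToken:
    """Cooperative cancellation: trigger() from a signal handler, check or await elsewhere."""

    def __init__(self) -> None:
        self._event = asyncio.Event()

    def trigger(self) -> None:
        self._event.set()

    @property
    def triggered(self) -> bool:
        return self._event.is_set()

    async def wait(self) -> None:
        await self._event.wait()

A coroutine that checks `token.triggered` in its main loop (or awaits `token.wait()`) returns shortly after the handler fires, which is what lets `await downloader.run()` fall through to the stop calls.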
Example #3
def run_networking_process(chain_config, sync_mode):
    # BaseManager is multiprocessing.managers.BaseManager; DBProxy, ChainDBProxy
    # and the other helpers used below are module-level imports in the full source.
    class DBManager(BaseManager):
        pass

    DBManager.register('get_db', proxytype=DBProxy)
    DBManager.register('get_chaindb', proxytype=ChainDBProxy)

    manager = DBManager(address=chain_config.database_ipc_path)
    manager.connect()

    chaindb = manager.get_chaindb()

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    if not is_database_initialized(chaindb):
        initialize_database(chain_config, chaindb)

    chain_class = get_chain_protocol_class(chain_config, sync_mode=sync_mode)
    peer_pool = PeerPool(LESPeer, chaindb, chain_config.network_id,
                         chain_config.nodekey)

    async def run():
        try:
            asyncio.ensure_future(peer_pool.run())
            # chain.run() will run in a loop until our atexit handler is called, at which point it returns
            # and we cleanly stop the pool and chain.
            await chain.run()
        finally:
            await peer_pool.stop()
            await chain.stop()

    chain = chain_class(chaindb, peer_pool)

    loop = asyncio.get_event_loop()

    def cleanup():
        # This is to instruct chain.run() to exit, which will cause the event loop to stop.
        chain._should_stop.set()

        loop.close()

    # Register cleanup before entering the loop so it also runs when we exit via
    # an interrupt rather than a clean return.
    atexit.register(cleanup)

    try:
        loop.run_until_complete(run())
    except KeyboardInterrupt:
        pass
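
The manager client above only works if the database process has registered the same typeids and is serving on `database_ipc_path`. A minimal sketch of that server side using the standard `multiprocessing.managers` API (the function name and wiring are assumptions based on `run_database_process` in Example #8; `ChainDB`, `DBProxy` and `ChainDBProxy` come from the surrounding codebase):

from multiprocessing.managers import BaseManager

def serve_databases(chain_config, db):  # hypothetical helper
    chaindb = ChainDB(db)

    class DBManager(BaseManager):
        pass

    # Register callables so that connecting clients receive proxies to these objects.
    DBManager.register('get_db', callable=lambda: db, proxytype=DBProxy)
    DBManager.register('get_chaindb', callable=lambda: chaindb, proxytype=ChainDBProxy)

    manager = DBManager(address=chain_config.database_ipc_path)
    server = manager.get_server()
    server.serve_forever()  # blocks until the process is killed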
Example #4
def __init__(self, chaindb, state_db, root_hash, network_id, privkey):
    self.peer_pool = PeerPool(ETHPeer, chaindb, network_id, privkey, self.msg_handler)
    self.root_hash = root_hash
    self.scheduler = StateSync(root_hash, state_db, self.logger)
Example #5
class StateDownloader:
    logger = logging.getLogger("evm.p2p.state.StateDownloader")
    _total_processed_nodes = 0
    _report_interval = 10  # Number of seconds between progress reports.
    # TODO: Experiment with different timeout/max_pending values to find the combination that
    # yields the best results.
    # FIXME: Should use the # of peers times MAX_STATE_FETCH here
    _max_pending = 5 * MAX_STATE_FETCH
    _reply_timeout = 10  # seconds
    # For simplicity/readability we use 0 here to force a report on the first iteration of the
    # loop.
    _last_report_time = 0

    def __init__(self, chaindb, state_db, root_hash, network_id, privkey):
        self.peer_pool = PeerPool(ETHPeer, chaindb, network_id, privkey, self.msg_handler)
        self.root_hash = root_hash
        self.scheduler = StateSync(root_hash, state_db, self.logger)
        # Request-time bookkeeping is kept per instance; as a mutable class
        # attribute it would be shared by every StateDownloader.
        self._pending_nodes = {}  # type: Dict[Any, float]

    def msg_handler(self, peer: BasePeer, cmd: protocol.Command,
                    msg: protocol._DecodedMsgType) -> None:
        """The callback passed to BasePeer, called for every incoming message."""
        peer = cast(ETHPeer, peer)
        if isinstance(cmd, eth.NodeData):
            self.logger.debug("Processing NodeData with %d entries" % len(msg))
            for node in msg:
                self._total_processed_nodes += 1
                node_key = keccak(node)
                try:
                    self.scheduler.process([(node_key, node)])
                except SyncRequestAlreadyProcessed:
                    # This means we received a node more than once, which can happen when we retry
                    # after a timeout.
                    pass
                # A node may be received more than once, so pop() with a default value.
                self._pending_nodes.pop(node_key, None)

    # FIXME: Need a better criterion to select peers here.
    async def get_random_peer(self) -> ETHPeer:
        while len(self.peer_pool.peers) == 0:
            self.logger.debug("No connected peers, sleeping a bit")
            await asyncio.sleep(0.5)
        peer = random.choice(self.peer_pool.peers)
        return cast(ETHPeer, peer)

    async def stop(self):
        await self.peer_pool.stop()

    async def request_next_batch(self):
        requests = self.scheduler.next_batch(MAX_STATE_FETCH)
        if not len(requests):
            # Although our run() loop frequently yields control to let our msg handler process
            # received nodes (scheduling new requests), there may be cases when pending nodes
            # take a while to arrive, causing the scheduler to run out of new requests for a
            # while.
            self.logger.debug("Scheduler queue is empty, not requesting any nodes")
            return
        self.logger.debug("Requesting %d trie nodes" % len(requests))
        await self.request_nodes([request.node_key for request in requests])

    async def request_nodes(self, node_keys):
        peer = await self.get_random_peer()
        now = time.time()
        for node_key in node_keys:
            self._pending_nodes[node_key] = now
        peer.sub_proto.send_get_node_data(node_keys)

    async def retry_timedout(self):
        timed_out = []
        now = time.time()
        for node_key, req_time in list(self._pending_nodes.items()):
            if now - req_time > self._reply_timeout:
                timed_out.append(node_key)
        if len(timed_out) == 0:
            return
        self.logger.debug("Re-requesting %d trie nodes" % len(timed_out))
        await self.request_nodes(timed_out)

    async def run(self):
        asyncio.ensure_future(self.peer_pool.run())

        self.logger.info("Starting state sync for root hash %s" % encode_hex(self.root_hash))
        while self.scheduler.has_pending_requests:
            # Request new nodes if we haven't reached the limit of pending nodes.
            if len(self._pending_nodes) < self._max_pending:
                await self.request_next_batch()

            # Retry pending nodes that timed out.
            if len(self._pending_nodes):
                await self.retry_timedout()

            if len(self._pending_nodes) > self._max_pending:
                # Slow down if we've reached the limit of pending nodes.
                self.logger.debug("Pending trie nodes limit reached, sleeping a bit")
                await asyncio.sleep(0.3)
            else:
                # Yield control to ensure the Peer's msg_handler callback is called to process any
                # nodes we may have received already. Otherwise we spin too fast and don't process
                # received nodes often enough.
                await asyncio.sleep(0)

            self._maybe_report_progress()

        self.logger.info("Finished state sync with root hash %s" % encode_hex(self.root_hash))

    def _maybe_report_progress(self):
        if (time.time() - self._last_report_time) >= self._report_interval:
            self._last_report_time = time.time()
            self.logger.info("Nodes processed: %d" % self._total_processed_nodes)
            self.logger.info(
                "Nodes requested but not received yet: %d" % len(self._pending_nodes))
            self.logger.info(
                "Nodes scheduled but not requested yet: %d" % len(self.scheduler.requests))
Example #6
parser.add_argument('-debug', action='store_true')
args = parser.parse_args()

print("Logging to", LOGFILE)
if args.debug:
    LOGLEVEL = logging.DEBUG
logging.basicConfig(level=LOGLEVEL, filename=LOGFILE)

DemoLightChain = LightChain.configure(
    name='Demo LightChain',
    vm_configuration=MAINNET_VM_CONFIGURATION,
    network_id=ROPSTEN_NETWORK_ID,
)

chaindb = ChainDB(LevelDB(args.db))
peer_pool = PeerPool(LESPeer, chaindb, ROPSTEN_NETWORK_ID,
                     ecies.generate_privkey())
try:
    chaindb.get_canonical_head()
except CanonicalHeadNotFound:
    # We're starting with a fresh DB.
    chain = DemoLightChain.from_genesis_header(chaindb, ROPSTEN_GENESIS_HEADER,
                                               peer_pool)
else:
    # We're reusing an existing db.
    chain = DemoLightChain(chaindb, peer_pool)


async def run():
    asyncio.ensure_future(peer_pool.run())
    # chain.run() will run in a loop until our atexit handler is called, at which point it returns
    # and we cleanly stop the pool and chain.
    await chain.run()
    await peer_pool.stop()
    await chain.stop()
def _test():
    # LightChain, LESPeer, PeerPool and BaseChainDB are module-level names in the full source.
    import argparse
    import asyncio
    import logging
    import signal
    from evm.chains.mainnet import (MAINNET_GENESIS_HEADER,
                                    MAINNET_VM_CONFIGURATION,
                                    MAINNET_NETWORK_ID)
    from evm.chains.ropsten import ROPSTEN_GENESIS_HEADER, ROPSTEN_NETWORK_ID
    from evm.db.backends.level import LevelDB
    from evm.exceptions import CanonicalHeadNotFound
    from evm.p2p import ecies
    from evm.p2p.integration_test_helpers import LocalGethPeerPool

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    logging.getLogger("evm.p2p.lightchain.LightChain").setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-mainnet', action="store_true")
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    GENESIS_HEADER = ROPSTEN_GENESIS_HEADER
    NETWORK_ID = ROPSTEN_NETWORK_ID
    if args.mainnet:
        GENESIS_HEADER = MAINNET_GENESIS_HEADER
        NETWORK_ID = MAINNET_NETWORK_ID
    DemoLightChain = LightChain.configure(
        'DemoLightChain',
        vm_configuration=MAINNET_VM_CONFIGURATION,
        network_id=NETWORK_ID,
    )

    chaindb = BaseChainDB(LevelDB(args.db))
    if args.local_geth:
        peer_pool = LocalGethPeerPool(LESPeer, chaindb, NETWORK_ID,
                                      ecies.generate_privkey())
    else:
        peer_pool = PeerPool(LESPeer, chaindb, NETWORK_ID,
                             ecies.generate_privkey())
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        # We're starting with a fresh DB.
        chain = DemoLightChain.from_genesis_header(chaindb, GENESIS_HEADER,
                                                   peer_pool)
    else:
        # We're reusing an existing db.
        chain = DemoLightChain(chaindb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    loop = asyncio.get_event_loop()

    async def run():
        # chain.run() will run in a loop until stop() (registered as SIGINT/SIGTERM handler) is
        # called, at which point it returns and we cleanly stop the pool and chain.
        await chain.run()
        await peer_pool.stop()
        await chain.stop()

    def stop():
        chain._should_stop.set()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, stop)

    loop.run_until_complete(run())
    loop.close()
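
The `stop()` handler works because `chain.run()` polls the private `_should_stop` event and returns once it is set. A minimal sketch of that contract (the loop body is illustrative, not the real `LightChain` implementation):

import asyncio

class StoppableService:
    def __init__(self) -> None:
        self._should_stop = asyncio.Event()

    async def run(self) -> None:
        # Returns soon after a signal handler (or anyone else) sets _should_stop.
        while not self._should_stop.is_set():
            await self._do_one_unit_of_work()

    async def _do_one_unit_of_work(self) -> None:
        await asyncio.sleep(0)  # placeholder for real work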
Example #8
def main():
    # NOTE: `parser` (the argparse parser), `ctx` (a multiprocessing context) and the
    # other helpers referenced below are module-level names in the full source.
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = ChainDB(LevelDB(chain_config.database_dir))
        peer_pool = PeerPool(LESPeer, chaindb, chain_config.network_id, chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        args.func(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # Start the listener thread that forwards log records produced by other
    # processes to the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs={'log_queue': log_queue}
    )

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(
        target=run_networking_process,
        args=(chain_config, sync_mode),
        kwargs={'log_queue': log_queue}
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
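
`kill_process_gracefully` is imported from elsewhere and not shown. A plausible sketch of such a helper (an assumption, not the actual implementation):

import os
import signal

def kill_process_gracefully(process, timeout=5):  # hypothetical sketch
    # Ask politely with SIGINT first, then escalate if the process ignores it.
    os.kill(process.pid, signal.SIGINT)
    process.join(timeout)
    if process.is_alive():
        process.terminate()
        process.join(timeout)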