def _test() -> None:
    import argparse
    import asyncio
    import logging
    from pathlib import Path
    import signal
    from eth.chains.ropsten import ROPSTEN_GENESIS_HEADER
    from p2p import ecies
    from p2p.constants import ROPSTEN_BOOTNODES
    from p2p.kademlia import Node
    from trinity.constants import ROPSTEN_NETWORK_ID
    from trinity._utils.chains import load_nodekey
    from tests.core.integration_test_helpers import (
        FakeAsyncLevelDB, FakeAsyncHeaderDB, FakeAsyncChainDB, FakeAsyncRopstenChain)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-bootnodes', type=str, default=[])
    parser.add_argument('-nodekey', type=str)
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()
    db = FakeAsyncLevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(db)
    chaindb = FakeAsyncChainDB(db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = FakeAsyncRopstenChain(db)

    # NOTE: Since we may create a different priv/pub key pair every time we run this, remote nodes
    # may try to establish a connection using the pubkey from one of our previous runs, which will
    # result in lots of DecryptionErrors in receive_handshake().
    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    port = 30303
    if args.bootnodes:
        bootstrap_nodes = args.bootnodes.split(',')
    else:
        bootstrap_nodes = ROPSTEN_BOOTNODES
    bootstrap_nodes = [Node.from_uri(enode) for enode in bootstrap_nodes]

    # FullServer is expected to be defined in (or imported into) the enclosing module.
    server = FullServer(
        privkey,
        port,
        chain,
        chaindb,
        headerdb,
        db,
        ROPSTEN_NETWORK_ID,
        bootstrap_nodes=bootstrap_nodes,
    )
    server.logger.setLevel(log_level)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await server.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(server.run())
    loop.run_forever()
    loop.close()
async def test_lightchain_integration(
        request, event_loop, caplog, geth_ipc_path, enode, geth_process):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this manually, you can use `tox -e py36-lightchain_integration` or:

        pytest --integration --capture=no tests/integration/test_lightchain_integration.py

    The fixture for this test was generated with:

        geth --testnet --syncmode full

    It only needs the first 11 blocks for this test to succeed.
    """
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    # make sure geth has been launched
    wait_for_socket(geth_ipc_path)

    remote = Node.from_uri(enode)
    base_db = AtomicDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    context = ChainContext(
        headerdb=headerdb,
        network_id=ROPSTEN_NETWORK_ID,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
    )
    peer_pool = LESPeerPool(
        privkey=ecies.generate_privkey(),
        context=context,
    )
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)
    server_request_handler = LightRequestServer(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(server_request_handler.run())
    asyncio.ensure_future(syncer.run())
    await asyncio.sleep(0)  # Yield control to give the LightChainSyncer a chance to start

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())
        event_loop.run_until_complete(server_request_handler.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.coro_get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.coro_get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    peer = peer_pool.highest_td_peer
    head = await peer_chain.coro_get_block_header_by_hash(peer.head_hash)

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to lookup the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('0x8B09D9ac6A4F7778fCb22852e879C7F3B2bEeF81')
    contract_code = await peer_chain.coro_get_contract_code(head.hash, contract_addr)
    assert encode_hex(contract_code) == '0x600060006000600060006000356000f1'

    account = await peer_chain.coro_get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
def _test() -> None:
    import argparse
    import asyncio
    import logging
    from pathlib import Path
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncMainnetChain, FakeAsyncRopstenChain, FakeAsyncHeaderDB,
        connect_to_peers_loop)
    from trinity.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from trinity._utils.chains import load_nodekey

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = FakeAsyncChainDB(base_db)
    # Fall back to the Ropsten genesis if the database is empty.
    try:
        genesis = chaindb.get_canonical_block_header_by_number(0)
    except HeaderNotFound:
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    # Pick the network, VM configuration and chain class based on the genesis in the database.
    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncMainnetChain
    else:
        raise RuntimeError("Unknown genesis: %s", genesis)

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=network_id,
        vm_configuration=vm_config,
    )

    peer_pool = peer_pool_class(privkey=privkey, context=context)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    chain = chain_class(base_db)
    # The syncer classes and peer pools used below are expected to be imported at module level.
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, cast(LESPeerPool, peer_pool))
    else:
        syncer = RegularChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)

    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
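
# A minimal sketch of how a `_test()` driver like the one above is typically invoked:
# running the module directly as a script. This guard is an assumption for illustration
# and is not part of the excerpt itself.
if __name__ == "__main__":
    _test()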