def bootstrap_nodes(self):
    if self._bootstrap_nodes is None or len(self._bootstrap_nodes) == 0:
        if self.is_dev_test_node:
            self.save_node_address_to_local_peer_pool_file()
            if len(self.preferred_nodes) == 0:
                self._bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode) for enode in MAINNET_BOOTNODES)
            else:
                saved_bootstrap_nodes = tuple(
                    load_local_nodes(self.local_peer_pool_path))
                # Assume instance 0 (the instance listening on the default port, 30303)
                # is the bootstrap node for dev testing.
                bootstrap_node = None
                for loop_bootstrap_node in saved_bootstrap_nodes:
                    if loop_bootstrap_node.address.tcp_port == 30303:
                        bootstrap_node = loop_bootstrap_node
                if bootstrap_node is not None and bootstrap_node.pubkey != self.nodekey_public:
                    self._bootstrap_nodes = (bootstrap_node,)
                else:
                    self._bootstrap_nodes = ()
        else:
            if self.network_id == MAINNET_NETWORK_ID:
                self._bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode) for enode in MAINNET_BOOTNODES)
    return self._bootstrap_nodes
def load_local_nodes(path, local_private_key=None):
    # Each saved peer entry has the shape (public_key_hex, ip, udp_port, tcp_port).
    existing_peers = load_peers_from_file(path)
    peer_pool = []
    for peer in existing_peers:
        # Skip our own entry so we never try to dial ourselves.
        if local_private_key is None or peer[0] != local_private_key.public_key.to_hex():
            peer_pool.append(
                Node(keys.PublicKey(decode_hex(peer[0])),
                     Address(peer[1], peer[2], peer[3])))
    return peer_pool
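# Illustrative sketch (an assumption, not part of the original source): `load_peers_from_file`
# used above is assumed to return an iterable of (public_key_hex, ip, udp_port, tcp_port)
# entries, which is the shape `load_local_nodes` indexes into. A JSON-backed reader with that
# contract might look like this; the file layout shown is illustrative only.
import json


def load_peers_from_file_sketch(path):
    # Example file contents:
    # [["<128-hex-char public key>", "127.0.0.1", 30303, 30303]]
    with open(path) as f:
        return json.load(f)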
def _test() -> None:
    import argparse
    import asyncio
    import logging
    import signal

    from hvm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from hvm.db.backends.level import LevelDB
    from hp2p import ecies
    from hp2p.kademlia import Node
    from helios.protocol.common.constants import DEFAULT_PREFERRED_NODES
    from helios.protocol.common.context import ChainContext
    from tests.helios.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)

    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode', type=str, required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=chaindb,
        network_id=network_id,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
    )
    peer_pool = HLSPeerPool(privkey=privkey, context=context)

    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    loop = asyncio.get_event_loop()
    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
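# Illustrative invocation of the _test() entry point above (the module path and database
# location are assumptions; adjust them to wherever this file actually lives in the repo):
#   python -m helios.sync.full.service -db /tmp/ropsten-chaindata \
#       -enode enode://<128-hex-char-pubkey>@127.0.0.1:30303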
def __call__(self,
             parser: argparse.ArgumentParser,
             namespace: argparse.Namespace,
             value: Any,
             option_string: str = None) -> None:
    if value is None:
        return

    enode = Node.from_uri(value)

    if getattr(namespace, self.dest) is None:
        setattr(namespace, self.dest, [])
    enode_list = getattr(namespace, self.dest)
    enode_list.append(enode)
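# Illustrative usage (the `NodeAction` class name is a placeholder; the original snippet only
# shows the __call__ method of an argparse.Action subclass). Each occurrence of the flag parses
# one enode URI into an hp2p.kademlia.Node and appends it to the destination list:
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--preferred-node', action=NodeAction, dest='preferred_nodes')
#   args = parser.parse_args(
#       ['--preferred-node', 'enode://<128-hex-char-pubkey>@127.0.0.1:30303'])
#   # args.preferred_nodes is now a list containing one Node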
async def test_lightchain_integration(request, event_loop, caplog, geth_ipc_path,
                                      enode, geth_process):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this manually, you can use `tox -e py36-lightchain_integration` or:

        pytest --integration --capture=no tests/helios/integration/test_lightchain_integration.py

    The fixture for this test was generated with:

        geth --testnet --syncmode full

    It only needs the first 11 blocks for this test to succeed.
    """
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")
    # Will almost certainly want verbose logging in a failure.
    caplog.set_level(logging.DEBUG)

    # Make sure geth has been launched.
    wait_for_socket(geth_ipc_path)

    remote = Node.from_uri(enode)
    base_db = AtomicDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    context = ChainContext(
        headerdb=headerdb,
        network_id=ROPSTEN_NETWORK_ID,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
    )
    peer_pool = LESPeerPool(
        privkey=ecies.generate_privkey(),
        context=context,
    )
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(syncer.run())
    await asyncio.sleep(0)  # Yield control to give the LightChainSyncer a chance to start.

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.coro_get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.coro_get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    peer = peer_pool.highest_td_peer
    head = await peer_chain.coro_get_block_header_by_hash(peer.head_hash)

    # In order to answer queries for contract code, geth needs the state trie entry for the
    # block we specify in the query, but because of fast sync we can only assume it has that
    # for recent blocks, so we use the current head to lookup the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('0x8B09D9ac6A4F7778fCb22852e879C7F3B2bEeF81')
    contract_code = await peer_chain.coro_get_contract_code(head.hash, contract_addr)
    assert encode_hex(contract_code) == '0x600060006000600060006000356000f1'

    account = await peer_chain.coro_get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
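# Illustrative sketch (an assumption; the conftest.py is not shown here): the `--integration`
# flag checked at the top of the test is typically registered through pytest's addoption hook,
# roughly like this.
def pytest_addoption(parser):
    parser.addoption(
        "--integration",
        action="store_true",
        default=False,
        help="run tests that require a live geth instance",
    )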
def _test() -> None:
    import argparse
    import asyncio
    import logging
    import signal
    from pathlib import Path

    from hvm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from hp2p import ecies
    from hp2p.constants import ROPSTEN_BOOTNODES
    from hp2p.kademlia import Node
    from helios.utils.chains import load_nodekey
    from tests.helios.core.integration_test_helpers import (
        FakeAsyncLevelDB, FakeAsyncHeaderDB, FakeAsyncChainDB, FakeAsyncRopstenChain)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-bootnodes', type=str, default=[])
    parser.add_argument('-nodekey', type=str)
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()
    db = FakeAsyncLevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(db)
    chaindb = FakeAsyncChainDB(db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = FakeAsyncRopstenChain(db)

    # NOTE: Since we may create a different priv/pub key pair every time we run this, remote
    # nodes may try to establish a connection using the pubkey from one of our previous runs,
    # which will result in lots of DecryptionErrors in receive_handshake().
    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    port = 30303
    if args.bootnodes:
        bootstrap_nodes = args.bootnodes.split(',')
    else:
        bootstrap_nodes = ROPSTEN_BOOTNODES
    bootstrap_nodes = [Node.from_uri(enode) for enode in bootstrap_nodes]

    server = FullServer(
        privkey,
        port,
        chain,
        chaindb,
        headerdb,
        db,
        RopstenChain.network_id,
        bootstrap_nodes=bootstrap_nodes,
    )
    server.logger.setLevel(log_level)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await server.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(server.run())
    loop.run_forever()
    loop.close()
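# Illustrative sketch (an assumption; helios.utils.chains.load_nodekey is imported above but
# its body is not shown here): a typical implementation reads the raw 32-byte private key from
# disk and wraps it in an eth_keys PrivateKey.
from pathlib import Path

from eth_keys import keys


def load_nodekey_sketch(nodekey_path: Path) -> keys.PrivateKey:
    with nodekey_path.open('rb') as f:
        nodekey_raw = f.read()
    return keys.PrivateKey(nodekey_raw)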
async def _receive_handshake(self, reader: asyncio.StreamReader,
                             writer: asyncio.StreamWriter) -> None:
    msg = await self.wait(reader.read(ENCRYPTED_AUTH_MSG_LEN), timeout=REPLY_TIMEOUT)

    ip, port, *_ = writer.get_extra_info("peername")
    remote_address = Address(ip, port)
    self.logger.debug("Receiving handshake from %s", remote_address)
    got_eip8 = False
    try:
        ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
            msg, self.privkey)
    except DecryptionError:
        # Try to decode as an EIP-8 handshake: the first two bytes are a size prefix for
        # the auth body that follows, so read whatever is still outstanding beyond what
        # we already have.
        got_eip8 = True
        msg_size = big_endian_to_int(msg[:2])
        remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
        msg += await self.wait(reader.read(remaining_bytes), timeout=REPLY_TIMEOUT)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError as e:
            self.logger.debug("Failed to decrypt handshake: %s", e)
            return

    initiator_remote = Node(initiator_pubkey, remote_address)
    responder = HandshakeResponder(initiator_remote, self.privkey, got_eip8, self.cancel_token)

    responder_nonce = secrets.token_bytes(HASH_LEN)
    auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

    # Use the `writer` to send the reply to the remote.
    writer.write(auth_ack_ciphertext)
    await self.wait(writer.drain())

    # Call `HandshakeResponder.derive_secrets()` and use the return values to create the `Peer`.
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce=initiator_nonce,
        responder_nonce=responder_nonce,
        remote_ephemeral_pubkey=ephem_pubkey,
        auth_init_ciphertext=msg,
        auth_ack_ciphertext=auth_ack_ciphertext)

    connection = PeerConnection(
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )

    # Create and register the peer in the peer pool.
    peer = self.peer_pool.get_peer_factory().create_peer(
        remote=initiator_remote,
        connection=connection,
        inbound=True,
    )

    if self.peer_pool.is_full:
        await peer.disconnect(DisconnectReason.too_many_peers)
        return
    elif not self.peer_pool.is_valid_connection_candidate(peer.remote):
        await peer.disconnect(DisconnectReason.useless_peer)
        return

    total_peers = len(self.peer_pool)
    inbound_peer_count = len([
        pool_peer for pool_peer in self.peer_pool.connected_nodes.values()
        if pool_peer.inbound
    ])
    if (self.chain_config.node_type != 4
            and total_peers > 1
            and inbound_peer_count / total_peers > DIAL_IN_OUT_RATIO):
        # Make sure we keep at least 1/4 outbound connections.
        await peer.disconnect(DisconnectReason.too_many_peers)
    else:
        # We use self.wait() here as a workaround for
        # https://github.com/ethereum/py-evm/issues/670.
        await self.wait(self.do_handshake(peer))
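# Illustrative arithmetic for the EIP-8 fallback above (explanatory sketch, not part of the
# original source). An EIP-8 auth packet is a 2-byte big-endian length prefix followed by
# `msg_size` bytes of ciphertext, and the initial read already consumed ENCRYPTED_AUTH_MSG_LEN
# bytes (the prefix plus the first ENCRYPTED_AUTH_MSG_LEN - 2 bytes of ciphertext), so:
#
#   total_packet_len = 2 + msg_size
#   remaining_bytes  = total_packet_len - ENCRYPTED_AUTH_MSG_LEN
#                    = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2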
from hp2p.kademlia import Address, Node

# The default timeout for a round trip API request and response from a peer.
ROUND_TRIP_TIMEOUT = 20.0

# Timeout used when performing the check to ensure peers are on the same side of chain
# splits as us.
CHAIN_SPLIT_CHECK_TIMEOUT = 15

# The default preferred nodes.
DEFAULT_PREFERRED_NODES: Dict[int, Tuple[Node, ...]] = {
    MAINNET_NETWORK_ID: (
        Node(keys.PublicKey(decode_hex("1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082")),  # noqa: E501
             Address("52.74.57.123", 30303, 30303)),
        Node(keys.PublicKey(decode_hex("78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d")),  # noqa: E501
             Address("191.235.84.50", 30303, 30303)),
        Node(keys.PublicKey(decode_hex("ddd81193df80128880232fc1deb45f72746019839589eeb642d3d44efbb8b2dda2c1a46a348349964a6066f8afb016eb2a8c0f3c66f32fadf4370a236a4b5286")),  # noqa: E501
             Address("52.231.202.145", 30303, 30303)),
        Node(
def get_open_port():
    # Bind to an ephemeral port, record its number, and release it so the caller can reuse it.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    s.listen(1)
    port = s.getsockname()[1]
    s.close()
    return port


port = get_open_port()

NETWORK_ID = 99
SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port)
RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key'])
RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key
RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS)

INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key'])
INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key
INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1)
INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS)


class ParagonServer(BaseServer):
    def _make_peer_pool(self):
        return ParagonPeerPool(
            privkey=self.privkey,
            context=ParagonContext(),
            token=self.cancel_token,
        )
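# Illustrative helper (not in the original source): build the enode URI for RECEIVER_REMOTE
# from the constants above, e.g. for feeding into Node.from_uri() or a -bootnodes flag as in
# the other snippets in this file. Assumes Address exposes `ip`, `udp_port` and `tcp_port`
# attributes and that PublicKey.to_hex() returns a 0x-prefixed hex string, matching how those
# APIs are used in the surrounding code.
def receiver_enode_uri() -> str:
    pubkey_hex = RECEIVER_PUBKEY.to_hex()[2:]  # strip the leading '0x'
    return "enode://{}@{}:{}?discport={}".format(
        pubkey_hex, SERVER_ADDRESS.ip, SERVER_ADDRESS.tcp_port, SERVER_ADDRESS.udp_port)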