def load_local_nodes(path, local_private_key=None):
    """Load known peer nodes from the file at *path*.

    Each stored peer record is ``(pubkey_hex, ip, udp_port, tcp_port)``
    (indexed positionally; records may carry extra trailing fields).

    :param path: location of the persisted peer list, passed through to
        ``load_peers_from_file``.
    :param local_private_key: when given, any stored peer whose public key
        matches our own is skipped so we never return ourselves.
    :return: list of ``Node`` objects for the remaining peers.
    """
    existing_peers = load_peers_from_file(path)
    # Hoist the loop-invariant hex encoding of our own public key; the
    # original recomputed it for every stored peer.
    own_pubkey_hex = (
        None if local_private_key is None
        else local_private_key.public_key.to_hex()
    )
    return [
        Node(
            keys.PublicKey(decode_hex(peer[0])),
            Address(peer[1], peer[2], peer[3]),
        )
        for peer in existing_peers
        if own_pubkey_hex is None or peer[0] != own_pubkey_hex
    ]
async def _run(self) -> None:
    """Start all server sub-services and run until cancelled.

    Startup order matters: the externally visible IP is resolved first
    (optionally via UPnP), then the TCP listener and discovery protocol are
    started, the long-running daemons are launched, and finally the syncer
    runs in the foreground.
    """
    self.logger.info("Running server...")
    # Optionally create a NAT port mapping via UPnP; this also tells us our
    # externally visible IP address.
    if self.chain_config.do_upnp:
        self.logger.debug("Doing upnp...")
        mapped_external_ip = await self.upnp_service.add_nat_portmap()
    else:
        mapped_external_ip = None
        self.logger.debug("not doing upnp")
    # Fall back to the wildcard address when no external IP was mapped.
    if mapped_external_ip is None:
        external_ip = '0.0.0.0'
    else:
        external_ip = mapped_external_ip
    await self._start_tcp_listener()
    # Advertise our enode URI (public key hex without the leading "0x").
    self.logger.info(
        "enode://%s@%s:%s",
        self.privkey.public_key.to_hex()[2:],
        external_ip,
        self.port,
    )
    self.logger.info('network: %s', self.network_id)
    self.logger.info('peers: max_peers=%s', self.max_peers)
    addr = Address(external_ip, self.port, self.port)
    # Pick the peer-discovery mechanism: experimental topic-based discv5,
    # or the default protocol that prefers a configured set of nodes.
    if self.use_discv5:
        topic = self._get_discv5_topic()
        self.logger.info(
            "Using experimental v5 (topic) discovery mechanism; topic: %s", topic)
        discovery_proto: DiscoveryProtocol = DiscoveryByTopicProtocol(
            topic, self.privkey, addr, self.bootstrap_nodes, self.cancel_token)
    else:
        discovery_proto = PreferredNodeDiscoveryProtocol(
            self.privkey, addr, self.bootstrap_nodes, self.preferred_nodes,
            self.cancel_token)
    self.discovery = DiscoveryService(
        discovery_proto,
        self.peer_pool,
        self.port,
        token=self.cancel_token,
    )
    # Periodically log memory statistics when configured to do so.
    if self.chain_config.report_memory_usage:
        memory_logger = logging.getLogger("helios.memoryLogger")
        self.run_task(
            coro_periodically_report_memory_stats(
                self.cancel_token,
                self.chain_config.memory_usage_report_interval,
                memory_logger))
    # run_daemon: if any of these crash, the whole server is terminated.
    self.run_daemon(self.peer_pool)
    self.run_daemon(self.discovery)
    self.run_daemon(self.consensus)
    if not self.chain_config.disable_smart_contract_chain_manager:
        self.run_daemon(self.smart_contract_chain_manager)
    if self.chain_config.do_upnp:
        # UPNP service is still experimental and not essential, so we don't use run_daemon() for
        # it as that means if it crashes we'd be terminated as well.
        self.run_child_service(self.upnp_service)
    # The syncer runs in the foreground; this coroutine completes when it does.
    self.syncer = self._make_syncer()
    await self.syncer.run()
async def _receive_handshake(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
    """Handle the responder side of an incoming RLPx encryption handshake.

    Reads the auth message (retrying in EIP-8 format if plain decryption
    fails), replies with an auth-ack, derives the session secrets, wraps the
    stream in a ``PeerConnection`` and — if the pool has capacity and the
    inbound/outbound ratio allows it — completes the protocol handshake.

    :param reader: stream of the freshly accepted TCP connection.
    :param writer: corresponding write side; used for the auth-ack reply.
    """
    msg = await self.wait(reader.read(ENCRYPTED_AUTH_MSG_LEN), timeout=REPLY_TIMEOUT)
    # Renamed from `socket`: the second "peername" element is the remote TCP
    # port, and the old name shadowed the stdlib `socket` module.
    ip, remote_port, *_ = writer.get_extra_info("peername")
    remote_address = Address(ip, remote_port)
    self.logger.debug("Receiving handshake from %s", remote_address)
    got_eip8 = False
    try:
        ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
            msg, self.privkey)
    except DecryptionError:
        # Try to decode as EIP8: the first two bytes carry the total message
        # size, so read whatever is still outstanding and retry.
        got_eip8 = True
        msg_size = big_endian_to_int(msg[:2])
        remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
        msg += await self.wait(reader.read(remaining_bytes), timeout=REPLY_TIMEOUT)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError as e:
            # Undecryptable handshake: drop the connection attempt silently.
            self.logger.debug("Failed to decrypt handshake: %s", e)
            return

    initiator_remote = Node(initiator_pubkey, remote_address)
    responder = HandshakeResponder(initiator_remote, self.privkey, got_eip8, self.cancel_token)

    # `secrets` gives a cryptographically strong nonce.
    responder_nonce = secrets.token_bytes(HASH_LEN)
    auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

    # Use the `writer` to send the reply to the remote
    writer.write(auth_ack_ciphertext)
    await self.wait(writer.drain())

    # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce=initiator_nonce,
        responder_nonce=responder_nonce,
        remote_ephemeral_pubkey=ephem_pubkey,
        auth_init_ciphertext=msg,
        auth_ack_ciphertext=auth_ack_ciphertext)

    connection = PeerConnection(
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )

    # Create and register peer in peer_pool
    peer = self.peer_pool.get_peer_factory().create_peer(
        remote=initiator_remote,
        connection=connection,
        inbound=True,
    )

    if self.peer_pool.is_full:
        await peer.disconnect(DisconnectReason.too_many_peers)
        return
    elif not self.peer_pool.is_valid_connection_candidate(peer.remote):
        await peer.disconnect(DisconnectReason.useless_peer)
        return

    total_peers = len(self.peer_pool)
    # Count inbound connections without materializing a throwaway list; the
    # generator variable no longer shadows the `peer` bound just above.
    inbound_peer_count = sum(
        1 for connected in self.peer_pool.connected_nodes.values()
        if connected.inbound)
    # node_type 4 is exempt from the ratio check -- presumably a dedicated
    # node role; confirm against chain_config definitions.
    if (self.chain_config.node_type != 4
            and total_peers > 1
            and inbound_peer_count / total_peers > DIAL_IN_OUT_RATIO):
        # make sure to have at least 1/4 outbound connections
        await peer.disconnect(DisconnectReason.too_many_peers)
    else:
        # We use self.wait() here as a workaround for
        # https://github.com/ethereum/py-evm/issues/670.
        await self.wait(self.do_handshake(peer))
# The default timeout for a round trip API request and response from a peer. ROUND_TRIP_TIMEOUT = 20.0 # Timeout used when performing the check to ensure peers are on the same side of chain splits as # us. CHAIN_SPLIT_CHECK_TIMEOUT = 15 # The default preferred nodes DEFAULT_PREFERRED_NODES: Dict[int, Tuple[Node, ...]] = { MAINNET_NETWORK_ID: ( Node( keys.PublicKey( decode_hex( "1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082" )), # noqa: E501 Address("52.74.57.123", 30303, 30303)), Node( keys.PublicKey( decode_hex( "78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d" )), # noqa: E501 Address("191.235.84.50", 30303, 30303)), Node( keys.PublicKey( decode_hex( "ddd81193df80128880232fc1deb45f72746019839589eeb642d3d44efbb8b2dda2c1a46a348349964a6066f8afb016eb2a8c0f3c66f32fadf4370a236a4b5286" )), # noqa: E501 Address("52.231.202.145", 30303, 30303)), Node( keys.PublicKey( decode_hex(
from tests.p2p.auth_constants import eip8_values
from tests.helios.core.integration_test_helpers import FakeAsyncHeaderDB


def get_open_port():
    """Return a TCP port that was free at the moment of the call.

    Binds to port 0 so the OS picks an unused ephemeral port, reads the
    assigned port number back, then closes the socket.  NOTE(review): the
    port could be reclaimed between this call and its later use.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(("", 0))
    s.listen(1)
    port = s.getsockname()[1]
    s.close()
    return port


# Shared fixtures: the handshake key material comes from the EIP-8 test
# vectors so both sides of the handshake are deterministic.
port = get_open_port()
NETWORK_ID = 99
SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port)
RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key'])
RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key
RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS)
INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key'])
INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key
# +1 keeps the initiator's address distinct from any port handed out above.
INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1)
INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS)


class ParagonServer(BaseServer):
    # Minimal server subclass used by the tests; wires a paragon peer pool.
    def _make_peer_pool(self):
        return ParagonPeerPool(
            privkey=self.privkey,
            context=ParagonContext(),