async def receive_handshake(
    self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
    """Accept an inbound connection and attempt the encryption handshake.

    Blacklisted dial-in addresses are refused immediately. Otherwise the
    actual handshake is delegated to ``_receive_handshake()``; on any
    failure the transport is closed and the error is logged.
    """
    # Note: writer.get_extra_info("peername") yields (host, port, ...).
    ip, port, *_ = writer.get_extra_info("peername")
    remote_address = Address(ip, port)
    if self.peer_pool.chk_dialin_blacklist(remote_address):
        Logger.info_every_n(
            "{} has been blacklisted, refusing connection".format(remote_address),
            100,
        )
        reader.feed_eof()
        writer.close()
        # Bug fix: without this return we would fall through and attempt a
        # handshake on the connection we just closed.
        return
    # Failure modes that are expected in normal operation: log at debug
    # level and drop the connection quietly.
    expected_exceptions = (
        TimeoutError,
        PeerConnectionLost,
        HandshakeFailure,
        asyncio.IncompleteReadError,
        HandshakeDisconnectedFailure,
    )
    try:
        await self._receive_handshake(reader, writer)
    except expected_exceptions as e:
        self.logger.debug("Could not complete handshake: %s", e)
        Logger.error_every_n("Could not complete handshake: {}".format(e), 100)
        reader.feed_eof()
        writer.close()
    except OperationCancelled:
        self.logger.error("OperationCancelled")
        reader.feed_eof()
        writer.close()
    except Exception:
        # Unexpected failure: keep the server alive but record the traceback.
        self.logger.exception("Unexpected error handling handshake")
        reader.feed_eof()
        writer.close()
async def _run(self) -> None:
    """Start the server: NAT port-mapping, TCP listener, discovery,
    peer pool, and the syncer; then block until cancellation."""
    self.logger.info("Running server...")
    nat_ip = None
    if self.upnp_service:
        nat_ip = await self.upnp_service.add_nat_portmap()
    external_ip = nat_ip if nat_ip else "0.0.0.0"
    await self._start_tcp_listener()
    self.logger.info(
        "this server: enode://%s@%s:%s",
        self.privkey.public_key.to_hex()[2:],
        external_ip,
        self.port,
    )
    self.logger.info("network: %s", self.network_id)
    self.logger.info("peers: max_peers=%s", self.max_peers)
    listen_addr = Address(external_ip, self.port, self.port)
    if self.use_discv5:
        topic = self._get_discv5_topic()
        self.logger.info(
            "Using experimental v5 (topic) discovery mechanism; topic: %s", topic
        )
        proto = DiscoveryByTopicProtocol(
            topic,
            self.privkey,
            listen_addr,
            self.bootstrap_nodes,
            self.network_id,
            self.cancel_token,
        )
    else:
        proto = PreferredNodeDiscoveryProtocol(
            self.privkey,
            listen_addr,
            self.bootstrap_nodes,
            self.preferred_nodes,
            self.network_id,
            self.cancel_token,
        )
    self.discovery = DiscoveryService(
        proto, self.peer_pool, self.port, token=self.cancel_token
    )
    for daemon in (self.peer_pool, self.discovery):
        self.run_daemon(daemon)
    if self.upnp_service:
        # UPNP service is still experimental and not essential, so we don't use
        # run_daemon() for it as that means if it crashes we'd be terminated
        # as well.
        self.run_child_service(self.upnp_service)
    self.syncer = self._make_syncer()
    await self.cancel_token.wait()
async def _receive_handshake(self, reader: asyncio.StreamReader,
                             writer: asyncio.StreamWriter) -> None:
    """Perform the responder side of the RLPx encryption handshake.

    Reads the initiator's auth message (plain or EIP-8 framed), replies with
    an auth-ack, derives the session secrets, registers the resulting inbound
    peer, and enforces pool-capacity / dial-in-ratio limits.
    """
    # Function-local import: only needed for nonce generation below.
    import secrets

    msg = await self.wait(reader.read(ENCRYPTED_AUTH_MSG_LEN), timeout=REPLY_TIMEOUT)

    ip, port, *_ = writer.get_extra_info("peername")
    remote_address = Address(ip, port)
    self.logger.debug("Receiving handshake from %s", remote_address)
    got_eip8 = False
    try:
        ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
            msg, self.privkey)
    except DecryptionError:
        # Try to decode as EIP8: the first two bytes carry the total message
        # size, so read the remainder before decoding again.
        got_eip8 = True
        msg_size = big_endian_to_int(msg[:2])
        remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
        msg += await self.wait(reader.read(remaining_bytes), timeout=REPLY_TIMEOUT)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError as e:
            self.logger.debug("Failed to decrypt handshake: %s", e)
            return

    initiator_remote = Node(initiator_pubkey, remote_address)
    responder = HandshakeResponder(
        initiator_remote, self.privkey, got_eip8, self.cancel_token)

    # Bug fix: the responder nonce is security-sensitive, so it must come
    # from a CSPRNG. numpy.random.bytes() is a predictable Mersenne-Twister
    # source and unsuitable for handshake nonces.
    responder_nonce = secrets.token_bytes(HASH_LEN)
    auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

    # Use the `writer` to send the reply to the remote
    writer.write(auth_ack_ciphertext)
    await self.wait(writer.drain())

    # Call `HandshakeResponder.derive_shared_secrets()` and use return values
    # to create `Peer`
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce=initiator_nonce,
        responder_nonce=responder_nonce,
        remote_ephemeral_pubkey=ephem_pubkey,
        auth_init_ciphertext=msg,
        auth_ack_ciphertext=auth_ack_ciphertext,
    )
    connection = PeerConnection(
        reader=reader,
        writer=writer,
        aes_secret=aes_secret,
        mac_secret=mac_secret,
        egress_mac=egress_mac,
        ingress_mac=ingress_mac,
    )

    # Create and register peer in peer_pool
    peer = self.peer_pool.get_peer_factory().create_peer(
        remote=initiator_remote, connection=connection, inbound=True)

    if self.peer_pool.is_full:
        await peer.disconnect(DisconnectReason.too_many_peers)
        return
    elif not self.peer_pool.is_valid_connection_candidate(peer.remote):
        await peer.disconnect(DisconnectReason.useless_peer)
        return

    total_peers = len(self.peer_pool)
    inbound_peer_count = sum(
        1 for p in self.peer_pool.connected_nodes.values() if p.inbound
    )
    if total_peers > 1 and inbound_peer_count / total_peers > DIAL_IN_OUT_RATIO:
        # make sure to have at least 1/4 outbound connections
        await peer.disconnect(DisconnectReason.too_many_peers)
    else:
        # We use self.wait() here as a workaround for
        # https://github.com/ethereum/py-evm/issues/670.
        await self.wait(self.do_handshake(peer))
async def _run(self) -> None:
    """Start the server, optionally in crawling mode.

    When ``self.crawling_routing_table_path`` is set, a ``CrawlingService``
    replaces the regular ``DiscoveryService``, the routing table is restored
    from / persisted to that path, and the peer pool daemon is not started.
    """
    self.logger.info("Running server...")
    mapped_external_ip = None
    if self.upnp_service:
        mapped_external_ip = await self.upnp_service.add_nat_portmap()
    external_ip = mapped_external_ip or "0.0.0.0"
    await self._start_tcp_listener()
    self.logger.info(
        "this server: enode://%s@%s:%s",
        self.privkey.public_key.to_hex()[2:],
        external_ip,
        self.port,
    )
    self.logger.info("network: %s", self.network_id)
    self.logger.info("peers: max_peers=%s", self.max_peers)
    addr = Address(external_ip, self.port, self.port)
    if self.use_discv5:
        topic = self._get_discv5_topic()
        self.logger.info(
            "Using experimental v5 (topic) discovery mechanism; topic: %s", topic)
        discovery_proto = DiscoveryByTopicProtocol(
            topic,
            self.privkey,
            addr,
            self.bootstrap_nodes,
            self.network_id,
            self.cancel_token,
        )
    else:
        discovery_proto = PreferredNodeDiscoveryProtocol(
            self.privkey,
            addr,
            self.bootstrap_nodes,
            self.preferred_nodes,
            self.network_id,
            self.cancel_token,
        )
    routing_table_path = self.crawling_routing_table_path
    if routing_table_path is not None:
        # Bug fix: this is runtime environment validation, not a debug
        # invariant, so it must not be stripped under `python -O` the way
        # `assert` is. (Also fixes the "pytho3.6" typo in the message.)
        if platform.python_implementation() != "PyPy":
            raise RuntimeError(
                "python3.6 on linux doesn't support pickling module objects"
            )
        self.discovery = CrawlingService(
            discovery_proto, self.port, token=self.cancel_token)
        # hack: replace routing table
        if (os.path.exists(routing_table_path)
                and os.path.getsize(routing_table_path) > 0):
            # SECURITY: pickle.load() executes arbitrary code embedded in the
            # file — only load routing tables written by this process.
            with open(routing_table_path, "rb") as f:
                loaded_routing = pickle.load(f)
            # Bug fix: validate the pickled object *before* mutating it, and
            # raise explicitly instead of using a strippable `assert`.
            if not isinstance(loaded_routing, RoutingTable):
                raise TypeError(
                    "unexpected routing table type: {}".format(type(loaded_routing))
                )
            discovery_proto.routing = loaded_routing
            discovery_proto.routing.this_node = discovery_proto.this_node

        def persist_routing_table(_):
            # Save the crawled routing table so the next run resumes from it.
            with open(routing_table_path, "wb") as f:
                pickle.dump(discovery_proto.routing, f)

        self.discovery.add_finished_callback(persist_routing_table)
        # no need to run peer pool as daemon
    else:
        self.discovery = DiscoveryService(
            discovery_proto, self.peer_pool, self.port, token=self.cancel_token)
        self.run_daemon(self.peer_pool)
    self.run_daemon(self.discovery)
    if self.upnp_service:
        # UPNP service is still experimental and not essential, so we don't use
        # run_daemon() for it as that means if it crashes we'd be terminated
        # as well.
        self.run_child_service(self.upnp_service)
    self.syncer = self._make_syncer()
    await self.cancel_token.wait()