def __init__(self,
             privkey: datatypes.PrivateKey,
             port: int,
             chain: AsyncChain,
             chaindb: AsyncChainDB,
             headerdb: 'BaseAsyncHeaderDB',
             base_db: BaseDB,
             network_id: int,
             max_peers: int = DEFAULT_MAX_PEERS,
             peer_class: Type[BasePeer] = ETHPeer,
             peer_pool_class: Type[PeerPool] = PreferredNodePeerPool,
             bootstrap_nodes: Tuple[Node, ...] = None,
             token: CancelToken = None,
             ) -> None:
    super().__init__(token)
    self.headerdb = headerdb
    self.chaindb = chaindb
    self.chain = chain
    self.base_db = base_db
    self.privkey = privkey
    self.port = port
    self.network_id = network_id
    self.peer_class = peer_class
    self.peer_pool_class = peer_pool_class
    self.max_peers = max_peers
    self.bootstrap_nodes = bootstrap_nodes
    self.upnp_service = UPnPService(port, token=self.cancel_token)

    if not bootstrap_nodes:
        self.logger.warning("Running with no bootstrap nodes")
def __init__(self,
             privkey: datatypes.PrivateKey,
             port: int,
             chain: AsyncChain,
             chaindb: AsyncChainDB,
             headerdb: AsyncHeaderDB,
             base_db: AsyncBaseDB,
             network_id: int,
             max_peers: int = DEFAULT_MAX_PEERS,
             peer_class: Type[BasePeer] = ETHPeer,
             bootstrap_nodes: Tuple[Node, ...] = None,
             preferred_nodes: Sequence[Node] = None,
             token: CancelToken = None,
             ) -> None:
    super().__init__(token)
    self.headerdb = headerdb
    self.chaindb = chaindb
    self.chain = chain
    self.base_db = base_db
    self.privkey = privkey
    self.port = port
    self.network_id = network_id
    self.peer_class = peer_class
    self.max_peers = max_peers
    self.bootstrap_nodes = bootstrap_nodes
    self.preferred_nodes = preferred_nodes
    if self.preferred_nodes is None and network_id in DEFAULT_PREFERRED_NODES:
        self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
    self.upnp_service = UPnPService(port, token=self.cancel_token)
    self.peer_pool = self._make_peer_pool()

    if not bootstrap_nodes:
        self.logger.warning("Running with no bootstrap nodes")
def __init__(self,
             privkey: datatypes.PrivateKey,
             port: int,
             chain: BaseAsyncChain,
             chaindb: BaseAsyncChainDB,
             headerdb: BaseAsyncHeaderDB,
             base_db: BaseAsyncDB,
             network_id: int,
             peer_info: BasePeerInfo = None,
             max_peers: int = DEFAULT_MAX_PEERS,
             bootstrap_nodes: Tuple[Node, ...] = None,
             preferred_nodes: Sequence[Node] = None,
             event_bus: TrinityEventBusEndpoint = None,
             token: CancelToken = None,
             ) -> None:
    super().__init__(token)
    # cross process event bus
    self.event_bus = event_bus
    # chain information
    self.chain = chain
    self.chaindb = chaindb
    self.headerdb = headerdb
    self.base_db = base_db
    # node information
    self.privkey = privkey
    self.port = port
    self.network_id = network_id
    self.peer_info = peer_info
    self.max_peers = max_peers
    self.bootstrap_nodes = bootstrap_nodes
    self.preferred_nodes = preferred_nodes
    if self.preferred_nodes is None and network_id in DEFAULT_PREFERRED_NODES:
        self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
    # child services
    self.upnp_service = UPnPService(port, token=self.cancel_token)
    self.peer_pool = self._make_peer_pool()
    self._peer_pool_request_handler = self._make_peer_pool_request_handler(self.peer_pool)
    self.request_server = self._make_request_server()

    if not bootstrap_nodes:
        self.logger.warning("Running with no bootstrap nodes")
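# The `preferred_nodes` fallback in the two newer constructors above only applies when the
# caller passes `None`; an explicitly supplied (even empty) sequence is kept as-is. Below is
# a minimal, self-contained sketch of that pattern. The mapping and the node names are
# made-up stand-ins for illustration, not Trinity's actual DEFAULT_PREFERRED_NODES table.
from typing import Dict, Optional, Sequence, Tuple

_EXAMPLE_PREFERRED_NODES: Dict[int, Tuple[str, ...]] = {
    1: ('mainnet-node-a', 'mainnet-node-b'),   # stand-in entries
    3: ('ropsten-node-a',),
}


def _resolve_preferred_nodes(
        network_id: int,
        preferred_nodes: Optional[Sequence[str]] = None) -> Optional[Sequence[str]]:
    # Mirrors the constructor logic: use the per-network defaults only when the
    # caller did not supply anything at all.
    if preferred_nodes is None and network_id in _EXAMPLE_PREFERRED_NODES:
        return _EXAMPLE_PREFERRED_NODES[network_id]
    return preferred_nodes


assert _resolve_preferred_nodes(1) == _EXAMPLE_PREFERRED_NODES[1]
assert _resolve_preferred_nodes(1, ()) == ()      # explicit empty sequence is respected
assert _resolve_preferred_nodes(99) is None       # unknown network: no defaults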
class Server(BaseService):
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _tcp_listener = None
    _udp_listener = None
    peer_pool: PeerPool = None

    def __init__(self,
                 privkey: datatypes.PrivateKey,
                 port: int,
                 chain: AsyncChain,
                 chaindb: AsyncChainDB,
                 headerdb: 'BaseAsyncHeaderDB',
                 base_db: BaseDB,
                 network_id: int,
                 max_peers: int = DEFAULT_MAX_PEERS,
                 peer_class: Type[BasePeer] = ETHPeer,
                 peer_pool_class: Type[PeerPool] = PreferredNodePeerPool,
                 bootstrap_nodes: Tuple[Node, ...] = None,
                 token: CancelToken = None,
                 ) -> None:
        super().__init__(token)
        self.headerdb = headerdb
        self.chaindb = chaindb
        self.chain = chain
        self.base_db = base_db
        self.privkey = privkey
        self.port = port
        self.network_id = network_id
        self.peer_class = peer_class
        self.peer_pool_class = peer_pool_class
        self.max_peers = max_peers
        self.bootstrap_nodes = bootstrap_nodes
        self.upnp_service = UPnPService(port, token=self.cancel_token)

        if not bootstrap_nodes:
            self.logger.warning("Running with no bootstrap nodes")

    async def _start_tcp_listener(self) -> None:
        # TODO: Support IPv6 addresses as well.
        self._tcp_listener = await asyncio.start_server(
            self.receive_handshake,
            host='0.0.0.0',
            port=self.port,
        )

    async def _close_tcp_listener(self) -> None:
        self._tcp_listener.close()
        await self._tcp_listener.wait_closed()

    async def _start_udp_listener(self, discovery: DiscoveryProtocol) -> None:
        loop = asyncio.get_event_loop()
        # TODO: Support IPv6 addresses as well.
        self._udp_transport, _ = await loop.create_datagram_endpoint(
            lambda: discovery,
            local_addr=('0.0.0.0', self.port),
            family=socket.AF_INET,
        )

    async def _close_udp_listener(self) -> None:
        cast(asyncio.DatagramTransport, self._udp_transport).abort()

    async def _close(self) -> None:
        await asyncio.gather(self._close_tcp_listener(), self._close_udp_listener())

    def _make_syncer(self, peer_pool: PeerPool) -> BaseService:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return FullNodeSyncer(
            self.chain, self.chaindb, self.base_db, peer_pool, self.cancel_token)

    def _make_peer_pool(self, discovery: DiscoveryProtocol) -> PeerPool:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return self.peer_pool_class(
            self.peer_class,
            self.headerdb,
            self.network_id,
            self.privkey,
            discovery,
            max_peers=self.max_peers,
        )

    async def _run(self) -> None:
        self.logger.info("Running server...")
        mapped_external_ip = await self.upnp_service.add_nat_portmap()
        if mapped_external_ip is None:
            # If no NAT port mapping could be set up, fall back to advertising 0.0.0.0.
            external_ip = '0.0.0.0'
        else:
            external_ip = mapped_external_ip
        await self._start_tcp_listener()
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            external_ip,
            self.port,
        )
        self.logger.info('network: %s', self.network_id)
        self.logger.info('peers: max_peers=%s', self.max_peers)
        addr = Address(external_ip, self.port, self.port)
        self.discovery = DiscoveryProtocol(
            self.privkey, addr, bootstrap_nodes=self.bootstrap_nodes)
        await self._start_udp_listener(self.discovery)
        self.peer_pool = self._make_peer_pool(self.discovery)
        asyncio.ensure_future(self.discovery.bootstrap())
        asyncio.ensure_future(self.peer_pool.run())
        asyncio.ensure_future(self.upnp_service.run())
        self.syncer = self._make_syncer(self.peer_pool)
        await self.syncer.run()

    async def _cleanup(self) -> None:
        self.logger.info("Closing server...")
        await asyncio.gather(
            self.peer_pool.cancel(),
            self.discovery.stop(),
        )
        await self._close()

    async def receive_handshake(
            self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        expected_exceptions = (
            TimeoutError,
            PeerConnectionLost,
            HandshakeFailure,
            asyncio.IncompleteReadError,
        )
        try:
            await self._receive_handshake(reader, writer)
        except expected_exceptions as e:
            self.logger.debug("Could not complete handshake: %s", e)
        except OperationCancelled:
            pass
        except Exception:
            self.logger.exception("Unexpected error handling handshake")

    async def _receive_handshake(
            self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        msg = await self.wait(reader.read(ENCRYPTED_AUTH_MSG_LEN), timeout=REPLY_TIMEOUT)

        ip, remote_port, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, remote_port)
        self.logger.debug("Receiving handshake from %s", remote_address)
        got_eip8 = False
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8: those auth messages are prefixed with a 2-byte
            # big-endian size of the ciphertext that follows, so read whatever is
            # still outstanding beyond the bytes already consumed.
            got_eip8 = True
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await self.wait(reader.read(remaining_bytes), timeout=REPLY_TIMEOUT)
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake: %s", e)
                return

        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(
            initiator_remote, self.privkey, got_eip8, self.cancel_token)

        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await self.wait(writer.drain())

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(
            remote=initiator_remote,
            privkey=self.privkey,
            reader=reader,
            writer=writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
            headerdb=self.headerdb,
            network_id=self.network_id,
            inbound=True,
        )

        if self.peer_pool.is_full:
            peer.disconnect(DisconnectReason.too_many_peers)
        else:
            # We use self.wait() here as a workaround for
            # https://github.com/ethereum/py-evm/issues/670.
            await self.wait(self.do_handshake(peer))

    async def do_handshake(self, peer: BasePeer) -> None:
        await peer.do_p2p_handshake()
        await peer.do_sub_proto_handshake()
        self._start_peer(peer)

    def _start_peer(self, peer: BasePeer) -> None:
        # This method exists only so that we can monkey-patch it in tests.
        self.peer_pool.start_peer(peer)
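# The enode URI logged by `_run()` above is just the node's 64-byte public key in hex
# (with the leading "0x" stripped) combined with the externally visible IP and TCP port.
# A small runnable sketch of that formatting using `eth_keys`; the key bytes, IP and
# port below are made up purely for illustration.
from eth_keys import keys

_example_privkey = keys.PrivateKey(b'\x01' * 32)   # throwaway key, never use in production
_example_ip = '203.0.113.5'
_example_port = 30303

enode_uri = "enode://%s@%s:%s" % (
    _example_privkey.public_key.to_hex()[2:],      # strip the leading "0x", as _run() does
    _example_ip,
    _example_port,
)
print(enode_uri)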