Example #1
def _test() -> None:
    import argparse
    import asyncio
    import logging
    import signal
    from p2p import ecies
    from p2p import kademlia
    from p2p.constants import ROPSTEN_BOOTNODES
    from p2p.discovery import DiscoveryProtocol
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, LocalGethPeerPool
    # ETHPeer, PeerPool and ChainSyncer are assumed to come from the enclosing
    # module, which this helper appears to share with ChainSyncer.
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    logging.getLogger('p2p.chain.ChainSyncer').setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    loop = asyncio.get_event_loop()
    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    privkey = ecies.generate_privkey()
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb, RopstenChain.network_id, privkey)
        discovery = None
    else:
        listen_host = '0.0.0.0'
        listen_port = 30303
        addr = kademlia.Address(listen_host, listen_port, listen_port)
        discovery = DiscoveryProtocol(privkey, addr, ROPSTEN_BOOTNODES)
        loop.run_until_complete(discovery.create_endpoint())
        print("Bootstrapping discovery service...")
        loop.run_until_complete(discovery.bootstrap())
        peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, privkey, discovery)

    asyncio.ensure_future(peer_pool.run())
    downloader = ChainSyncer(chaindb, peer_pool)
    # On Ropsten the discovery table is usually full of bad peers, so don't
    # require too many peers before starting to sync.
    downloader.min_peers_to_sync = 1

    async def run() -> None:
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()
        if discovery is not None:
            discovery.stop()
            # Give any pending discovery tasks some time to finish.
            await asyncio.sleep(2)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    loop.run_until_complete(run())
    loop.close()
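
For context, if the helper above is wired up as the module's script entry point (which the argparse setup suggests), it would be invoked roughly as sketched below; the module name and chaindb directory are placeholders, not taken from the source.

# Assumed entry-point wiring; _test() parses -db and -local-geth itself.
if __name__ == '__main__':
    _test()

# Example invocations (module path and chaindb directory are placeholders):
#   python -m p2p.chain -db ./ropsten-chaindata
#   python -m p2p.chain -db ./ropsten-chaindata -local-geth
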
Example #2
class Server(BaseService):
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _tcp_listener = None
    _udp_listener = None

    peer_pool: PeerPool = None

    def __init__(
        self,
        privkey: datatypes.PrivateKey,
        port: int,
        chain: AsyncChain,
        chaindb: AsyncChainDB,
        headerdb: 'BaseAsyncHeaderDB',
        base_db: BaseDB,
        network_id: int,
        max_peers: int = DEFAULT_MAX_PEERS,
        peer_class: Type[BasePeer] = ETHPeer,
        peer_pool_class: Type[PeerPool] = PreferredNodePeerPool,
        bootstrap_nodes: Tuple[Node, ...] = None,
        token: CancelToken = None,
    ) -> None:
        super().__init__(token)
        self.headerdb = headerdb
        self.chaindb = chaindb
        self.chain = chain
        self.base_db = base_db
        self.privkey = privkey
        self.port = port
        self.network_id = network_id
        self.peer_class = peer_class
        self.peer_pool_class = peer_pool_class
        self.max_peers = max_peers
        self.bootstrap_nodes = bootstrap_nodes
        self.upnp_service = UPnPService(port, token=self.cancel_token)

        if not bootstrap_nodes:
            self.logger.warn("Running with no bootstrap nodes")

    async def _start_tcp_listener(self) -> None:
        # TODO: Support IPv6 addresses as well.
        self._tcp_listener = await asyncio.start_server(
            self.receive_handshake,
            host='0.0.0.0',
            port=self.port,
        )

    async def _close_tcp_listener(self) -> None:
        self._tcp_listener.close()
        await self._tcp_listener.wait_closed()

    async def _start_udp_listener(self, discovery: DiscoveryProtocol) -> None:
        loop = asyncio.get_event_loop()
        # TODO: Support IPv6 addresses as well.
        self._udp_transport, _ = await loop.create_datagram_endpoint(
            lambda: discovery,
            local_addr=('0.0.0.0', self.port),
            family=socket.AF_INET)

    async def _close_udp_listener(self) -> None:
        cast(asyncio.DatagramTransport, self._udp_transport).abort()

    async def _close(self) -> None:
        await asyncio.gather(self._close_tcp_listener(),
                             self._close_udp_listener())

    def _make_syncer(self, peer_pool: PeerPool) -> BaseService:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return FullNodeSyncer(self.chain, self.chaindb, self.base_db,
                              peer_pool, self.cancel_token)

    def _make_peer_pool(self, discovery: DiscoveryProtocol) -> PeerPool:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return self.peer_pool_class(
            self.peer_class,
            self.headerdb,
            self.network_id,
            self.privkey,
            discovery,
            max_peers=self.max_peers,
        )

    async def _run(self) -> None:
        self.logger.info("Running server...")
        mapped_external_ip = await self.upnp_service.add_nat_portmap()
        if mapped_external_ip is None:
            external_ip = '0.0.0.0'
        else:
            external_ip = mapped_external_ip
        await self._start_tcp_listener()
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            external_ip,
            self.port,
        )
        self.logger.info('network: %s', self.network_id)
        self.logger.info('peers: max_peers=%s', self.max_peers)
        addr = Address(external_ip, self.port, self.port)
        self.discovery = DiscoveryProtocol(
            self.privkey, addr, bootstrap_nodes=self.bootstrap_nodes)
        await self._start_udp_listener(self.discovery)
        self.peer_pool = self._make_peer_pool(self.discovery)
        asyncio.ensure_future(self.discovery.bootstrap())
        asyncio.ensure_future(self.peer_pool.run())
        asyncio.ensure_future(self.upnp_service.run())
        self.syncer = self._make_syncer(self.peer_pool)
        await self.syncer.run()

    async def _cleanup(self) -> None:
        self.logger.info("Closing server...")
        await asyncio.gather(
            self.peer_pool.cancel(),
            self.discovery.stop(),
        )
        await self._close()

    async def receive_handshake(self, reader: asyncio.StreamReader,
                                writer: asyncio.StreamWriter) -> None:
        expected_exceptions = (
            TimeoutError,
            PeerConnectionLost,
            HandshakeFailure,
            asyncio.IncompleteReadError,
        )
        try:
            await self._receive_handshake(reader, writer)
        except expected_exceptions as e:
            self.logger.debug("Could not complete handshake: %s", e)
        except OperationCancelled:
            pass
        except Exception:
            self.logger.exception("Unexpected error handling handshake")

    async def _receive_handshake(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter) -> None:
        msg = await self.wait(reader.read(ENCRYPTED_AUTH_MSG_LEN),
                              timeout=REPLY_TIMEOUT)

        ip, port, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, port)
        self.logger.debug("Receiving handshake from %s", remote_address)
        got_eip8 = False
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            got_eip8 = True
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await self.wait(reader.read(remaining_bytes),
                                   timeout=REPLY_TIMEOUT)
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake: %s", e)
                return

        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey,
                                       got_eip8, self.cancel_token)

        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await self.wait(writer.drain())

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(
            remote=initiator_remote,
            privkey=self.privkey,
            reader=reader,
            writer=writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
            headerdb=self.headerdb,
            network_id=self.network_id,
            inbound=True,
        )

        if self.peer_pool.is_full:
            peer.disconnect(DisconnectReason.too_many_peers)
        else:
            # We use self.wait() here as a workaround for
            # https://github.com/ethereum/py-evm/issues/670.
            await self.wait(self.do_handshake(peer))

    async def do_handshake(self, peer: BasePeer) -> None:
        await peer.do_p2p_handshake()
        await peer.do_sub_proto_handshake()
        self._start_peer(peer)

    def _start_peer(self, peer: BasePeer) -> None:
        # This method exists only so that we can monkey-patch it in tests.
        self.peer_pool.start_peer(peer)
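
A minimal sketch of how this Server variant could be wired up, using only the constructor signature shown above. The chain, chaindb, headerdb and base_db arguments stand for objects built elsewhere (e.g. a RopstenChain plus LevelDB-backed databases, as in Example #1), and driving the service with an asyncio event loop is an assumption about how BaseService is run.

import asyncio

from p2p import ecies


def make_server(chain, chaindb, headerdb, base_db) -> Server:
    # Sketch only: 3 is the Ropsten network id; bootstrap_nodes is left at its
    # default, which triggers the "Running with no bootstrap nodes" warning above.
    return Server(
        privkey=ecies.generate_privkey(),
        port=30303,
        chain=chain,
        chaindb=chaindb,
        headerdb=headerdb,
        base_db=base_db,
        network_id=3,
    )


# Assuming BaseService exposes an awaitable run() (as peer_pool.run() and
# syncer.run() above suggest):
#   loop = asyncio.get_event_loop()
#   loop.run_until_complete(make_server(...).run())
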
Example #3
class Server(BaseService):
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _tcp_listener = None
    _udp_listener = None
    _nat_portmap_lifetime = 30 * 60

    def __init__(self,
                 privkey: datatypes.PrivateKey,
                 port: int,
                 chain: AsyncChain,
                 chaindb: AsyncChainDB,
                 headerdb: 'BaseAsyncHeaderDB',
                 base_db: BaseDB,
                 network_id: int,
                 max_peers: int = DEFAULT_MAX_PEERS,
                 peer_class: Type[BasePeer] = ETHPeer,
                 peer_pool_class: Type[PeerPool] = PreferredNodePeerPool,
                 bootstrap_nodes: Tuple[Node, ...] = None,
                 token: CancelToken = None,
                 ) -> None:
        super().__init__(token)
        self.headerdb = headerdb
        self.chaindb = chaindb
        self.chain = chain
        self.base_db = base_db
        self.privkey = privkey
        self.port = port
        self.network_id = network_id
        self.peer_class = peer_class
        self.peer_pool_class = peer_pool_class
        self.max_peers = max_peers
        self.bootstrap_nodes = bootstrap_nodes

        if not bootstrap_nodes:
            self.logger.warn("Running with no bootstrap nodes")

    async def refresh_nat_portmap(self) -> None:
        """Run an infinite loop refreshing our NAT port mapping.

        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
        sleep for that long as well.
        """
        while self.is_running:
            try:
                # We start with a sleep because our _run() method sets up the initial portmap.
                await self.wait_first(asyncio.sleep(self._nat_portmap_lifetime))
                await self.add_nat_portmap()
            except OperationCancelled:
                break

    async def add_nat_portmap(self) -> None:
        self.logger.info("Setting up NAT portmap...")
        # This is experimental and it's OK if it fails, hence the broad exception handling below.
        try:
            upnp_dev = await self._discover_upnp_device()
            if upnp_dev is None:
                return
            await self._add_nat_portmap(upnp_dev)
        except upnpclient.soap.SOAPError as e:
            if e.args == (718, 'ConflictInMappingEntry'):
                # An entry already exists with the parameters we specified. Maybe the router
                # didn't clean it up after it expired, or it has been configured by another
                # piece of software; either way we should not override it.
                # https://tools.ietf.org/id/draft-ietf-pcp-upnp-igd-interworking-07.html#errors
                self.logger.info("NAT port mapping already configured, not overriding it")
            else:
                self.logger.exception("Failed to setup NAT portmap")
        except Exception:
            self.logger.exception("Failed to setup NAT portmap")

    def _find_internal_ip_on_device_network(self, upnp_dev: upnpclient.upnp.Device) -> str:
        parsed_url = urlparse(upnp_dev.location)
        # Get an ipaddress.IPv4Network instance for the upnp device's network.
        upnp_dev_net = ipaddress.ip_network(parsed_url.hostname + '/24', strict=False)
        for iface in netifaces.interfaces():
            for family, addresses in netifaces.ifaddresses(iface).items():
                # TODO: Support IPv6 addresses as well.
                if family != netifaces.AF_INET:
                    continue
                for item in addresses:
                    if ipaddress.ip_address(item['addr']) in upnp_dev_net:
                        return item['addr']
        return None

    async def _add_nat_portmap(self, upnp_dev: upnpclient.upnp.Device) -> None:
        # Detect our internal IP address (or abort if we can't determine it).
        internal_ip = self._find_internal_ip_on_device_network(upnp_dev)
        if internal_ip is None:
            self.logger.warning(
                "Unable to detect internal IP address in order to setup NAT portmap"
            )
            return

        external_ip = upnp_dev.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
        for protocol, description in [('TCP', 'ethereum p2p'), ('UDP', 'ethereum discovery')]:
            upnp_dev.WANIPConn1.AddPortMapping(
                NewRemoteHost=external_ip,
                NewExternalPort=self.port,
                NewProtocol=protocol,
                NewInternalPort=self.port,
                NewInternalClient=internal_ip,
                NewEnabled='1',
                NewPortMappingDescription=description,
                NewLeaseDuration=self._nat_portmap_lifetime,
            )
        self.logger.info("NAT port forwarding successfully setup")

    async def _discover_upnp_device(self) -> upnpclient.upnp.Device:
        loop = asyncio.get_event_loop()
        # UPnP discovery can take a long time, so use a generous timeout here.
        discover_timeout = 10 * REPLY_TIMEOUT
        # Use loop.run_in_executor() because upnpclient.discover() is blocking and may take a
        # while to complete.
        devices = await self.wait_first(
            loop.run_in_executor(None, upnpclient.discover),
            timeout=discover_timeout)

        # If there are no UPnP devices we can exit early.
        if not devices:
            self.logger.info("No UPNP-enabled devices found")
            return None

        # Now we loop over all of the devices until we find one that we can use.
        for device in devices:
            try:
                device.WANIPConn1
            except AttributeError:
                continue
            return device
        return None

    async def _start_tcp_listener(self) -> None:
        # TODO: Support IPv6 addresses as well.
        self._tcp_listener = await asyncio.start_server(
            self.receive_handshake,
            host='0.0.0.0',
            port=self.port,
        )

    async def _close_tcp_listener(self) -> None:
        self._tcp_listener.close()
        await self._tcp_listener.wait_closed()

    async def _start_udp_listener(self, discovery: DiscoveryProtocol) -> None:
        loop = asyncio.get_event_loop()
        # TODO: Support IPv6 addresses as well.
        self._udp_transport, _ = await loop.create_datagram_endpoint(
            lambda: discovery,
            local_addr=('0.0.0.0', self.port),
            family=socket.AF_INET)

    async def _close_udp_listener(self) -> None:
        cast(asyncio.DatagramTransport, self._udp_transport).abort()

    async def _close(self) -> None:
        await asyncio.gather(
            self._close_tcp_listener(), self._close_udp_listener())

    def _make_syncer(self, peer_pool: PeerPool) -> BaseService:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return FullNodeSyncer(
            self.chain, self.chaindb, self.base_db, peer_pool, self.cancel_token)

    def _make_peer_pool(self, discovery: DiscoveryProtocol) -> PeerPool:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return self.peer_pool_class(
            self.peer_class,
            self.headerdb,
            self.network_id,
            self.privkey,
            discovery,
            max_peers=self.max_peers,
        )

    async def _run(self) -> None:
        self.logger.info("Running server...")
        upnp_dev = await self._discover_upnp_device()
        external_ip = '0.0.0.0'
        if upnp_dev is not None:
            external_ip = upnp_dev.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
            await self._add_nat_portmap(upnp_dev)
        await self._start_tcp_listener()
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            external_ip,
            self.port,
        )
        self.logger.info('network: %s', self.network_id)
        addr = Address(external_ip, self.port, self.port)
        self.discovery = DiscoveryProtocol(self.privkey, addr, bootstrap_nodes=self.bootstrap_nodes)
        await self._start_udp_listener(self.discovery)
        self.peer_pool = self._make_peer_pool(self.discovery)
        asyncio.ensure_future(self.refresh_nat_portmap())
        asyncio.ensure_future(self.discovery.bootstrap())
        asyncio.ensure_future(self.peer_pool.run())
        self.syncer = self._make_syncer(self.peer_pool)
        await self.syncer.run()

    async def _cleanup(self) -> None:
        self.logger.info("Closing server...")
        await asyncio.gather(self.peer_pool.cancel(), self.discovery.stop())
        await self._close()

    async def receive_handshake(
            self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        expected_exceptions = (
            TimeoutError,
            PeerConnectionLost,
            HandshakeFailure,
            asyncio.IncompleteReadError,
        )
        try:
            await self._receive_handshake(reader, writer)
        except expected_exceptions as e:
            self.logger.debug("Could not complete handshake: %s", e)
        except unclean_close_exceptions:
            # unclean_close_exceptions is assumed to be a module-level tuple of
            # connection-teardown errors (e.g. ConnectionResetError, BrokenPipeError).
            self.logger.exception("Unclean exit while receiving handshake")
        except OperationCancelled:
            pass
        except Exception:
            self.logger.exception("Unexpected error handling handshake")

    async def _receive_handshake(
            self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        msg = await self.wait_first(
            reader.read(ENCRYPTED_AUTH_MSG_LEN),
            timeout=REPLY_TIMEOUT)

        ip, port, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, port)
        self.logger.debug("Receiving handshake from %s", remote_address)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await self.wait_first(
                reader.read(remaining_bytes),
                timeout=REPLY_TIMEOUT)
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake: %s", e)
                return

        # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey, self.cancel_token)

        # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await writer.drain()

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext
        )

        # Create and register peer in peer_pool
        peer = self.peer_class(
            remote=initiator_remote,
            privkey=self.privkey,
            reader=reader,
            writer=writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
            headerdb=self.headerdb,
            network_id=self.network_id,
            inbound=True,
        )

        await self.do_handshake(peer)

    async def do_handshake(self, peer: BasePeer) -> None:
        await peer.do_p2p_handshake()
        await peer.do_sub_proto_handshake()
        self._start_peer(peer)

    def _start_peer(self, peer: BasePeer) -> None:
        # This method exists only so that we can monkey-patch it in tests.
        self.peer_pool.start_peer(peer)
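
The NAT handling in this example reduces to a handful of upnpclient calls. Below is a standalone sketch of the same flow using only the methods already exercised by _discover_upnp_device() and _add_nat_portmap(); the port, internal address and mapping description are placeholders.

import upnpclient

# Sketch of the UPnP calls used above; 30303, '192.168.1.2' and the mapping
# description are placeholders.
devices = upnpclient.discover()  # blocking network scan
device = next((d for d in devices if hasattr(d, 'WANIPConn1')), None)
if device is not None:
    external_ip = device.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
    device.WANIPConn1.AddPortMapping(
        NewRemoteHost=external_ip,
        NewExternalPort=30303,
        NewProtocol='TCP',
        NewInternalPort=30303,
        NewInternalClient='192.168.1.2',
        NewEnabled='1',
        NewPortMappingDescription='ethereum p2p',
        NewLeaseDuration=30 * 60,
    )
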
Example #4
class Server(BaseService):
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _server = None

    def __init__(
        self,
        privkey: datatypes.PrivateKey,
        address: Address,
        chain: AsyncChain,
        chaindb: AsyncChainDB,
        headerdb: 'BaseAsyncHeaderDB',
        db: BaseDB,
        network_id: int,
        min_peers: int = DEFAULT_MIN_PEERS,
        peer_class: Type[BasePeer] = ETHPeer,
        peer_pool_class: Type[PeerPool] = PeerPool,
        bootstrap_nodes: List[str] = None,  # avoid a mutable default; None is handled below
    ) -> None:
        super().__init__(CancelToken('Server'))
        self.headerdb = headerdb
        self.privkey = privkey
        self.address = address
        self.network_id = network_id
        self.peer_class = peer_class
        if not bootstrap_nodes:
            if self.network_id == MAINNET_NETWORK_ID:
                bootstrap_nodes = MAINNET_BOOTNODES
            elif self.network_id == ROPSTEN_NETWORK_ID:
                bootstrap_nodes = ROPSTEN_BOOTNODES
            else:
                self.logger.warning(
                    "No bootstrap nodes for network id: %s", network_id)
                bootstrap_nodes = []
        self.discovery = DiscoveryProtocol(self.privkey,
                                           self.address,
                                           bootstrap_nodes=bootstrap_nodes)
        self.peer_pool = peer_pool_class(
            peer_class,
            self.headerdb,
            self.network_id,
            self.privkey,
            self.discovery,
            min_peers=min_peers,
        )
        self.syncer = FullNodeSyncer(chain, chaindb, db, self.peer_pool,
                                     self.cancel_token)

    async def refresh_nat_portmap(self) -> None:
        """Run an infinite loop refreshing our NAT port mapping.

        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
        sleep for that long as well.
        """
        lifetime = 30 * 60
        while not self.is_finished:
            self.logger.info("Setting up NAT portmap...")
            # This is experimental and it's OK if it fails, hence the broad exception handling below.
            try:
                await self._add_nat_portmap(lifetime)
            except Exception as e:
                if (isinstance(e, upnpclient.soap.SOAPError)
                        and e.args == (718, 'ConflictInMappingEntry')):
                    # An entry already exists with the parameters we specified. Maybe the router
                    # didn't clean it up after it expired, or it has been configured by another
                    # piece of software; either way we should not override it.
                    # https://tools.ietf.org/id/draft-ietf-pcp-upnp-igd-interworking-07.html#errors
                    self.logger.info(
                        "NAT port mapping already configured, not overriding it"
                    )
                else:
                    self.logger.exception("Failed to setup NAT portmap")

            try:
                await wait_with_token(asyncio.sleep(lifetime),
                                      token=self.cancel_token)
            except OperationCancelled:
                break

    async def _add_nat_portmap(self, lifetime: int) -> None:
        loop = asyncio.get_event_loop()
        # Use loop.run_in_executor() because upnpclient.discover() is blocking and may take a
        # while to complete. And we must use a ThreadPoolExecutor() because the response from
        # upnpclient.discover() can't be pickled.
        devices = await wait_with_token(
            loop.run_in_executor(ThreadPoolExecutor(max_workers=1), upnpclient.discover),
            token=self.cancel_token,
            timeout=2 * REPLY_TIMEOUT)
        if not devices:
            self.logger.info("No UPNP-enabled devices found")
            return
        device = devices[0]
        external_ip = device.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress']
        device.WANIPConn1.AddPortMapping(
            NewRemoteHost=external_ip,
            NewExternalPort=self.address.tcp_port,
            NewProtocol='TCP',
            NewInternalPort=self.address.tcp_port,
            NewInternalClient=self.address.ip,
            NewEnabled='1',
            NewPortMappingDescription='Created by Py-EVM',
            NewLeaseDuration=lifetime)
        self.logger.info("NAT port forwarding successfully setup")

    async def _start(self) -> None:
        self._server = await asyncio.start_server(
            self.receive_handshake,
            host=self.address.ip,
            port=self.address.tcp_port,
        )

    async def _close(self) -> None:
        self._server.close()
        await self._server.wait_closed()

    async def _run(self) -> None:
        self.logger.info("Running server...")
        await self._start()
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            self.address.ip,
            self.address.tcp_port,
        )
        await self.discovery.create_endpoint()
        asyncio.ensure_future(self.refresh_nat_portmap())
        asyncio.ensure_future(self.discovery.bootstrap())
        asyncio.ensure_future(self.peer_pool.run())
        await self.syncer.run()

    async def _cleanup(self) -> None:
        self.logger.info("Closing server...")
        await self.peer_pool.cancel()
        await self.discovery.stop()
        await self._close()

    async def receive_handshake(self, reader: asyncio.StreamReader,
                                writer: asyncio.StreamWriter) -> None:
        expected_exceptions = (TimeoutError, PeerConnectionLost,
                               HandshakeFailure, asyncio.IncompleteReadError,
                               ConnectionResetError, BrokenPipeError)
        try:
            await self._receive_handshake(reader, writer)
        except expected_exceptions:
            self.logger.debug("Could not complete handshake", exc_info=True)
        except OperationCancelled:
            pass
        except Exception:
            self.logger.exception("Unexpected error handling handshake")

    async def _receive_handshake(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter) -> None:
        msg = await wait_with_token(
            reader.read(ENCRYPTED_AUTH_MSG_LEN),
            token=self.cancel_token,
            timeout=REPLY_TIMEOUT,
        )

        ip, port, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, port)
        self.logger.debug("Receiving handshake from %s", remote_address)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await wait_with_token(
                reader.read(remaining_bytes),
                token=self.cancel_token,
                timeout=REPLY_TIMEOUT,
            )
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake", exc_info=True)
                return

        # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey,
                                       self.cancel_token)

        # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await writer.drain()

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(remote=initiator_remote,
                               privkey=self.privkey,
                               reader=reader,
                               writer=writer,
                               aes_secret=aes_secret,
                               mac_secret=mac_secret,
                               egress_mac=egress_mac,
                               ingress_mac=ingress_mac,
                               headerdb=self.headerdb,
                               network_id=self.network_id)

        await self.do_handshake(peer)

    async def do_handshake(self, peer: BasePeer) -> None:
        await peer.do_p2p_handshake()
        await peer.do_sub_proto_handshake()
        self.peer_pool.start_peer(peer)
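
Unlike Examples #2 and #3, this variant takes a kademlia Address and creates its own CancelToken, so a caller only has to build it and drive run(). Below is a minimal sketch mirroring the signal handling from Example #1; chain, chaindb, headerdb and db stand for objects built elsewhere, and it is an assumption that run() returns once the cancel token fires (as _cleanup() suggests).

import asyncio
import signal

from p2p import ecies, kademlia


def run_server(chain, chaindb, headerdb, db) -> None:
    # Sketch only: listen address, port and network id are placeholders.
    server = Server(
        privkey=ecies.generate_privkey(),
        address=kademlia.Address('0.0.0.0', 30303, 30303),
        chain=chain,
        chaindb=chaindb,
        headerdb=headerdb,
        db=db,
        network_id=1,  # mainnet, so the constructor falls back to MAINNET_BOOTNODES
    )
    loop = asyncio.get_event_loop()
    # SIGINT/SIGTERM trigger the server's cancel token, as in Example #1.
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, server.cancel_token.trigger)
    loop.run_until_complete(server.run())
    loop.close()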