Example #1
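A test of CancelToken.wait(): the token is scheduled to be triggered on the next event-loop iteration, so the wait() coroutine completes within the 0.1-second timeout and the token reports itself as triggered. The event_loop argument appears to be the pytest-asyncio fixture.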
async def test_token_wait(event_loop):
    token = CancelToken('token')
    event_loop.call_soon(token.trigger)
    done, pending = await asyncio.wait([token.wait()], timeout=0.1)
    assert len(done) == 1
    assert len(pending) == 0
    assert token.triggered
Example #2
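A test that cancelling a pending CancelToken.wait() task does not leave behind wait() coroutines for chained tokens: nothing triggers the token, so the wait stays pending; the test then cancels it and checks, via assert_only_current_task_not_done(), that no other tasks remain.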
async def test_wait_cancel_pending_tasks_on_cancellation(event_loop):
    """Test that cancelling a pending CancelToken.wait() coroutine doesn't leave .wait()
    coroutines for any chained tokens behind.
    """
    token = CancelToken('token').chain(CancelToken('token2')).chain(CancelToken('token3'))
    token_wait_coroutine = token.wait()
    done, pending = await asyncio.wait([token_wait_coroutine], timeout=0.1)
    assert len(done) == 0
    assert len(pending) == 1
    pending_task = pending.pop()
    assert pending_task._coro == token_wait_coroutine
    pending_task.cancel()
    await assert_only_current_task_not_done()
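
For reference, a minimal sketch of the pattern this test exercises: waiting on a (possibly chained) token with a timeout and explicitly cancelling the wait if nothing triggered it. This is an illustration, not code from the project; wait_briefly is a hypothetical helper.

import asyncio

async def wait_briefly(token: CancelToken, timeout: float) -> bool:
    # Wait until the token is triggered or the timeout expires.
    done, pending = await asyncio.wait([token.wait()], timeout=timeout)
    for task in pending:
        # Cancel the still-pending wait() so no coroutines for chained tokens are left behind.
        task.cancel()
    return len(done) > 0
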
Example #3
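A PeerPool wired to a DiscoveryProtocol: run() periodically calls maybe_connect_to_more_peers(), which kicks off a random discovery lookup in the background, connects to random nodes from the discovery table, and falls back to a random bootstrap node when no peer could be reached.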
class PeerPool:
    """PeerPool attempts to keep connections to at least min_peers on the given network."""
    logger = logging.getLogger("p2p.peer.PeerPool")
    _connect_loop_sleep = 2
    _last_lookup = 0  # type: float
    _lookup_interval = 5  # type: int

    def __init__(self,
                 peer_class: Type[BasePeer],
                 chaindb: AsyncChainDB,
                 network_id: int,
                 privkey: datatypes.PrivateKey,
                 discovery: DiscoveryProtocol,
                 min_peers: int = DEFAULT_MIN_PEERS,
                 ) -> None:
        self.peer_class = peer_class
        self.chaindb = chaindb
        self.network_id = network_id
        self.privkey = privkey
        self.discovery = discovery
        self.min_peers = min_peers
        self.connected_nodes = {}  # type: Dict[Node, BasePeer]
        self.cancel_token = CancelToken('PeerPool')
        self._subscribers = []  # type: List[PeerPoolSubscriber]

    def get_nodes_to_connect(self) -> Generator[Node, None, None]:
        return self.discovery.get_random_nodes(self.min_peers)

    def subscribe(self, subscriber: PeerPoolSubscriber) -> None:
        self._subscribers.append(subscriber)
        for peer in self.connected_nodes.values():
            subscriber.register_peer(peer)

    def unsubscribe(self, subscriber: PeerPoolSubscriber) -> None:
        if subscriber in self._subscribers:
            self._subscribers.remove(subscriber)

    def start_peer(self, peer):
        asyncio.ensure_future(peer.run(finished_callback=self._peer_finished))
        self.add_peer(peer)

    def add_peer(self, peer):
        self.logger.debug('Adding peer (%s) ...', str(peer))
        self.connected_nodes[peer.remote] = peer
        self.logger.debug('Number of peers: %d', len(self.connected_nodes))
        for subscriber in self._subscribers:
            subscriber.register_peer(peer)

    async def run(self) -> None:
        self.logger.info("Running PeerPool...")
        while not self.cancel_token.triggered:
            try:
                await self.maybe_connect_to_more_peers()
            except OperationCancelled as e:
                self.logger.debug("PeerPool finished: %s", e)
                break
            except:  # noqa: E722
                # Most unexpected errors should be transient, so we log and restart from scratch.
                self.logger.exception("Unexpected error, restarting")
                await self.stop_all_peers()
            # Wait self._connect_loop_sleep seconds, unless we're asked to stop.
            await asyncio.wait([self.cancel_token.wait()], timeout=self._connect_loop_sleep)

    async def stop_all_peers(self) -> None:
        self.logger.info("Stopping all peers ...")
        await asyncio.gather(
            *[peer.stop() for peer in self.connected_nodes.values()])

    async def stop(self) -> None:
        self.cancel_token.trigger()
        await self.stop_all_peers()

    async def connect(self, remote: Node) -> BasePeer:
        """
        Connect to the given remote and return a Peer instance when successful.
        Returns None if the remote is unreachable, times out or is useless.
        """
        if remote in self.connected_nodes:
            self.logger.debug("Skipping %s; already connected to it", remote)
            return None
        expected_exceptions = (
            UnreachablePeer, TimeoutError, PeerConnectionLost, HandshakeFailure)
        try:
            self.logger.debug("Connecting to %s...", remote)
            peer = await wait_with_token(
                handshake(remote, self.privkey, self.peer_class, self.chaindb, self.network_id),
                token=self.cancel_token,
                timeout=HANDSHAKE_TIMEOUT)
            return peer
        except OperationCancelled:
            # Pass it on to instruct our main loop to stop.
            raise
        except expected_exceptions as e:
            self.logger.debug("Could not complete handshake with %s: %s", remote, repr(e))
        except Exception:
            self.logger.exception("Unexpected error during auth/p2p handshake with %s", remote)
        return None

    async def lookup_random_node(self) -> None:
        # This method runs in the background, so we must catch OperationCancelled here otherwise
        # asyncio will warn that its exception was never retrieved.
        try:
            await self.discovery.lookup_random(self.cancel_token)
        except OperationCancelled:
            pass
        self._last_lookup = time.time()

    async def maybe_connect_to_more_peers(self) -> None:
        """Connect to more peers if we're not yet connected to at least self.min_peers."""
        if len(self.connected_nodes) >= self.min_peers:
            self.logger.debug(
                "Already connected to %s peers: %s; sleeping",
                len(self.connected_nodes),
                [remote for remote in self.connected_nodes])
            return

        if self._last_lookup + self._lookup_interval < time.time():
            asyncio.ensure_future(self.lookup_random_node())

        await self._connect_to_nodes(self.get_nodes_to_connect())

        # In some cases (e.g ROPSTEN or private testnets), the discovery table might be full of
        # bad peers so if we can't connect to any peers we try a random bootstrap node as well.
        if not self.peers:
            await self._connect_to_nodes(self._get_random_bootnode())

    def _get_random_bootnode(self) -> Generator[Node, None, None]:
        if self.discovery.bootstrap_nodes:
            yield random.choice(self.discovery.bootstrap_nodes)
        else:
            self.logger.warning('No bootstrap_nodes')

    async def _connect_to_nodes(self, nodes: Generator[Node, None, None]) -> None:
        for node in nodes:
            # TODO: Consider changing connect() to raise an exception instead of returning None,
            # as discussed in
            # https://github.com/ethereum/py-evm/pull/139#discussion_r152067425
            peer = await self.connect(node)
            if peer is not None:
                self.logger.info("Successfully connected to %s", peer)
                self.start_peer(peer)
                if len(self.connected_nodes) >= self.min_peers:
                    return

    def _peer_finished(self, peer: BasePeer) -> None:
        """Remove the given peer from our list of connected nodes.
        This is passed as a callback to be called when a peer finishes.
        """
        if peer.remote in self.connected_nodes:
            self.connected_nodes.pop(peer.remote)

    @property
    def peers(self) -> List[BasePeer]:
        peers = list(self.connected_nodes.values())
        # Shuffle the list of peers so that dumb callsites are less likely to send all requests to
        # a single peer even if they always pick the first one from the list.
        random.shuffle(peers)
        return peers

    async def get_random_peer(self) -> BasePeer:
        while not self.peers:
            self.logger.debug("No connected peers, sleeping a bit")
            await asyncio.sleep(0.5)
        return random.choice(self.peers)
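
A minimal usage sketch showing how the pool is typically driven: run() loops until the cancel token is triggered, and stop() triggers that token and stops all connected peers. This is an illustration, not code from the project; run_pool_for and its arguments are hypothetical.

import asyncio

async def run_pool_for(pool: PeerPool, seconds: float) -> None:
    # Run the pool in the background for a while, then shut it down.
    task = asyncio.ensure_future(pool.run())
    await asyncio.sleep(seconds)
    await pool.stop()   # triggers pool.cancel_token and stops all connected peers
    await task          # run() returns once the token is triggered
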
Example #4
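A PeerPool variant without a discovery service: get_nodes_to_connect() returns a hard-coded list of mainnet or Ropsten nodes, and the handshake is bounded by asyncio.wait_for() rather than by the pool's cancel token.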
class PeerPool:
    """PeerPool attempts to keep connections to at least min_peers on the given network."""
    logger = logging.getLogger("p2p.peer.PeerPool")
    min_peers = 2
    _connect_loop_sleep = 2

    def __init__(
        self,
        peer_class: Type[BasePeer],
        chaindb: ChainDB,
        network_id: int,
        privkey: datatypes.PrivateKey,
    ) -> None:
        self.peer_class = peer_class
        self.chaindb = chaindb
        self.network_id = network_id
        self.privkey = privkey
        self.connected_nodes = {}  # type: Dict[Node, BasePeer]
        self.cancel_token = CancelToken('PeerPool')
        self._subscribers = []  # type: List[PeerPoolSubscriber]

    def subscribe(self, subscriber: PeerPoolSubscriber) -> None:
        self._subscribers.append(subscriber)
        for peer in self.connected_nodes.values():
            subscriber.register_peer(peer)

    def unsubscribe(self, subscriber: PeerPoolSubscriber) -> None:
        if subscriber in self._subscribers:
            self._subscribers.remove(subscriber)

    async def get_nodes_to_connect(self) -> List[Node]:
        # TODO: This should use the Discovery service to lookup nodes to connect to, but our
        # current implementation only supports v4 and with that it takes an insane amount of time
        # to find any LES nodes with the same network ID as us, so for now we hard-code some nodes
        # that seem to have a good uptime.
        from evm.chains.ropsten import RopstenChain
        from evm.chains.mainnet import MainnetChain
        if self.network_id == MainnetChain.network_id:
            return [
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082"
                        )),  # noqa: E501
                    Address("52.74.57.123", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d"
                        )),  # noqa: E501
                    Address("191.235.84.50", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "ddd81193df80128880232fc1deb45f72746019839589eeb642d3d44efbb8b2dda2c1a46a348349964a6066f8afb016eb2a8c0f3c66f32fadf4370a236a4b5286"
                        )),  # noqa: E501
                    Address("52.231.202.145", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99"
                        )),  # noqa: E501
                    Address("13.93.211.84", 30303, 30303)),
            ]
        elif self.network_id == RopstenChain.network_id:
            return [
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "88c2b24429a6f7683fbfd06874ae3f1e7c8b4a5ffb846e77c705ba02e2543789d66fc032b6606a8d8888eb6239a2abe5897ce83f78dcdcfcb027d6ea69aa6fe9"
                        )),  # noqa: E501
                    Address("163.172.157.61", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "a1ef9ba5550d5fac27f7cbd4e8d20a643ad75596f307c91cd6e7f85b548b8a6bf215cca436d6ee436d6135f9fe51398f8dd4c0bd6c6a0c332ccb41880f33ec12"
                        )),  # noqa: E501
                    Address("51.15.218.125", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "e80276aabb7682a4a659f4341c1199de79d91a2e500a6ee9bed16ed4ce927ba8d32ba5dea357739ffdf2c5bcc848d3064bb6f149f0b4249c1f7e53f8bf02bfc8"
                        )),  # noqa: E501
                    Address("51.15.39.57", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "584c0db89b00719e9e7b1b5c32a4a8942f379f4d5d66bb69f9c7fa97fa42f64974e7b057b35eb5a63fd7973af063f9a1d32d8c60dbb4854c64cb8ab385470258"
                        )),  # noqa: E501
                    Address("51.15.35.2", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "d40871fc3e11b2649700978e06acd68a24af54e603d4333faecb70926ca7df93baa0b7bf4e927fcad9a7c1c07f9b325b22f6d1730e728314d0e4e6523e5cebc2"
                        )),  # noqa: E501
                    Address("51.15.132.235", 30303, 30303)),
                Node(
                    keys.PublicKey(
                        decode_hex(
                            "482484b9198530ee2e00db89791823244ca41dcd372242e2e1297dd06f6d8dd357603960c5ad9cc8dc15fcdf0e4edd06b7ad7db590e67a0b54f798c26581ebd7"
                        )),  # noqa: E501
                    Address("51.15.75.138", 30303, 30303)),
            ]
        else:
            raise ValueError("Unknown network_id: {}".format(self.network_id))

    async def run(self):
        self.logger.info("Running PeerPool...")
        while not self.cancel_token.triggered:
            try:
                await self.maybe_connect_to_more_peers()
            except:  # noqa: E722
                # Most unexpected errors should be transient, so we log and restart from scratch.
                self.logger.error("Unexpected error (%s), restarting",
                                  traceback.format_exc())
                await self.stop_all_peers()
            # Wait self._connect_loop_sleep seconds, unless we're asked to stop.
            await asyncio.wait([self.cancel_token.wait()],
                               timeout=self._connect_loop_sleep)

    async def stop_all_peers(self):
        self.logger.info("Stopping all peers ...")
        await asyncio.gather(
            *[peer.stop() for peer in self.connected_nodes.values()])

    async def stop(self):
        self.cancel_token.trigger()
        await self.stop_all_peers()

    async def connect(self, remote: Node) -> BasePeer:
        """
        Connect to the given remote and return a Peer instance when successful.
        Returns None if the remote is unreachable, times out or is useless.
        """
        if remote in self.connected_nodes:
            self.logger.debug("Skipping %s; already connected to it", remote)
            return None
        expected_exceptions = (UnreachablePeer, asyncio.TimeoutError,
                               PeerConnectionLost, HandshakeFailure)
        try:
            self.logger.info("Connecting to %s...", remote)
            # TODO: Use asyncio.wait() and our cancel_token here to cancel in case the token is
            # triggered.
            peer = await asyncio.wait_for(
                handshake(remote, self.privkey, self.peer_class, self.chaindb,
                          self.network_id), HANDSHAKE_TIMEOUT)
            return peer
        except expected_exceptions as e:
            self.logger.info("Could not complete handshake with %s: %s",
                             remote, repr(e))
        except Exception:
            self.logger.warning(
                "Unexpected error during auth/p2p handshake with %s: %s",
                remote, traceback.format_exc())
        return None

    async def maybe_connect_to_more_peers(self):
        """Connect to more peers if we're not yet connected to at least self.min_peers."""
        if len(self.connected_nodes) >= self.min_peers:
            self.logger.debug("Already connected to %s peers: %s; sleeping",
                              len(self.connected_nodes),
                              [remote for remote in self.connected_nodes])
            return

        for node in await self.get_nodes_to_connect():
            # TODO: Consider changing connect() to raise an exception instead of returning None,
            # as discussed in
            # https://github.com/pipermerriam/py-evm/pull/139#discussion_r152067425
            peer = await self.connect(node)
            if peer is not None:
                self.logger.info("Successfully connected to %s", peer)
                asyncio.ensure_future(
                    peer.run(finished_callback=self._peer_finished))
                self.connected_nodes[peer.remote] = peer
                for subscriber in self._subscribers:
                    subscriber.register_peer(peer)

    def _peer_finished(self, peer: BasePeer) -> None:
        """Remove the given peer from our list of connected nodes.
        This is passed as a callback to be called when a peer finishes.
        """
        if peer.remote in self.connected_nodes:
            self.connected_nodes.pop(peer.remote)

    @property
    def peers(self) -> List[BasePeer]:
        return list(self.connected_nodes.values())
Example #5
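Another PeerPool variant with the hard-coded node list, nearly identical to the previous example; it differs mainly in the chaindb type annotation (AsyncChainDB) and in line wrapping.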
class PeerPool:
    """PeerPool attempts to keep connections to at least min_peers on the given network."""
    logger = logging.getLogger("p2p.peer.PeerPool")
    min_peers = 2
    _connect_loop_sleep = 2

    def __init__(self,
                 peer_class: Type[BasePeer],
                 chaindb: AsyncChainDB,
                 network_id: int,
                 privkey: datatypes.PrivateKey,
                 ) -> None:
        self.peer_class = peer_class
        self.chaindb = chaindb
        self.network_id = network_id
        self.privkey = privkey
        self.connected_nodes = {}  # type: Dict[Node, BasePeer]
        self.cancel_token = CancelToken('PeerPool')
        self._subscribers = []  # type: List[PeerPoolSubscriber]

    def subscribe(self, subscriber: PeerPoolSubscriber) -> None:
        self._subscribers.append(subscriber)
        for peer in self.connected_nodes.values():
            subscriber.register_peer(peer)

    def unsubscribe(self, subscriber: PeerPoolSubscriber) -> None:
        if subscriber in self._subscribers:
            self._subscribers.remove(subscriber)

    async def get_nodes_to_connect(self) -> List[Node]:
        # TODO: This should use the Discovery service to lookup nodes to connect to, but our
        # current implementation only supports v4 and with that it takes an insane amount of time
        # to find any LES nodes with the same network ID as us, so for now we hard-code some nodes
        # that seem to have a good uptime.
        from evm.chains.ropsten import RopstenChain
        from evm.chains.mainnet import MainnetChain
        if self.network_id == MainnetChain.network_id:
            return [
                Node(
                    keys.PublicKey(decode_hex("1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082")),  # noqa: E501
                    Address("52.74.57.123", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d")),  # noqa: E501
                    Address("191.235.84.50", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("ddd81193df80128880232fc1deb45f72746019839589eeb642d3d44efbb8b2dda2c1a46a348349964a6066f8afb016eb2a8c0f3c66f32fadf4370a236a4b5286")),  # noqa: E501
                    Address("52.231.202.145", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99")),  # noqa: E501
                    Address("13.93.211.84", 30303, 30303)),
            ]
        elif self.network_id == RopstenChain.network_id:
            return [
                Node(
                    keys.PublicKey(decode_hex("88c2b24429a6f7683fbfd06874ae3f1e7c8b4a5ffb846e77c705ba02e2543789d66fc032b6606a8d8888eb6239a2abe5897ce83f78dcdcfcb027d6ea69aa6fe9")),  # noqa: E501
                    Address("163.172.157.61", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("a1ef9ba5550d5fac27f7cbd4e8d20a643ad75596f307c91cd6e7f85b548b8a6bf215cca436d6ee436d6135f9fe51398f8dd4c0bd6c6a0c332ccb41880f33ec12")),  # noqa: E501
                    Address("51.15.218.125", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("e80276aabb7682a4a659f4341c1199de79d91a2e500a6ee9bed16ed4ce927ba8d32ba5dea357739ffdf2c5bcc848d3064bb6f149f0b4249c1f7e53f8bf02bfc8")),  # noqa: E501
                    Address("51.15.39.57", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("584c0db89b00719e9e7b1b5c32a4a8942f379f4d5d66bb69f9c7fa97fa42f64974e7b057b35eb5a63fd7973af063f9a1d32d8c60dbb4854c64cb8ab385470258")),  # noqa: E501
                    Address("51.15.35.2", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("d40871fc3e11b2649700978e06acd68a24af54e603d4333faecb70926ca7df93baa0b7bf4e927fcad9a7c1c07f9b325b22f6d1730e728314d0e4e6523e5cebc2")),  # noqa: E501
                    Address("51.15.132.235", 30303, 30303)),
                Node(
                    keys.PublicKey(decode_hex("482484b9198530ee2e00db89791823244ca41dcd372242e2e1297dd06f6d8dd357603960c5ad9cc8dc15fcdf0e4edd06b7ad7db590e67a0b54f798c26581ebd7")),  # noqa: E501
                    Address("51.15.75.138", 30303, 30303)),
            ]
        else:
            raise ValueError("Unknown network_id: {}".format(self.network_id))

    async def run(self):
        self.logger.info("Running PeerPool...")
        while not self.cancel_token.triggered:
            try:
                await self.maybe_connect_to_more_peers()
            except:  # noqa: E722
                # Most unexpected errors should be transient, so we log and restart from scratch.
                self.logger.error("Unexpected error (%s), restarting", traceback.format_exc())
                await self.stop_all_peers()
            # Wait self._connect_loop_sleep seconds, unless we're asked to stop.
            await asyncio.wait([self.cancel_token.wait()], timeout=self._connect_loop_sleep)

    async def stop_all_peers(self):
        self.logger.info("Stopping all peers ...")
        await asyncio.gather(
            *[peer.stop() for peer in self.connected_nodes.values()])

    async def stop(self):
        self.cancel_token.trigger()
        await self.stop_all_peers()

    async def connect(self, remote: Node) -> BasePeer:
        """
        Connect to the given remote and return a Peer instance when successful.
        Returns None if the remote is unreachable, times out or is useless.
        """
        if remote in self.connected_nodes:
            self.logger.debug("Skipping %s; already connected to it", remote)
            return None
        expected_exceptions = (
            UnreachablePeer, asyncio.TimeoutError, PeerConnectionLost, HandshakeFailure)
        try:
            self.logger.info("Connecting to %s...", remote)
            # TODO: Use asyncio.wait() and our cancel_token here to cancel in case the token is
            # triggered.
            peer = await asyncio.wait_for(
                handshake(remote, self.privkey, self.peer_class, self.chaindb, self.network_id),
                HANDSHAKE_TIMEOUT)
            return peer
        except expected_exceptions as e:
            self.logger.info("Could not complete handshake with %s: %s", remote, repr(e))
        except Exception:
            self.logger.warning("Unexpected error during auth/p2p handshake with %s: %s",
                                remote, traceback.format_exc())
        return None

    async def maybe_connect_to_more_peers(self):
        """Connect to more peers if we're not yet connected to at least self.min_peers."""
        if len(self.connected_nodes) >= self.min_peers:
            self.logger.debug(
                "Already connected to %s peers: %s; sleeping",
                len(self.connected_nodes),
                [remote for remote in self.connected_nodes])
            return

        for node in await self.get_nodes_to_connect():
            # TODO: Consider changing connect() to raise an exception instead of returning None,
            # as discussed in
            # https://github.com/pipermerriam/py-evm/pull/139#discussion_r152067425
            peer = await self.connect(node)
            if peer is not None:
                self.logger.info("Successfully connected to %s", peer)
                asyncio.ensure_future(peer.run(finished_callback=self._peer_finished))
                self.connected_nodes[peer.remote] = peer
                for subscriber in self._subscribers:
                    subscriber.register_peer(peer)

    def _peer_finished(self, peer: BasePeer) -> None:
        """Remove the given peer from our list of connected nodes.
        This is passed as a callback to be called when a peer finishes.
        """
        if peer.remote in self.connected_nodes:
            self.connected_nodes.pop(peer.remote)

    @property
    def peers(self) -> List[BasePeer]:
        return list(self.connected_nodes.values())
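
subscribe()/unsubscribe() take a PeerPoolSubscriber, and the only method the pool calls on it in these examples is register_peer(). Below is a minimal sketch of such a subscriber; it is an illustration and assumes register_peer() is the only method a subscriber must implement.

class LoggingSubscriber(PeerPoolSubscriber):
    def register_peer(self, peer: BasePeer) -> None:
        # Called once for each peer already in the pool when subscribing,
        # and again for every peer added later.
        print("registered peer:", peer)

# pool.subscribe(LoggingSubscriber())  # also registers peers that are already connected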