Example no. 1
0
    async def _wait_for_response(self,
                                 request: TRequest,
                                 timeout: int = None) -> TReturn:
        """
        Register *request* as the pending request and wait for its response.

        :param request: the request awaiting a response.
        :param timeout: seconds to wait before giving up; passed through to
            ``self.wait``.
        :raises AlreadyWaiting: if another request is already pending.
        """
        # Only one outstanding request per peer is allowed at a time.
        if self.pending_request is not None:
            self.logger.error(
                "Already waiting for response to %s for peer: %s",
                self.response_msg_name,
                self._peer,
            )
            raise AlreadyWaiting(
                "Already waiting for response to {0} for peer: {1}".format(
                    self.response_msg_name,
                    self._peer
                )
            )

        result_future: 'asyncio.Future[TReturn]' = asyncio.Future()
        self.pending_request = (request, result_future)

        try:
            return await self.wait(result_future, timeout=timeout)
        finally:
            # Always ensure that we reset the `pending_request` to `None` on exit.
            self.pending_request = None
Example no. 2
0
    async def payload_candidates(
            self,
            request: RequestAPI[TRequestPayload],
            tracker: BasePerformanceTracker[RequestAPI[TRequestPayload], Any],
            *,
            timeout: float = None) -> AsyncGenerator[TResponsePayload, None]:
        """
        Make a request and iterate through candidates for a valid response.

        To mark a response as valid, use `complete_request`. After that call, payload
        candidates will stop arriving.

        :param request: the request to send to the peer.
        :param tracker: performance tracker; its ``record_timeout`` is called on
            each payload timeout.
        :param timeout: overall response budget in seconds; defaults to
            ``self.response_timeout`` when ``None``.
        :raises AlreadyWaiting: if the per-command request lock cannot be
            acquired within the scaled lock timeout.
        """
        total_timeout = self.response_timeout if timeout is None else timeout

        # The _lock ensures that we never have two concurrent requests to a
        # single peer for a single command pair in flight.
        # The lock wait budget is NUM_QUEUED_REQUESTS times the response
        # timeout, allowing earlier queued requests to each use their own budget.
        try:
            await self.wait(self._lock.acquire(),
                            timeout=total_timeout * NUM_QUEUED_REQUESTS)
        except TimeoutError:
            raise AlreadyWaiting(
                f"Timed out waiting for {self.response_msg_name} request lock "
                f"or peer: {self._peer}")

        # The response clock starts only after the lock is held.
        start_at = time.perf_counter()

        try:
            self._request(request)
            while self._is_pending():
                # Remaining share of the overall budget, clamped at zero so a
                # late iteration still issues a zero-timeout wait.
                timeout_remaining = max(
                    0, total_timeout - (time.perf_counter() - start_at))

                try:
                    yield await self._get_payload(timeout_remaining)
                except TimeoutError as err:
                    tracker.record_timeout()

                    # If the peer has timed out too many times, disconnect
                    # and blacklist them.
                    try:
                        self.timeout_bucket.take_nowait()
                    except NotEnoughTokens:
                        self.logger.warning(
                            "Blacklisting and disconnecting from %s due to too many timeouts",
                            self._peer,
                        )
                        self._peer.connection_tracker.record_blacklist(
                            self._peer.remote,
                            BLACKLIST_SECONDS_TOO_MANY_TIMEOUTS,
                            f"Too many timeouts: {err}",
                        )
                        self._peer.disconnect_nowait(DisconnectReason.timeout)
                        await self.cancellation()
                    finally:
                        # Re-raise the original TimeoutError whether or not the
                        # token bucket still had capacity.
                        raise
        finally:
            self._lock.release()
Example no. 3
0
    async def payload_candidates(
            self,
            request: BaseRequest[TRequestPayload],
            tracker: BasePerformanceTracker[BaseRequest[TRequestPayload], Any],
            *,
            timeout: float = None) -> AsyncGenerator[TResponsePayload, None]:
        """
        Make a request and iterate through candidates for a valid response.

        To mark a response as valid, use `complete_request`. After that call, payload
        candidates will stop arriving.

        :param request: the request to send to the peer.
        :param tracker: supplies historical round-trip statistics used to
            derive an adaptive per-payload timeout once enough samples exist.
        :param timeout: overall response budget in seconds; defaults to
            ``self.response_timeout`` when ``None``.
        :raises AlreadyWaiting: if the request lock is not acquired in time.
        """
        outer_timeout = self.response_timeout if timeout is None else timeout

        # NOTE(review): the clock starts *before* the lock is acquired, so time
        # spent waiting on the lock counts against the overall budget — confirm
        # this is intended (a sibling variant starts it after acquisition).
        start_at = time.perf_counter()

        # The _lock ensures that we never have two concurrent requests to a
        # single peer for a single command pair in flight.
        try:
            await self.wait(self._lock.acquire(), timeout=outer_timeout)
        except TimeoutError:
            raise AlreadyWaiting(
                f"Timed out waiting for {self.response_msg_name} request lock "
                f"or peer: {self._peer}")

        if timeout is not None or tracker.total_msgs < 20:
            # Caller-specified timeouts are honored verbatim; with fewer than
            # 20 tracked messages there is not enough history to adapt.
            inner_timeout = outer_timeout
        else:
            # We compute a timeout based on the historical performance
            # of the peer defined as three standard deviations above
            # the response time for the 99th percentile of requests.
            try:
                rtt_99th = tracker.round_trip_99th.value
                rtt_stddev = tracker.round_trip_stddev.value
            except ValueError:
                # Statistics not computable yet; fall back to the full budget.
                inner_timeout = outer_timeout
            else:
                inner_timeout = rtt_99th + 3 * rtt_stddev

        try:
            self._request(request)
            while self._is_pending():
                # Remaining share of the overall budget, clamped at zero.
                timeout_remaining = max(
                    0, outer_timeout - (time.perf_counter() - start_at))

                # Each payload wait is bounded by both the adaptive per-payload
                # timeout and whatever remains of the overall budget.
                payload_timeout = min(inner_timeout, timeout_remaining)

                try:
                    yield await self._get_payload(payload_timeout)
                except TimeoutError:
                    tracker.record_timeout()
                    raise
        finally:
            self._lock.release()
Example no. 4
0
    def _request(self, request: BaseRequest[TRequestPayload]) -> None:
        """
        Send *request* to the peer and register a pending-response future.

        :param request: the request to dispatch over the sub-protocol.
        :raises AlreadyWaiting: if a previous request is still awaiting its
            response.
        """
        # Guard: at most one outstanding request per peer at any time.
        if self.pending_request is not None:
            self.logger.error(
                "Already waiting for response to %s for peer: %s",
                self.response_msg_name,
                self._peer,
            )
            raise AlreadyWaiting(
                "Already waiting for response to {0} for peer: {1}".format(
                    self.response_msg_name, self._peer))

        self._peer.sub_proto.send_request(request)

        # Record the dispatch time alongside the future so the response
        # handler can resolve it later.
        response_future: 'asyncio.Future[TResponsePayload]' = asyncio.Future()
        self.pending_request = (time.perf_counter(), response_future)
Example no. 5
0
    async def _request_and_wait(self,
                                request: TRequest,
                                timeout: int = None) -> TReturn:
        """
        Send *request* over the sub-protocol and wait for the matching response.

        :param request: the request to dispatch to the peer.
        :param timeout: seconds to wait for the response; defaults to
            ``self.response_timeout`` when ``None``.
        :raises AlreadyWaiting: if a response to another request is still pending.
        """
        # Only one outstanding request per peer is allowed at a time.
        if self.pending_request is not None:
            self.logger.error(
                "Already waiting for response to %s for peer: %s",
                self.response_msg_name,
                self._peer,
            )
            raise AlreadyWaiting(
                "Already waiting for response to {0} for peer: {1}".format(
                    self.response_msg_name, self._peer))

        if timeout is None:
            # Fix: the original read `self.response_timout` (missing "e"),
            # which would raise AttributeError on the default-timeout path;
            # sibling methods consistently use `response_timeout`.
            timeout = self.response_timeout
        self._send_sub_proto_request(request)
        return await self._wait_for_response(request, timeout=timeout)
Example no. 6
0
    async def payload_candidates(
            self,
            request: BaseRequest[TRequestPayload],
            tracker: BasePerformanceTracker[BaseRequest[TRequestPayload], Any],
            *,
            timeout: float = None) -> AsyncGenerator[TResponsePayload, None]:
        """
        Make a request and iterate through candidates for a valid response.

        To mark a response as valid, use `complete_request`. After that call, payload
        candidates will stop arriving.
        """
        total_timeout = self.response_timeout if timeout is None else timeout

        # Serialize requests: the lock guarantees at most one in-flight
        # request per peer for this command pair.
        try:
            await self.wait(self._lock.acquire(), timeout=total_timeout)
        except TimeoutError:
            raise AlreadyWaiting(
                "Timed out waiting for {0} request lock for peer: {1}".format(
                    self.response_msg_name,
                    self._peer
                )
            )

        started = time.perf_counter()

        try:
            self._request(request)
            while self._is_pending():
                # Clamp the remaining budget at zero so late iterations still
                # issue a zero-timeout wait.
                elapsed = time.perf_counter() - started
                remaining = max(0, total_timeout - elapsed)
                try:
                    yield await self._get_payload(remaining)
                except TimeoutError:
                    tracker.record_timeout(total_timeout)
                    raise
        finally:
            self._lock.release()