Example #1
    def __init__(
        self,
        endpoint: config.EndpointConfiguration,
        size: int = 10,
        max_age: int = 120,
        timeout: int = 1,
        max_connection_attempts: Optional[int] = None,
        max_retries: Optional[int] = None,
        protocol_factory: TProtocolFactory = THeaderProtocol.THeaderProtocolFactory(),
    ):
        if max_connection_attempts and max_retries:
            raise Exception(
                "do not mix max_retries and max_connection_attempts")

        if max_retries:
            warn_deprecated(
                "ThriftConnectionPool's max_retries is now named max_connection_attempts"
            )
            max_connection_attempts = max_retries
        elif not max_connection_attempts:
            max_connection_attempts = 3

        self.endpoint = endpoint
        self.max_age = max_age
        self.retry_policy = RetryPolicy.new(attempts=max_connection_attempts)
        self.timeout = timeout
        self.protocol_factory = protocol_factory

        self.size = size
        self.pool: ProtocolPool = queue.LifoQueue()
        for _ in range(size):
            self.pool.put(None)
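
A minimal construction sketch for this pool; the endpoint address and import paths are assumptions and may differ between baseplate versions:

    from baseplate.lib import config
    from baseplate.lib.thrift_pool import ThriftConnectionPool

    # Hypothetical address; config.Endpoint parses "host:port" strings into
    # an EndpointConfiguration.
    endpoint = config.Endpoint("localhost:9090")
    pool = ThriftConnectionPool(endpoint=endpoint, size=5, timeout=2)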
Example #2
    def stop(self) -> None:
        """Start the server.

        Stop all of the worker threads and the healthcheck server (if it exists).
        Waits for the handler threads to drain before returning but does not wait
        for the pump or watcher threads to finish.

        Should only be called once, and not before the server has been
        started; an AssertionError is raised in either of those cases.
        """
        assert self.started
        assert not self.stopped
        logger.debug("Stopping server.")
        self.stopped = True
        # Stop the pump first so we stop consuming messages from the message
        # queue
        logger.debug("Stopping pump thread.")
        self.pump.stop()
        # It's important to call `handler.stop()` before calling `join` on the
        # handler threads, otherwise we'll be waiting for threads that have not
        # been instructed to stop.
        logger.debug("Stopping message handler threads.")
        for handler in self.handlers:
            handler.stop()
        retry_policy = RetryPolicy.new(
            budget=self.stop_timeout.total_seconds())
        logger.debug("Waiting for message handler threads to drain.")
        for time_remaining, thread in zip(retry_policy, self.threads):
            thread.join(timeout=time_remaining)
        # Stop the healthcheck server last
        logger.debug("Stopping healthcheck server.")
        self.healthcheck_server.stop()
        logger.debug("Server stopped.")
Example #3
 def retrying(self, **policy: Any) -> Iterator["_PooledClientProxy"]:
     yield self.__class__(
         self.client_cls,
         self.pool,
         self.server_span,
         self.namespace,
         retry_policy=RetryPolicy.new(**policy),
     )
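
This generator backs the context-manager style used to override the proxy's retry policy for a block of calls. Usage would look roughly like the following; the client variable and the method being called are hypothetical:

    # Sketch: retry calls up to three times for the duration of the block.
    with client.retrying(attempts=3) as retrying_client:
        retrying_client.is_healthy()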
Example #4
 def __init__(
     self,
     client_cls: Any,
     pool: ThriftConnectionPool,
     server_span: Span,
     namespace: str,
     retry_policy: Optional[RetryPolicy] = None,
 ):
     self.client_cls = client_cls
     self.pool = pool
     self.server_span = server_span
     self.namespace = namespace
     self.retry_policy = retry_policy or RetryPolicy.new(attempts=1)
Example #5
    def __init__(
        self,
        path: str,
        parser: Callable[[IO], T],
        timeout: Optional[float] = None,
        binary: bool = False,
        encoding: Optional[str] = None,
        newline: Optional[str] = None,
        backoff: Optional[float] = None,
    ):
        if binary and encoding is not None:
            raise TypeError("'encoding' is not supported in binary mode.")

        if binary and newline is not None:
            raise TypeError("'newline' is not supported in binary mode.")

        self._path = path
        self._parser = parser
        self._mtime = 0.0
        self._data: Union[T, Type[_NOT_LOADED]] = _NOT_LOADED
        self._open_options = _OpenOptions(
            mode="rb" if binary else "r",
            encoding=encoding or ("UTF-8" if not binary else None),
            newline=newline,
        )

        backoff = backoff or DEFAULT_FILEWATCHER_BACKOFF

        if timeout is not None:
            last_error = None
            for _ in RetryPolicy.new(budget=timeout, backoff=backoff):
                if self._data is not _NOT_LOADED:
                    break

                try:
                    self.get_data()
                except WatchedFileNotAvailableError as exc:
                    last_error = exc
                else:
                    break

                logging.warning("%s: file not yet available. sleeping.", path)
            else:
                last_error = typing.cast(WatchedFileNotAvailableError,
                                         last_error)
                raise WatchedFileNotAvailableError(
                    self._path,
                    f"timed out. last error was: {last_error.inner}")
Example #6
    def publish(self, payload: SerializedBatch) -> None:
        """Publish spans to Zipkin API.

        :param payload: Count and payload to publish.
        """
        if not payload.item_count:
            return

        logger.info("Sending batch of %d traces", payload.item_count)
        headers = {
            "User-Agent": "baseplate-trace-publisher/1.0",
            "Content-Type": "application/json",
        }
        for _ in RetryPolicy.new(attempts=self.retry_limit):
            try:
                with self.metrics.timer("post"):
                    response = self.session.post(
                        self.endpoint,
                        data=payload.serialized,
                        headers=headers,
                        timeout=self.post_timeout,
                        stream=False,
                    )
                response.raise_for_status()
            except requests.HTTPError as exc:
                self.metrics.counter("error.http").increment()
                response = getattr(exc, "response", None)
                if response is not None:
                    logger.exception("HTTP Request failed. Error: %s",
                                     response.text)
                    # If client error, skip retries
                    if response.status_code < 500:
                        self.metrics.counter("error.http.client").increment()
                        return
                else:
                    logger.exception(
                        "HTTP Request failed. Response not available")
            except OSError:
                self.metrics.counter("error.io").increment()
                logger.exception("HTTP Request failed")
            else:
                self.metrics.counter("sent").increment(payload.item_count)
                return

        raise MaxRetriesError(
            f"ZipkinPublisher exhausted allowance of {self.retry_limit:d} retries."
        )
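
A hedged sketch of driving this publisher; construction of the publisher is omitted and the payload bytes are made up, with SerializedBatch fields as used in the snippet above:

    # Sketch: publish one batch; MaxRetriesError is raised if every attempt fails.
    payload = SerializedBatch(item_count=1, serialized=b'[{"traceId": "abc"}]')
    publisher.publish(payload)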
Example #7
    def publish(self, payload: SerializedBatch) -> None:
        if not payload.item_count:
            return

        logger.info("sending batch of %d events", payload.item_count)
        compressed_payload = gzip.compress(payload.serialized)
        headers = {
            "Date": email.utils.formatdate(usegmt=True),
            "User-Agent": "baseplate-event-publisher/1.0",
            "Content-Type": "application/json",
            "X-Signature": self._sign_payload(payload.serialized),
            "Content-Encoding": "gzip",
        }

        for _ in RetryPolicy.new(budget=MAX_RETRY_TIME, backoff=RETRY_BACKOFF):
            try:
                with self.metrics.timer("post"):
                    response = self.session.post(
                        self.url,
                        headers=headers,
                        data=compressed_payload,
                        timeout=POST_TIMEOUT,
                        # http://docs.python-requests.org/en/latest/user/advanced/#keep-alive
                        stream=False,
                    )
                response.raise_for_status()
            except requests.HTTPError as exc:
                self.metrics.counter("error.http").increment()

                # we should crash if it's our fault
                response = getattr(exc, "response", None)
                if response is not None and response.status_code < 500:
                    logger.exception("HTTP Request failed. Error: %s",
                                     response.text)
                    if response.status_code != 422:
                        # Do not exit on validation errors
                        raise
                else:
                    logger.exception("HTTP Request failed.")
            except OSError:
                self.metrics.counter("error.io").increment()
                logger.exception("HTTP Request failed")
            else:
                self.metrics.counter("sent").increment(payload.item_count)
                return

        raise MaxRetriesError("could not send batch")
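
Note the contrast with the Zipkin publisher above: that loop caps the number of attempts (attempts=self.retry_limit), while this one bounds retries by wall-clock time with a fixed backoff between attempts (budget=MAX_RETRY_TIME, backoff=RETRY_BACKOFF).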
Example #8
    def put(self, message: bytes, timeout: Optional[float] = None) -> None:
        """Add a message to the queue.

        :param timeout: If the queue is full, the call will block up to
            ``timeout`` seconds or forever if ``None``.
        :raises: :py:exc:`TimedOutError` The queue was full for the allowed
            duration of the call.

        """
        for time_remaining in RetryPolicy.new(budget=timeout):
            try:
                return self.queue.send(message=message)
            except posix_ipc.SignalError:  # pragma: nocover
                continue  # interrupted, just try again
            except posix_ipc.BusyError:
                select.select([], [self.queue.mqd], [], time_remaining)

        raise TimedOutError
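
The yielded time_remaining doubles as the select() timeout, so a full queue only blocks for whatever is left of the budget. A hedged usage sketch; the queue name and size limits are made up, and the import path may vary by baseplate version:

    from baseplate.lib.message_queue import MessageQueue, TimedOutError

    queue = MessageQueue("/example-queue", max_messages=16, max_message_size=1024)
    try:
        queue.put(b"hello", timeout=0.5)
    except TimedOutError:
        # The queue stayed full for the entire 0.5 second budget.
        pass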
Example #9
    def __init__(
        self,
        endpoint: config.EndpointConfiguration,
        size: int = 10,
        max_age: int = 120,
        timeout: float = 1,
        max_connection_attempts: int = 3,
        protocol_factory: TProtocolFactory = _DEFAULT_PROTOCOL_FACTORY,
    ):
        self.endpoint = endpoint
        self.max_age = max_age
        self.retry_policy = RetryPolicy.new(attempts=max_connection_attempts)
        self.timeout = timeout
        self.protocol_factory = protocol_factory

        self.size = size
        self.pool: ProtocolPool = queue.LifoQueue()
        for _ in range(size):
            self.pool.put(None)
Example #10
    def get(self, timeout: Optional[float] = None) -> bytes:
        """Read a message from the queue.

        :param timeout: If the queue is empty, the call will block up to
            ``timeout`` seconds or forever if ``None``.
        :raises: :py:exc:`TimedOutError` The queue was empty for the allowed
            duration of the call.

        """
        for time_remaining in RetryPolicy.new(budget=timeout):
            try:
                message, _ = self.queue.receive()
                return message
            except posix_ipc.SignalError:  # pragma: nocover
                continue  # interrupted, just try again
            except posix_ipc.BusyError:
                select.select([self.queue.mqd], [], [], time_remaining)

        raise TimedOutError
Example #11
    def get_batch(self, max_items: int,
                  timeout: Optional[float]) -> Sequence[Message]:
        """Return a batch of messages.

        :param max_items: The maximum batch size.
        :param timeout: The maximum time to wait in seconds, or ``None``
            for no timeout.

        """
        if timeout == 0:
            block = False
        else:
            block = True
        batch = []
        retry_policy = RetryPolicy.new(attempts=max_items, budget=timeout)
        for time_remaining in retry_policy:
            item = self._get_next_item(block=block, timeout=time_remaining)
            if item is None:
                break
            batch.append(item)

        return batch
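
Passing both attempts and budget caps the batch at max_items messages while also bounding the total wait. A hedged usage sketch; the consumer object and the handler are hypothetical:

    # Sketch: pull up to 10 messages, waiting at most 2 seconds in total.
    batch = consumer.get_batch(max_items=10, timeout=2.0)
    for message in batch:
        handle(message)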