Example #1
    def maybe_expire(self, request_timeout_ms, retry_backoff_ms, linger_ms, is_full):
        """Expire batches if metadata is not available

        A batch whose metadata is not available should be expired if one
        of the following is true:

          * the batch is not in retry AND the request timeout has elapsed
            since it became ready (full or linger.ms elapsed).

          * the batch is in retry AND the request timeout has elapsed after
            the backoff period ended.
        """
        now = time.time()
        since_append = now - self.last_append
        since_ready = now - (self.created + linger_ms / 1000.0)
        since_backoff = now - (self.last_attempt + retry_backoff_ms / 1000.0)
        timeout = request_timeout_ms / 1000.0

        error = None
        if not self.in_retry() and is_full and timeout < since_append:
            error = "%d seconds have passed since last append" % (since_append,)
        elif not self.in_retry() and timeout < since_ready:
            error = "%d seconds have passed since batch creation plus linger time" % (since_ready,)
        elif self.in_retry() and timeout < since_backoff:
            error = "%d seconds have passed since last attempt plus backoff time" % (since_backoff,)

        if error:
            self.records.close()
            self.done(-1, None, Errors.KafkaTimeoutError(
                "Batch for %s containing %s record(s) expired: %s" % (
                self.topic_partition, self.records.next_offset(), error)))
            return True
        return False
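
The three expiry rules in the docstring can be checked in isolation from the batch object. Below is a minimal sketch of the same decision logic as a free function; is_expired and all of its parameter names are hypothetical, not part of kafka-python:

import time

def is_expired(now, timeout_s, in_retry, is_full,
               last_append, created, linger_s, last_attempt, backoff_s):
    """Return a reason string if the batch should expire, else None."""
    if not in_retry and is_full and now - last_append > timeout_s:
        return "request timeout elapsed since last append"
    if not in_retry and now - (created + linger_s) > timeout_s:
        return "request timeout elapsed since creation plus linger time"
    if in_retry and now - (last_attempt + backoff_s) > timeout_s:
        return "request timeout elapsed since last attempt plus backoff time"
    return None

now = time.time()
# A full, non-retried batch last appended to 35s ago with a 30s request
# timeout should expire under the first rule.
print(is_expired(now, 30.0, False, True, now - 35, now - 35, 5.0, now, 0.1))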
Example #2
    def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")):
        """Fetch offset for each partition passed in ``timestamps`` map.

        Blocks until offsets are obtained, a non-retriable exception is
        raised, or ``timeout_ms`` has elapsed.

        Arguments:
            timestamps: {TopicPartition: int} dict with timestamps to fetch
                offsets by. -1 for the latest available, -2 for the earliest
                available. Otherwise timestamp is treated as epoch milliseconds.

        Returns:
            {TopicPartition: (int, int)}: Mapping of partition to
                retrieved offset and timestamp. If offset does not exist for
                the provided timestamp, that partition will be missing from
                this mapping.
        """
        if not timestamps:
            return {}

        start_time = time.time()
        remaining_ms = timeout_ms
        timestamps = copy.copy(timestamps)
        while remaining_ms > 0:
            if not timestamps:
                return {}

            future = self._send_offset_requests(timestamps)
            self._client.poll(future=future, timeout_ms=remaining_ms)

            if future.succeeded():
                return future.value
            if not future.retriable():
                raise future.exception  # pylint: disable-msg=raising-bad-type

            elapsed_ms = (time.time() - start_time) * 1000
            remaining_ms = timeout_ms - elapsed_ms
            if remaining_ms < 0:
                break

            if future.exception.invalid_metadata:
                refresh_future = self._client.cluster.request_update()
                self._client.poll(future=refresh_future, timeout_ms=remaining_ms)

                # Issue #1780
                # Recheck partition existence after a successful metadata refresh
                if refresh_future.succeeded() and isinstance(future.exception, Errors.StaleMetadata):
                    log.debug("Stale metadata was raised, and we now have updated metadata. Rechecking partition existence")
                    unknown_partition = future.exception.args[0]  # TopicPartition from StaleMetadata
                    if self._client.cluster.leader_for_partition(unknown_partition) is None:
                        log.debug("Removed partition %s from offsets retrieval" % (unknown_partition, ))
                        timestamps.pop(unknown_partition)
            else:
                time.sleep(self.config['retry_backoff_ms'] / 1000.0)

            elapsed_ms = (time.time() - start_time) * 1000
            remaining_ms = timeout_ms - elapsed_ms

        raise Errors.KafkaTimeoutError(
            "Failed to get offsets by timestamps in %s ms" % (timeout_ms,))
Example #3
    def get(self, timeout=None):
        if not self.is_done and not self._produce_future.wait(timeout):
            raise Errors.KafkaTimeoutError(
                "Timeout after waiting for %s secs." % (timeout,))
        assert self.is_done
        if self.failed():
            raise self.exception  # pylint: disable-msg=raising-bad-type
        return self.value
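
get() relies on a small future contract: wait(timeout) returns False on timeout, and is_done/failed() describe the outcome. A self-contained sketch of such a future built on threading.Event; MiniFuture is illustrative, not kafka-python's Future class:

import threading

class MiniFuture:
    """Sketch of the protocol get() assumes."""
    def __init__(self):
        self._event = threading.Event()
        self.is_done = False
        self.exception = None
        self.value = None

    def wait(self, timeout=None):
        # False on timeout, True once the future completed.
        return self._event.wait(timeout)

    def failed(self):
        return self.is_done and self.exception is not None

    def success(self, value):
        self.value, self.is_done = value, True
        self._event.set()

f = MiniFuture()
threading.Timer(0.1, f.success, args=("record-metadata",)).start()
f.wait(1.0)
print(f.value)  # -> "record-metadata"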
Example #4
    def wakeup(self):
        with self._wake_lock:
            try:
                self._wake_w.sendall(b'x')
            except socket.timeout:
                log.warning('Timeout to send to wakeup socket!')
                raise Errors.KafkaTimeoutError()
            except socket.error:
                log.warning('Unable to send to wakeup socket!')
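
wakeup() is one half of the self-pipe trick: a byte written to one end of a socket pair makes the other end readable, which unblocks an I/O thread parked in select(). A minimal sketch of both halves; the loop below is illustrative, not kafka-python's actual I/O loop:

import select
import socket

r, w = socket.socketpair()
w.settimeout(0.1)  # sendall() raises socket.timeout if the buffer is full

def io_loop_once():
    readable, _, _ = select.select([r], [], [], 5.0)
    if r in readable:
        r.recv(1024)  # drain the wakeup bytes
        print("woken up")

w.sendall(b'x')  # the wakeup side
io_loop_once()   # the I/O-thread side returns immediately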
Example #5
    def allocate(self, size, max_time_to_block_ms):
        """
        Allocate a buffer of the given size. This method blocks if there is not
        enough memory and the buffer pool is configured with blocking mode.

        Arguments:
            size (int): The buffer size to allocate in bytes [ignored]
            max_time_to_block_ms (int): The maximum time in milliseconds to
                block for buffer memory to be available

        Returns:
            io.BytesIO
        """
        with self._lock:
            # check if we have a free buffer of the right size pooled
            if self._free:
                return self._free.popleft()

            elif self._poolable_size == 0:
                return io.BytesIO()

            else:
                # we are out of buffers and will have to block
                buf = None
                more_memory = threading.Condition(self._lock)
                self._waiters.append(more_memory)
                # loop over and over until we have a buffer or have reserved
                # enough memory to allocate one
                while buf is None:
                    start_wait = time.time()
                    more_memory.wait(max_time_to_block_ms / 1000.0)
                    end_wait = time.time()
                    if self.wait_time:
                        self.wait_time.record(end_wait - start_wait)

                    if self._free:
                        buf = self._free.popleft()
                    else:
                        self._waiters.remove(more_memory)
                        raise Errors.KafkaTimeoutError(
                            "Failed to allocate memory within the configured"
                            " max blocking time")

                # remove the condition for this thread to let the next thread
                # in line start getting memory
                removed = self._waiters.popleft()
                assert removed is more_memory, "Wrong condition"

                # signal any additional waiters if there is more memory left
                # over for them
                if self._free and self._waiters:
                    self._waiters[0].notify()

                # unlock and return the buffer
                return buf
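
The blocking branch is a classic condition-variable handoff: each waiter registers its own Condition sharing the pool lock, and a releasing thread notifies the head of the queue. A stripped-down sketch of the same handoff with integers as the pooled resource; TokenPool and its methods are hypothetical:

import collections
import threading

class TokenPool:
    def __init__(self, tokens):
        self._lock = threading.Lock()
        self._free = collections.deque(tokens)
        self._waiters = collections.deque()

    def acquire(self, max_block_s):
        with self._lock:
            if self._free:
                return self._free.popleft()
            # Out of tokens: queue a private condition on the shared lock.
            cond = threading.Condition(self._lock)
            self._waiters.append(cond)
            cond.wait(max_block_s)  # releases the lock while blocked
            self._waiters.remove(cond)
            if not self._free:
                raise TimeoutError("no token within max blocking time")
            token = self._free.popleft()
            # Pass any leftover capacity to the next waiter in line.
            if self._free and self._waiters:
                self._waiters[0].notify()
            return token

    def release(self, token):
        with self._lock:
            self._free.append(token)
            if self._waiters:
                self._waiters[0].notify()

pool = TokenPool([1, 2])
t = pool.acquire(0.5)
pool.release(t)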
Example #6
    def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")):
        """Fetch offset for each partition passed in ``timestamps`` map.

        Blocks until offsets are obtained, a non-retriable exception is
        raised, or ``timeout_ms`` has elapsed.

        Arguments:
            timestamps: {TopicPartition: int} dict with timestamps to fetch
                offsets by. -1 for the latest available, -2 for the earliest
                available. Otherwise timestamp is treated as epoch milliseconds.

        Returns:
            {TopicPartition: (int, int)}: Mapping of partition to
                retrieved offset and timestamp. If offset does not exist for
                the provided timestamp, that partition will be missing from
                this mapping.
        """
        if not timestamps:
            return {}

        start_time = time.time()
        remaining_ms = timeout_ms
        while remaining_ms > 0:
            future = self._send_offset_requests(timestamps)
            self._client.poll(future=future, timeout_ms=remaining_ms)

            if future.succeeded():
                return future.value
            if not future.retriable():
                raise future.exception  # pylint: disable-msg=raising-bad-type

            elapsed_ms = (time.time() - start_time) * 1000
            remaining_ms = timeout_ms - elapsed_ms
            if remaining_ms < 0:
                break

            if future.exception.invalid_metadata:
                refresh_future = self._client.cluster.request_update()
                self._client.poll(future=refresh_future,
                                  timeout_ms=remaining_ms)
            else:
                time.sleep(self.config['retry_backoff_ms'] / 1000.0)

            elapsed_ms = (time.time() - start_time) * 1000
            remaining_ms = timeout_ms - elapsed_ms

        raise Errors.KafkaTimeoutError(
            "Failed to get offsets by timestamps in %s ms" % timeout_ms)
Example #7
    def await_flush_completion(self, timeout=None):
        """
        Mark all partitions as ready to send and block until the send is complete
        """
        try:
            for batch in self._incomplete.all():
                log.debug('Waiting on produce to %s',
                          batch.produce_future.topic_partition)
                if not batch.produce_future.wait(timeout=timeout):
                    raise Errors.KafkaTimeoutError('Timeout waiting for future')
                if not batch.produce_future.is_done:
                    raise Errors.UnknownError('Future not done')

                if batch.produce_future.failed():
                    log.warning(batch.produce_future.exception)
        finally:
            self._flushes_in_progress.decrement()
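
Note that timeout is granted to each batch's wait() individually, so a flush over n incomplete batches can block for up to n times the timeout. A sketch of a variant that shares one deadline across all futures; wait_all is a hypothetical helper, not part of kafka-python:

import time

def wait_all(futures, timeout_s):
    """Wait on every future against a single shared deadline."""
    deadline = time.time() + timeout_s
    for f in futures:
        remaining = deadline - time.time()
        if remaining <= 0 or not f.wait(timeout=remaining):
            raise TimeoutError("flush did not complete in %s secs" % (timeout_s,))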
Example #8
    def _wait_on_metadata(self, topic, max_wait):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for
            max_wait (float): maximum time in secs for waiting on the metadata

        Returns:
            set: partition ids for the topic

        Raises:
            KafkaTimeoutError: if partitions for topic were not obtained before
                specified max_wait timeout
        """
        # add topic to metadata topic list if it is not there already.
        self._sender.add_topic(topic)
        begin = time.time()
        elapsed = 0.0
        metadata_event = None
        while True:
            partitions = self._metadata.partitions_for_topic(topic)
            if partitions is not None:
                return partitions

            if not metadata_event:
                metadata_event = threading.Event()

            log.debug("Requesting metadata update for topic %s", topic)

            metadata_event.clear()
            future = self._metadata.request_update()
            future.add_both(lambda e, *args: e.set(), metadata_event)
            self._sender.wakeup()
            metadata_event.wait(max_wait - elapsed)
            elapsed = time.time() - begin
            if not metadata_event.is_set():
                raise Errors.KafkaTimeoutError(
                    "Failed to update metadata after %.1f secs." %
                    (max_wait, ))
            elif topic in self._metadata.unauthorized_topics:
                raise Errors.TopicAuthorizationFailedError(topic)
            else:
                log.debug("_wait_on_metadata woke after %s secs.", elapsed)