def maybe_expire(self, request_timeout_ms, linger_ms):
    since_append_ms = 1000 * (time.time() - self.last_append)
    if ((self.records.is_full() and request_timeout_ms < since_append_ms)
            or (request_timeout_ms < (since_append_ms + linger_ms))):
        self.records.close()
        self.done(-1, Errors.KafkaTimeoutError('Batch Expired'))
        return True
    return False
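The test above expires a full batch once more than request_timeout_ms has passed since the last append, and any batch once since_append_ms + linger_ms exceeds request_timeout_ms. A minimal standalone sketch of that arithmetic; batch_expired and its arguments are illustrative stand-ins, not part of the class above:

import time

def batch_expired(last_append, is_full, request_timeout_ms, linger_ms):
    # Milliseconds since the last record was appended to the batch.
    since_append_ms = 1000 * (time.time() - last_append)
    # Same rule as maybe_expire: a full batch expires after request_timeout_ms,
    # any batch expires once since_append_ms + linger_ms exceeds it.
    return ((is_full and request_timeout_ms < since_append_ms)
            or (request_timeout_ms < (since_append_ms + linger_ms)))

# A full batch last appended to 2.5 seconds ago with a 2 second timeout is expired.
print(batch_expired(time.time() - 2.5, True, 2000, 0))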
Example 2
    def allocate(self, size, max_time_to_block_ms):
        """
        Allocate a buffer of the given size. This method blocks if there is not
        enough memory and the buffer pool is configured with blocking mode.

        Arguments:
            size (int): The buffer size to allocate in bytes [ignored]
            max_time_to_block_ms (int): The maximum time in milliseconds to
                block for buffer memory to be available

        Returns:
            io.BytesIO
        """
        with self._lock:
            # check if we have a free buffer of the right size pooled
            if self._free:
                return self._free.popleft()

            elif self._poolable_size == 0:
                return io.BytesIO()

            else:
                # we are out of buffers and will have to block
                buf = None
                more_memory = threading.Condition(self._lock)
                self._waiters.append(more_memory)
                # loop over and over until we have a buffer or have reserved
                # enough memory to allocate one
                while buf is None:
                    start_wait = time.time()
                    more_memory.wait(max_time_to_block_ms / 1000.0)
                    end_wait = time.time()
                    #this.waitTime.record(endWait - startWait, time.milliseconds());

                    if self._free:
                        buf = self._free.popleft()
                    else:
                        # remove our waiter before giving up so later
                        # deallocations do not notify a dead condition
                        self._waiters.remove(more_memory)
                        raise Errors.KafkaTimeoutError(
                            "Failed to allocate memory within the configured"
                            " max blocking time")

                # remove the condition for this thread to let the next thread
                # in line start getting memory
                removed = self._waiters.popleft()
                assert removed is more_memory, 'Wrong condition'

                # signal any additional waiters if there is more memory left
                # over for them
                if self._free and self._waiters:
                    self._waiters[0].notify()

                # unlock and return the buffer
                return buf
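The blocking branch hinges on a FIFO deque of threading.Condition objects that all share the pool lock: each blocked caller waits on its own condition and the oldest waiter is woken when a buffer comes back. A compressed standalone sketch of that waiter-queue pattern; MiniPool, free_buffers and deallocate are illustrative names, not kafka-python API:

import collections
import io
import threading

class MiniPool(object):
    """Illustrative FIFO waiter-queue pool, same shape as the method above."""
    def __init__(self, free_buffers=1):
        self._lock = threading.Lock()
        self._free = collections.deque(io.BytesIO() for _ in range(free_buffers))
        self._waiters = collections.deque()

    def allocate(self, max_time_to_block_ms):
        with self._lock:
            if self._free:
                return self._free.popleft()
            # Out of buffers: park on our own condition, built over the pool lock.
            waiter = threading.Condition(self._lock)
            self._waiters.append(waiter)
            waiter.wait(max_time_to_block_ms / 1000.0)
            self._waiters.remove(waiter)
            if not self._free:
                raise RuntimeError("timed out waiting for a buffer")
            return self._free.popleft()

    def deallocate(self, buf):
        with self._lock:
            self._free.append(buf)
            if self._waiters:
                self._waiters[0].notify()  # wake the oldest waiter first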
Example 3
    def _wait_on_metadata(self, topic, max_wait):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for
            max_wait (float): maximum time in secs for waiting on the metadata

        Returns:
            set: partition ids for the topic

        Raises:
            TimeoutException: if partitions for topic were not obtained before
                specified max_wait timeout
        """
        # add topic to metadata topic list if it is not there already.
        self._sender.add_topic(topic)
        partitions = self._metadata.partitions_for_topic(topic)
        if partitions:
            return partitions

        event = threading.Event()

        def event_set(*args):
            event.set()

        def request_update(self, event):
            event.clear()
            log.debug("Requesting metadata update for topic %s.", topic)
            f = self._metadata.request_update()
            f.add_both(event_set)
            return f

        begin = time.time()
        elapsed = 0.0
        future = request_update(self, event)
        while elapsed < max_wait:
            self._sender.wakeup()
            event.wait(max_wait - elapsed)
            if future.failed():
                future = request_update(self, event)
            elapsed = time.time() - begin

            partitions = self._metadata.partitions_for_topic(topic)
            if partitions:
                return partitions
        else:
            raise Errors.KafkaTimeoutError(
                "Failed to update metadata after %s secs." % max_wait)
Example 4
    def _wait_on_metadata(self, topic, max_wait):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for
            max_wait (float): maximum time in secs for waiting on the metadata

        Returns:
            set: partition ids for the topic

        Raises:
            TimeoutException: if partitions for topic were not obtained before
                specified max_wait timeout
        """
        # add topic to metadata topic list if it is not there already.
        self._sender.add_topic(topic)
        begin = time.time()
        elapsed = 0.0
        metadata_event = None
        while True:
            partitions = self._metadata.partitions_for_topic(topic)
            if partitions is not None:
                return partitions

            if not metadata_event:
                metadata_event = threading.Event()

            log.debug("Requesting metadata update for topic %s", topic)

            metadata_event.clear()
            future = self._metadata.request_update()
            future.add_both(lambda e, *args: e.set(), metadata_event)
            self._sender.wakeup()
            metadata_event.wait(max_wait - elapsed)
            elapsed = time.time() - begin
            if not metadata_event.is_set():
                raise Errors.KafkaTimeoutError(
                    "Failed to update metadata after %s secs." % max_wait)
            elif topic in self._metadata.unauthorized_topics:
                raise Errors.TopicAuthorizationFailedError(topic)
            else:
                log.debug("_wait_on_metadata woke after %s secs.", elapsed)
Example 5
    def get(self, timeout=None):
        if not self.is_done and not self._produce_future.wait(timeout):
            raise Errors.KafkaTimeoutError(
                "Timeout after waiting for %s secs." % timeout)