Example #1
    def read_many(self, start_sequence, min_count, max_count):
        """Reads a batch of items from the Ringbuffer.

        If the number of available items after the first read item is smaller than the max_count,
        these items are returned, so the number of items read may be smaller than the max_count.
        If fewer items are available than min_count, this call blocks. Reading a batch of items
        is likely to perform better because less overhead is involved.

        Args:
            start_sequence (int): The start sequence of the first item to read.
            min_count (int): The minimum number of items to read.
            max_count (int): The maximum number of items to read.

        Returns:
            hazelcast.future.Future[list]: The list of read items.
        """
        check_not_negative(start_sequence, "sequence can't be smaller than 0")
        check_true(max_count >= min_count,
                   "max count should be greater or equal to min count")
        check_true(max_count < MAX_BATCH_SIZE,
                   "max count can't be greater than %d" % MAX_BATCH_SIZE)

        future = Future()
        request = ringbuffer_read_many_codec.encode_request(
            self.name, start_sequence, min_count, max_count, None)

        def handler(message):
            return ImmutableLazyDataList(
                ringbuffer_read_many_codec.decode_response(message)["items"],
                self._to_object)

        def check_capacity(capacity):
            try:
                capacity = capacity.result()
                check_true(
                    min_count <= capacity,
                    "min count: %d should be smaller or equal to capacity: %d"
                    % (min_count, capacity),
                )
                f = self._invoke(request, handler)
                f.add_done_callback(set_result)
            except Exception as e:
                future.set_exception(e)

        def set_result(f):
            try:
                future.set_result(f.result())
            except Exception as e:
                future.set_exception(e)

        self.capacity().add_done_callback(check_capacity)
        return future
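For context, a minimal usage sketch of this future-based API follows. It assumes a reachable Hazelcast member and the hazelcast-python-client package; the ringbuffer name "events" and the item values are illustrative and not taken from the example above.

# Usage sketch (assumptions: a running Hazelcast member reachable with the
# default client settings; "events" is an illustrative ringbuffer name).
import hazelcast

client = hazelcast.HazelcastClient()
ringbuffer = client.get_ringbuffer("events")

# Populate the ringbuffer so the read below can satisfy min_count without blocking.
for i in range(5):
    ringbuffer.add("event-%d" % i).result()

head = ringbuffer.head_sequence().result()
# Read at least 1 and at most 5 items starting from the head sequence;
# read_many returns a Future, so resolve it with result().
items = ringbuffer.read_many(head, 1, 5).result()
print(list(items))

client.shutdown()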
Example #2
    def read_many(
        self, start_sequence: int, min_count: int, max_count: int, filter: typing.Any = None
    ) -> Future[ReadResult]:
        """Reads a batch of items from the Ringbuffer.

        If the number of available items after the first read item is smaller
        than the ``max_count``, these items are returned, so the number of
        items read may be smaller than the ``max_count``. If fewer items are
        available than ``min_count``, this call blocks.

        Warnings:
            These blocking calls consume server memory, and if there are many
            such calls, it is possible to see leaking memory or
            ``OutOfMemoryError`` s on the server.

        Reading a batch of items is likely to perform better because less
        overhead is involved.

        A filter can be provided to select only the items that need to be
        read. If the filter is ``None``, all items are read. If the filter is
        not ``None``, only items for which the filter function returns true
        are returned. Using filters is a good way to prevent getting items
        that are of no value to the receiver. This reduces the amount of IO
        and the number of operations being executed, and can result in a
        significant performance improvement. Note that the filtering logic
        must be defined on the server side.

        If the ``start_sequence`` is smaller than the smallest sequence still
        available in the Ringbuffer (:func:`head_sequence`), then the smallest
        available sequence will be used as the start sequence and the
        minimum/maximum number of items will be attempted to be read from there
        on.

        If the ``start_sequence`` is bigger than the last available sequence
        in the Ringbuffer (:func:`tail_sequence`), then the last available
        sequence plus one will be used as the start sequence and the call will
        block until further items become available and it can read at least the
        minimum number of items.

        Args:
            start_sequence: The start sequence of the first item to read.
            min_count: The minimum number of items to read.
            max_count: The maximum number of items to read.
            filter: Filter to select returned elements.

        Returns:
            The result set of the read items.
        """
        check_not_negative(start_sequence, "sequence can't be smaller than 0")
        check_not_negative(min_count, "min count can't be smaller than 0")
        check_true(max_count >= min_count, "max count should be greater or equal to min count")
        check_true(
            max_count < MAX_BATCH_SIZE, "max count can't be greater than %d" % MAX_BATCH_SIZE
        )
        try:
            filter_data = self._to_data(filter)
        except SchemaNotReplicatedError as e:
            return self._send_schema_and_retry(
                e, self.read_many, start_sequence, min_count, max_count, filter
            )

        request = ringbuffer_read_many_codec.encode_request(
            self.name, start_sequence, min_count, max_count, filter_data
        )

        def handler(message):
            response = ringbuffer_read_many_codec.decode_response(message)
            read_count = response["read_count"]
            next_seq = response["next_seq"]
            items = response["items"]
            item_seqs = response["item_seqs"]

            return ReadResult(read_count, next_seq, items, item_seqs, self._to_object)

        def continuation(future):
            # Since the first call to capacity
            # is cached on the client-side, doing
            # a capacity check each time should not
            # be a problem
            capacity = future.result()

            check_true(
                max_count <= capacity,
                "max count: %d should be smaller or equal to capacity: %d" % (max_count, capacity),
            )

            return self._invoke(request, handler)

        return self.capacity().continue_with(continuation)
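As a rough illustration of the newer API, the sketch below uses the blocking proxy wrapper and inspects the returned ReadResult. It assumes a reachable member; the ringbuffer name "events" is illustrative, and no filter is passed because the filtering logic must be defined on the server side.

# Usage sketch (assumptions: a running Hazelcast member reachable with the
# default client settings; "events" is an illustrative ringbuffer name).
import hazelcast

client = hazelcast.HazelcastClient()
ringbuffer = client.get_ringbuffer("events").blocking()

for i in range(10):
    ringbuffer.add("event-%d" % i)

# filter defaults to None, so every available item is returned.
result = ringbuffer.read_many(ringbuffer.head_sequence(), 1, 10)

# ReadResult behaves like a sequence of the deserialized items and also
# reports how many raw items the member read.
print(result.read_count)
for item in result:
    print(item)

client.shutdown()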