Example #1
0
    def __init__(self, target, pipeline, full_document, resume_after,
                 max_await_time_ms, batch_size, collation,
                 start_at_operation_time, session):
        if pipeline is None:
            pipeline = []
        elif not isinstance(pipeline, list):
            raise TypeError("pipeline must be a list")

        common.validate_string_or_none('full_document', full_document)
        validate_collation_or_none(collation)
        common.validate_non_negative_integer_or_none("batchSize", batch_size)

        self._decode_custom = False
        self._orig_codec_options = target.codec_options
        if target.codec_options.type_registry._decoder_map:
            self._decode_custom = True
            # Keep the type registry so that we support encoding custom types
            # in the pipeline.
            self._target = target.with_options(
                codec_options=target.codec_options.with_options(
                    document_class=RawBSONDocument))
        else:
            self._target = target

        self._pipeline = copy.deepcopy(pipeline)
        self._full_document = full_document
        self._resume_token = copy.deepcopy(resume_after)
        self._max_await_time_ms = max_await_time_ms
        self._batch_size = batch_size
        self._collation = collation
        self._start_at_operation_time = start_at_operation_time
        self._session = session
        self._cursor = self._create_cursor()
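A hedged sketch of how this constructor is normally reached: applications call Collection.watch(), which forwards these arguments. The client, database, and collection names below are placeholders, and a replica set (or mongos) deployment is assumed, since change streams require one.

    # Sketch only: watch() constructs the ChangeStream; application code
    # should not call __init__ directly. test_db/test_collection are
    # invented placeholder names.
    from pymongo import MongoClient

    client = MongoClient()  # assumes a reachable replica set
    coll = client.test_db.test_collection
    with coll.watch(pipeline=[{"$match": {"operationType": "insert"}}],
                    batch_size=100) as stream:
        for change in stream:
            # Insert events carry the newly inserted document.
            print(change["fullDocument"])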
Example #2
0
    def __init__(self, target, pipeline, full_document, resume_after,
                 max_await_time_ms, batch_size, collation,
                 start_at_operation_time, session):
        if pipeline is None:
            pipeline = []
        elif not isinstance(pipeline, list):
            raise TypeError("pipeline must be a list")

        common.validate_string_or_none('full_document', full_document)
        validate_collation_or_none(collation)
        common.validate_non_negative_integer_or_none("batchSize", batch_size)

        self._decode_custom = False
        self._orig_codec_options = target.codec_options
        if target.codec_options.type_registry._decoder_map:
            self._decode_custom = True
            # Keep the type registry so that we support encoding custom types
            # in the pipeline.
            self._target = target.with_options(
                codec_options=target.codec_options.with_options(
                    document_class=RawBSONDocument))
        else:
            self._target = target

        self._pipeline = copy.deepcopy(pipeline)
        self._full_document = full_document
        self._resume_token = copy.deepcopy(resume_after)
        self._max_await_time_ms = max_await_time_ms
        self._batch_size = batch_size
        self._collation = collation
        self._start_at_operation_time = start_at_operation_time
        self._session = session
        self._cursor = self._create_cursor()
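The same __init__ also backs database-wide and cluster-wide streams, not just collection-level ones. A hedged sketch, reusing the client from the earlier sketch:

    # Database.watch() and MongoClient.watch() build the same ChangeStream,
    # with the database or client as the target argument.
    with client.test_db.watch() as db_stream:
        for change in db_stream:
            print(change)
            break

    with client.watch(max_await_time_ms=1000) as cluster_stream:
        for change in cluster_stream:
            print(change)
            break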
Example #3
0
    def __init__(
        self,
        target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]",
                      "Collection[_DocumentType]"],
        pipeline: Optional[_Pipeline],
        full_document: Optional[str],
        resume_after: Optional[Mapping[str, Any]],
        max_await_time_ms: Optional[int],
        batch_size: Optional[int],
        collation: Optional[_CollationIn],
        start_at_operation_time: Optional[Timestamp],
        session: Optional["ClientSession"],
        start_after: Optional[Mapping[str, Any]],
        comment: Optional[Any] = None,
    ) -> None:
        if pipeline is None:
            pipeline = []
        pipeline = common.validate_list("pipeline", pipeline)
        common.validate_string_or_none("full_document", full_document)
        validate_collation_or_none(collation)
        common.validate_non_negative_integer_or_none("batchSize", batch_size)

        self._decode_custom = False
        self._orig_codec_options = target.codec_options
        if target.codec_options.type_registry._decoder_map:
            self._decode_custom = True
            # Keep the type registry so that we support encoding custom types
            # in the pipeline.
            self._target = target.with_options(  # type: ignore
                codec_options=target.codec_options.with_options(
                    document_class=RawBSONDocument))
        else:
            self._target = target

        self._pipeline = copy.deepcopy(pipeline)
        self._full_document = full_document
        self._uses_start_after = start_after is not None
        self._uses_resume_after = resume_after is not None
        self._resume_token = copy.deepcopy(start_after or resume_after)
        self._max_await_time_ms = max_await_time_ms
        self._batch_size = batch_size
        self._collation = collation
        self._start_at_operation_time = start_at_operation_time
        self._session = session
        self._comment = comment
        # Initialize cursor.
        self._cursor = self._create_cursor()
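This typed variant adds start_after and comment on top of resume_after. A hedged sketch of the resume-token round trip (coll as above); note that start_after wins over resume_after because the code above evaluates `start_after or resume_after`:

    # Capture the token the server hands back as events are consumed.
    resume_token = None
    with coll.watch() as stream:
        for change in stream:
            resume_token = stream.resume_token
            break

    # Later: start_after can resume even across an invalidate event.
    if resume_token is not None:
        with coll.watch(start_after=resume_token) as stream:
            for change in stream:
                print(change)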
Example #4
0
    def __init__(
        self,
        target,
        cursor_class,
        pipeline,
        options,
        explicit_session,
        let=None,
        user_fields=None,
        result_processor=None,
        comment=None,
    ):
        if "explain" in options:
            raise ConfigurationError(
                "The explain option is not supported. Use Database.command instead."
            )

        self._target = target

        pipeline = common.validate_list("pipeline", pipeline)
        self._pipeline = pipeline
        self._performs_write = False
        if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]):
            self._performs_write = True

        common.validate_is_mapping("options", options)
        if let is not None:
            common.validate_is_mapping("let", let)
            options["let"] = let
        if comment is not None:
            options["comment"] = comment
        self._options = options

        # This batchSize is applied both to the cursor's initial batch and
        # to every subsequent getMore.
        self._batch_size = common.validate_non_negative_integer_or_none(
            "batchSize", self._options.pop("batchSize", None))

        # If the cursor option is already specified, avoid overriding it.
        self._options.setdefault("cursor", {})
        # If the pipeline performs a write, we ignore the initial batchSize
        # since the server doesn't return results in this case.
        if self._batch_size is not None and not self._performs_write:
            self._options["cursor"]["batchSize"] = self._batch_size

        self._cursor_class = cursor_class
        self._explicit_session = explicit_session
        self._user_fields = user_fields
        self._result_processor = result_processor

        self._collation = validate_collation_or_none(
            options.pop("collation", None))

        self._max_await_time_ms = options.pop("maxAwaitTimeMS", None)
        self._write_preference = None
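This helper sits behind Collection.aggregate(). A hedged sketch of the write-stage rule encoded above: when the pipeline ends in $out or $merge, the initial batchSize is dropped because the server returns no result documents for a write pipeline (coll as in the earlier sketches; "other_collection" is a placeholder target name).

    # batchSize applies: an ordinary read pipeline.
    cursor = coll.aggregate([{"$match": {}}], batchSize=50)

    # batchSize is ignored for the first batch: $out returns nothing.
    coll.aggregate([{"$match": {}}, {"$out": "other_collection"}])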
Example #5
0
    def __init__(self, target, pipeline, full_document, resume_after,
                 max_await_time_ms, batch_size, collation,
                 start_at_operation_time, session):
        if pipeline is None:
            pipeline = []
        elif not isinstance(pipeline, list):
            raise TypeError("pipeline must be a list")

        common.validate_string_or_none('full_document', full_document)
        validate_collation_or_none(collation)
        common.validate_non_negative_integer_or_none("batchSize", batch_size)

        self._target = target
        self._pipeline = copy.deepcopy(pipeline)
        self._full_document = full_document
        self._resume_token = copy.deepcopy(resume_after)
        self._max_await_time_ms = max_await_time_ms
        self._batch_size = batch_size
        self._collation = collation
        self._start_at_operation_time = start_at_operation_time
        self._session = session
        self._cursor = self._create_cursor()
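Example #5 is an earlier shape of the same constructor, without the custom-type decoding branch. A hedged sketch of the resume_after path it already supported, with resume_token captured as in the earlier sketch:

    # resume_after restarts delivery with the first event after the one
    # identified by the saved token.
    with coll.watch(resume_after=resume_token) as stream:
        for change in stream:
            print(change)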
Example #6
0
    def __init__(self, collection, filter=None, projection=None, skip=0,
                 limit=0, no_cursor_timeout=False,
                 cursor_type=CursorType.NON_TAILABLE,
                 sort=None, allow_partial_results=False, oplog_replay=False,
                 modifiers=None, batchSize=None, batch_size=0, manipulate=True):
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. mongodoc:: cursors
        """
        self.__id = None

        spec = filter
        if spec is None:
            spec = {}

        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                               CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if modifiers is not None:
            validate_is_mapping("modifiers", modifiers)
        batchSize = validate_positive_integer_or_none("batchSize", batchSize)
        batch_size = validate_non_negative_integer_or_none(
            "batch_size", batch_size)
        if batchSize is not None:
            batch_size = batchSize

        if projection is not None:
            if not projection:
                projection = {"_id": 1}
            projection = helpers._fields_list_to_dict(projection, "projection")

        self.__collection = collection
        self.__spec = spec
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__modifiers = modifiers.copy() if modifiers else {}
        self.__ordering = helpers._index_document(sort) if sort else None
        self.__max_scan = None
        self.__explain = False
        self.__hint = None
        self.__comment = None
        self.__max_time_ms = None
        self.__max_await_time_ms = None
        self.__max = None
        self.__min = None
        self.__manipulate = manipulate

        # Exhaust cursor support
        self.__exhaust = False
        self.__exhaust_mgr = None
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation('Exhaust cursors are '
                                       'not supported by mongos')
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        self.__data = deque()
        self.__address = None
        self.__retrieved = 0
        self.__killed = False

        self.__codec_options = collection.codec_options
        self.__read_preference = collection.read_preference
        self.__read_concern = collection.read_concern

        self.__query_flags = cursor_type
        if self.__read_preference != ReadPreference.PRIMARY:
            self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
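A hedged sketch of how this constructor is reached in practice: Collection.find() builds the Cursor, and its keyword arguments map one-to-one onto the parameters above (coll as in the earlier sketches).

    # Sketch only: find() should be used instead of Cursor() directly.
    cursor = coll.find(
        {"status": "active"},   # filter -> spec
        {"status": 1},          # projection
        skip=10,
        limit=20,
        batch_size=100,
        no_cursor_timeout=False,
    )
    for doc in cursor:
        print(doc)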