Example #1
    def _update(self, filter, update, upsert, multi, **kwargs):
        validate_is_mapping("filter", filter)
        validate_boolean("upsert", upsert)

        if self.write_concern.acknowledged:
            updates = [
                SON([('q', filter), ('u', update), ("upsert", upsert),
                     ("multi", multi)])
            ]

            command = SON([("update", self._collection_name),
                           ("updates", updates),
                           ("writeConcern", self.write_concern.document)])
            raw_response = yield self._database.command(command, **kwargs)
            _check_write_command_response([[0, raw_response]])

            # Extract upserted_id from returned array
            if raw_response.get("upserted"):
                raw_response["upserted"] = raw_response["upserted"][0]["_id"]

        else:
            yield self.update(filter,
                              update,
                              upsert=upsert,
                              multi=multi,
                              **kwargs)
            raw_response = None

        defer.returnValue(raw_response)
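A minimal usage sketch of the public wrapper around this helper, assuming an already connected txmongo Collection; the collection and field names are illustrative:

from twisted.internet import defer

@defer.inlineCallbacks
def bump_login_count(collection):
    # ``collection`` is assumed to be an already connected txmongo Collection.
    result = yield collection.update_one({"name": "alice"},
                                         {"$inc": {"logins": 1}},
                                         upsert=True)
    # upserted_id is None unless the upsert actually inserted a new document.
    defer.returnValue(result.upserted_id)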
Example #2
    def _new_find_and_modify(self, filter, projection, sort, upsert=None,
                             return_document=ReturnDocument.BEFORE, **kwargs):
        validate_is_mapping("filter", filter)
        if not isinstance(return_document, bool):
            raise ValueError("TxMongo: return_document must be ReturnDocument.BEFORE "
                             "or ReturnDocument.AFTER")

        cmd = SON([("findAndModify", self._collection_name),
                   ("query", filter),
                   ("new", return_document)])
        cmd.update(kwargs)

        if projection is not None:
            cmd["fields"] = self._normalize_fields_projection(projection)

        if sort is not None:
            cmd["sort"] = dict(sort["orderby"])
        if upsert is not None:
            validate_boolean("upsert", upsert)
            cmd["upsert"] = upsert

        no_obj_error = "No matching object found"

        result = yield self._database.command(cmd, allowable_errors=[no_obj_error], **kwargs)
        defer.returnValue(result.get("value"))
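For context, a hedged sketch of the public find_one_and_update() call that drives this command, assuming the standard PyMongo-style API; database and field names are illustrative:

from pymongo import MongoClient, ReturnDocument

counters = MongoClient().test.counters          # illustrative database/collection names
seq = counters.find_one_and_update(
    {"_id": "order_id"},
    {"$inc": {"value": 1}},
    upsert=True,
    return_document=ReturnDocument.AFTER,       # return the modified document, not the original
)
print(seq["value"])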
Example #3
    def _new_find_and_modify(self, filter, projection, sort, upsert=None,
                             return_document=ReturnDocument.BEFORE, **kwargs):
        validate_is_mapping("filter", filter)
        if not isinstance(return_document, bool):
            raise ValueError("TxMongo: return_document must be ReturnDocument.BEFORE "
                             "or ReturnDocument.AFTER")

        cmd = SON([("findAndModify", self._collection_name),
                   ("query", filter),
                   ("new", return_document)])
        cmd.update(kwargs)

        if projection is not None:
            cmd["fields"] = self._normalize_fields_projection(projection)

        if sort is not None:
            cmd["sort"] = dict(sort["orderby"])
        if upsert is not None:
            validate_boolean("upsert", upsert)
            cmd["upsert"] = upsert

        no_obj_error = "No matching object found"

        result = yield self._database.command(cmd, allowable_errors=[no_obj_error], **kwargs)
        defer.returnValue(result.get("value"))
Example #4
    def __init__(self, filter, replacement, upsert=False, collation=None):
        """Create a ReplaceOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.

        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        self._filter = filter
        self._doc = replacement
        self._upsert = upsert
        self._collation = collation
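A short usage sketch of ReplaceOne with bulk_write(), assuming a local MongoDB instance; the database, collection, and documents are illustrative:

from pymongo import MongoClient
from pymongo.operations import ReplaceOne

users = MongoClient().test.users                # illustrative names
result = users.bulk_write([
    ReplaceOne({"_id": 1}, {"_id": 1, "name": "Alice"}, upsert=True),
    ReplaceOne({"_id": 2}, {"_id": 2, "name": "Bob"}),
])
print(result.matched_count, result.upserted_ids)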
Example #5
    def __init__(self, filter, collation=None, hint=None):
        """Create a DeleteOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.4 and above.

        .. versionchanged:: 3.11
           Added the ``hint`` option.
        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if hint is not None:
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)
        self._filter = filter
        self._collation = collation
        self._hint = hint
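A hedged usage sketch of DeleteOne with a hint, assuming MongoDB 4.4+ and PyMongo 3.11+; index and collection names are illustrative:

from pymongo import ASCENDING, MongoClient
from pymongo.operations import DeleteOne

events = MongoClient().test.events              # illustrative names
events.create_index([("expired", ASCENDING)])
result = events.bulk_write([
    DeleteOne({"expired": True}, hint=[("expired", ASCENDING)]),  # hint requires MongoDB 4.4+
])
print(result.deleted_count)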
Example #6
    def update(self,
               spec,
               document,
               upsert=False,
               manipulate=False,
               multi=False,
               check_keys=True,
               **kwargs):

        warnings.warn(
            "update is deprecated. Use replace_one, update_one or "
            "update_many instead.",
            DeprecationWarning,
            stacklevel=2)
        common.validate_is_mapping("spec", spec)
        common.validate_is_mapping("document", document)
        if document:

            first = next(iter(document))
            if first.startswith('$'):
                check_keys = False

        write_concern = None
        collation = validate_collation_or_none(kwargs.pop('collation', None))
        if kwargs:
            write_concern = WriteConcern(**kwargs)
        return self._update_retryable(spec,
                                      document,
                                      upsert,
                                      check_keys,
                                      multi,
                                      manipulate,
                                      write_concern,
                                      collation=collation)
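Since update() is deprecated, a minimal sketch of the equivalent modern calls, assuming coll is a connected PyMongo Collection; filters and documents are illustrative:

# Equivalent modern calls for the deprecated Collection.update():
coll.replace_one({"_id": 1}, {"_id": 1, "name": "Alice"}, upsert=True)   # whole-document replace
coll.update_one({"name": "Alice"}, {"$set": {"active": True}})           # single document, $-operators
coll.update_many({"active": False}, {"$set": {"archived": True}})        # multi=True equivalent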
Example #7
    def __init__(self, filter, replacement, upsert=False, collation=None):
        """Create a ReplaceOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.

        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        self._filter = filter
        self._doc = replacement
        self._upsert = upsert
        self._collation = collation
Example #8
    def __init__(self, filter=None, doc=None, upsert=None):
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
Example #9
    def __init__(self, filter=None, doc=None, upsert=None):
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
Example #10
    def _delete(self, sock_info, criteria, multi, write_concern=None):
        """Internal delete helper."""
        common.validate_is_mapping("filter", criteria)
        write_concern = write_concern or self.write_concern
        acknowledged = write_concern.acknowledged
        delete_doc = SON([('q', criteria), ('limit', int(not multi))])

        # (The body that issues the delete command and builds ``result`` is
        # not shown in this excerpt.)
        return result
Example #11
    def __init__(self, filter, doc, upsert, collation):
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
Example #12
    def execute(self, write_concern=None):
        """Execute all provided operations.

        :Parameters:
          - write_concern (optional): the write concern for this bulk
            execution.
        """
        if write_concern is not None:
            validate_is_mapping("write_concern", write_concern)
        return self.__bulk.execute(write_concern)
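A hedged sketch of how execute() is reached through the legacy fluent bulk API (removed in PyMongo 4.0, so this assumes PyMongo 3.x); coll and the documents are illustrative:

bulk = coll.initialize_ordered_bulk_op()        # coll: a connected PyMongo 3.x Collection
bulk.find({"status": "new"}).update({"$set": {"status": "queued"}})
bulk.find({"_id": 42}).upsert().replace_one({"_id": 42, "status": "done"})
bulk.insert({"status": "new"})
result = bulk.execute(write_concern={"w": "majority"})   # write_concern is passed as a mapping
print(result["nModified"], result["nUpserted"])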
Example #13
    async def execute(self, write_concern: Optional[dict] = None) -> dict:
        """Execute all provided operations.

        :Parameters:
          - write_concern: the write concern for this bulk
            execution.
        """
        if write_concern is not None:
            validate_is_mapping('write_concern', write_concern)
        return await self.__bulk.execute(write_concern)
Example #14
    def execute(self, write_concern=None):
        """Execute all provided operations.

        :Parameters:
          - write_concern (optional): the write concern for this bulk
            execution.
        """
        if write_concern is not None:
            validate_is_mapping("write_concern", write_concern)
        return self.__bulk.execute(write_concern)
Example #15
    def __init__(
        self,
        target,
        cursor_class,
        pipeline,
        options,
        explicit_session,
        let=None,
        user_fields=None,
        result_processor=None,
        comment=None,
    ):
        if "explain" in options:
            raise ConfigurationError(
                "The explain option is not supported. Use Database.command instead."
            )

        self._target = target

        pipeline = common.validate_list("pipeline", pipeline)
        self._pipeline = pipeline
        self._performs_write = False
        if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]):
            self._performs_write = True

        common.validate_is_mapping("options", options)
        if let is not None:
            common.validate_is_mapping("let", let)
            options["let"] = let
        if comment is not None:
            options["comment"] = comment
        self._options = options

        # This is the batchSize that will be used for setting the initial
        # batchSize for the cursor, as well as the subsequent getMores.
        self._batch_size = common.validate_non_negative_integer_or_none(
            "batchSize", self._options.pop("batchSize", None))

        # If the cursor option is already specified, avoid overriding it.
        self._options.setdefault("cursor", {})
        # If the pipeline performs a write, we ignore the initial batchSize
        # since the server doesn't return results in this case.
        if self._batch_size is not None and not self._performs_write:
            self._options["cursor"]["batchSize"] = self._batch_size

        self._cursor_class = cursor_class
        self._explicit_session = explicit_session
        self._user_fields = user_fields
        self._result_processor = result_processor

        self._collation = validate_collation_or_none(
            options.pop("collation", None))

        self._max_await_time_ms = options.pop("maxAwaitTimeMS", None)
        self._write_preference = None
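A short sketch of the public aggregate() call whose options feed this constructor, assuming PyMongo's standard signature; pipeline contents and names are illustrative:

from pymongo import MongoClient

orders = MongoClient().shop.orders              # illustrative names
pipeline = [
    {"$match": {"status": "shipped"}},
    {"$group": {"_id": "$customer_id", "total": {"$sum": "$amount"}}},
]
# batchSize controls the initial batch and subsequent getMores, as noted above.
cursor = orders.aggregate(pipeline, batchSize=100, comment="monthly rollup")
for doc in cursor:
    print(doc)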
Example #16
    def __init__(self, filter, doc, upsert, collation, array_filters):
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if array_filters is not None:
            validate_list("array_filters", array_filters)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
        self._array_filters = array_filters
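A hedged usage sketch of the corresponding public operation, UpdateOne with array_filters, via bulk_write(); names and documents are illustrative:

from pymongo import MongoClient
from pymongo.operations import UpdateOne

grades = MongoClient().school.grades            # illustrative names
result = grades.bulk_write([
    UpdateOne(
        {"scores": {"$elemMatch": {"$gt": 100}}},
        {"$set": {"scores.$[s]": 100}},
        array_filters=[{"s": {"$gt": 100}}],    # clamp every matching array element
        upsert=False,
    ),
])
print(result.modified_count)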
Example #17
    def __init__(self, filter, doc, upsert, collation, array_filters):
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if array_filters is not None:
            validate_list("array_filters", array_filters)
        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
        self._array_filters = array_filters
Example #18
    def find(self, selector):
        """Specify selection criteria for bulk operations.

        :Parameters:
          - `selector` (dict): the selection criteria for update
            and remove operations.

        :Returns:
          - A :class:`BulkWriteOperation` instance, used to add
            update and remove operations to this bulk operation.
        """
        validate_is_mapping("selector", selector)
        return BulkWriteOperation(selector, self.__bulk)
Example #19
    def find(self, selector):
        """Specify selection criteria for bulk operations.

        :Parameters:
          - `selector` (dict): the selection criteria for update
            and remove operations.

        :Returns:
          - A :class:`BulkWriteOperation` instance, used to add
            update and remove operations to this bulk operation.
        """
        validate_is_mapping("selector", selector)
        return BulkWriteOperation(selector, self.__bulk)
Example #20
    def list_collection_names(
        self,
        session: Optional["ClientSession"] = None,
        filter: Optional[Mapping[str, Any]] = None,
        comment: Optional[Any] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Get a list of all the collection names in this database.

        For example, to list all non-system collections::

            filter = {"name": {"$regex": r"^(?!system\\.)"}}
            db.list_collection_names(filter=filter)

        :Parameters:
          - `session` (optional): a
            :class:`~pymongo.client_session.ClientSession`.
          - `filter` (optional):  A query document to filter the list of
            collections returned from the listCollections command.
          - `comment` (optional): A user-provided comment to attach to this
            command.
          - `**kwargs` (optional): Optional parameters of the
            `listCollections command
            <https://docs.mongodb.com/manual/reference/command/listCollections/>`_
            can be passed as keyword arguments to this method. The supported
            options differ by server version.


        .. versionchanged:: 3.8
           Added the ``filter`` and ``**kwargs`` parameters.

        .. versionadded:: 3.6
        """
        if comment is not None:
            kwargs["comment"] = comment
        if filter is None:
            kwargs["nameOnly"] = True

        else:
            # The enumerate collections spec states that "drivers MUST NOT set
            # nameOnly if a filter specifies any keys other than name."
            common.validate_is_mapping("filter", filter)
            kwargs["filter"] = filter
            if not filter or (len(filter) == 1 and "name" in filter):
                kwargs["nameOnly"] = True

        return [
            result["name"]
            for result in self.list_collections(session=session, **kwargs)
        ]
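A brief usage sketch of list_collection_names(), assuming a local MongoDB instance; the database name and regex are illustrative:

from pymongo import MongoClient

db = MongoClient().reporting                    # illustrative database name
# Name-only listing (nameOnly=True is sent automatically when no filter is given).
print(db.list_collection_names())
# Filtered listing; filtering on anything other than "name" disables nameOnly.
print(db.list_collection_names(filter={"name": {"$regex": "^daily_"}}))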
Example #21
    def _delete(self, filter, multi, **kwargs):
        validate_is_mapping("filter", filter)

        if self.write_concern.acknowledged:
            deletes = [SON([('q', filter), ("limit", 0 if multi else 1)])]
            command = SON([("delete", self._collection_name),
                           ("deletes", deletes),
                           ("writeConcern", self.write_concern.document)])

            raw_response = yield self._database.command(command, **kwargs)
            _check_write_command_response([[0, raw_response]])

        else:
            yield self.remove(filter, single=not multi, **kwargs)
            raw_response = None

        defer.returnValue(raw_response)
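A minimal sketch of the public delete wrapper built on this helper, assuming an already connected txmongo Collection; the filter is illustrative:

from twisted.internet import defer

@defer.inlineCallbacks
def purge_expired(collection):
    # ``collection`` is assumed to be an already connected txmongo Collection.
    result = yield collection.delete_many({"expired": True})
    defer.returnValue(result.deleted_count)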
Example #22
    def _delete(self, filter, multi):
        validate_is_mapping("filter", filter)

        if self.write_concern.acknowledged:
            deletes = [SON([("q", filter), ("limit", 0 if multi else 1)])]
            command = SON(
                [("delete", self._collection_name), ("deletes", deletes), ("writeConcern", self.write_concern.document)]
            )

            raw_response = yield self._database.command(command)
            _check_write_command_response([[0, raw_response]])

        else:
            yield self.remove(filter, single=not multi)
            raw_response = None

        defer.returnValue(raw_response)
Example #23
    def __init__(self, filter, doc, upsert, collation, array_filters, hint):
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if array_filters is not None:
            validate_list("array_filters", array_filters)
        if hint is not None:
            if not isinstance(hint, string_type):
                hint = helpers._index_document(hint)

        self._filter = filter
        self._doc = doc
        self._upsert = upsert
        self._collation = collation
        self._array_filters = array_filters
        self._hint = hint
Example #24
    def __init__(self, filter, collation=None):
        """Create a DeleteMany instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the documents to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.

        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        self._filter = filter
        self._collation = collation
Example #25
    def __init__(self, filter, collation=None):
        """Create a DeleteMany instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the documents to delete.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.

        .. versionchanged:: 3.5
           Added the `collation` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        self._filter = filter
        self._collation = collation
Example #26
    def find(self,
             selector: dict,
             collation: Optional[Collation] = None) -> BulkWriteOperation:
        """Specify selection criteria for bulk operations.

        :Parameters:
          - `selector`: the selection criteria for update
            and remove operations.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.

        :Returns:
          - A :class:`BulkWriteOperation` instance, used to add
            update and remove operations to this bulk operation.
        """
        validate_is_mapping('selector', selector)
        return BulkWriteOperation(selector, self.__bulk, collation)
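A hedged sketch of find() with a collation through the PyMongo 3.x legacy bulk API, which this method mirrors; coll, the locale, and the documents are illustrative:

from pymongo.collation import Collation

bulk = coll.initialize_unordered_bulk_op()      # coll: a connected PyMongo 3.x Collection
matcher = bulk.find({"city": "cordoba"},
                    collation=Collation(locale="es", strength=1))  # accent/case-insensitive match
matcher.update({"$set": {"normalized": True}})
bulk.execute()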
Example #27
    def __init__(
        self,
        filter: Mapping[str, Any],
        replacement: Mapping[str, Any],
        upsert: bool = False,
        collation: Optional[_CollationIn] = None,
        hint: Optional[_IndexKeyHint] = None,
    ) -> None:
        """Create a ReplaceOne instance.

        For use with :meth:`~pymongo.collection.Collection.bulk_write`.

        :Parameters:
          - `filter`: A query that matches the document to replace.
          - `replacement`: The new document.
          - `upsert` (optional): If ``True``, perform an insert if no documents
            match the filter.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`.
          - `hint` (optional): An index to use to support the query
            predicate specified either by its string name, or in the same
            format as passed to
            :meth:`~pymongo.collection.Collection.create_index` (e.g.
            ``[('field', ASCENDING)]``). This option is only supported on
            MongoDB 4.2 and above.

        .. versionchanged:: 3.11
           Added the ``hint`` option.
        .. versionchanged:: 3.5
           Added the ``collation`` option.
        """
        if filter is not None:
            validate_is_mapping("filter", filter)
        if upsert is not None:
            validate_boolean("upsert", upsert)
        if hint is not None:
            if not isinstance(hint, str):
                hint = helpers._index_document(hint)

        self._filter = filter
        self._doc = replacement
        self._upsert = upsert
        self._collation = collation
        self._hint = hint
Example #28
    def find(self, selector, collation=None):
        """Specify selection criteria for bulk operations.

        :Parameters:
          - `selector` (dict): the selection criteria for update
            and remove operations.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only supported
            on MongoDB 3.4 and above.

        :Returns:
          - A :class:`BulkWriteOperation` instance, used to add
            update and remove operations to this bulk operation.

        .. versionchanged:: 3.4
           Added the `collation` option.

        """
        validate_is_mapping("selector", selector)
        return BulkWriteOperation(selector, self.__bulk, collation)
Example #29
    def find(self, selector, collation=None):
        """Specify selection criteria for bulk operations.

        :Parameters:
          - `selector` (dict): the selection criteria for update
            and remove operations.
          - `collation` (optional): An instance of
            :class:`~pymongo.collation.Collation`. This option is only
            supported on MongoDB 3.4 and above.

        :Returns:
          - A :class:`BulkWriteOperation` instance, used to add
            update and remove operations to this bulk operation.

        .. versionchanged:: 3.4
           Added the `collation` option.

        """
        validate_is_mapping("selector", selector)
        return BulkWriteOperation(selector, self.__bulk, collation)
Example #30
    def _update(self, filter, update, upsert, multi):
        validate_is_mapping("filter", filter)
        validate_boolean("upsert", upsert)

        if self.write_concern.acknowledged:
            updates = [SON([("q", filter), ("u", update), ("upsert", upsert), ("multi", multi)])]

            command = SON(
                [("update", self._collection_name), ("updates", updates), ("writeConcern", self.write_concern.document)]
            )
            raw_response = yield self._database.command(command)
            _check_write_command_response([[0, raw_response]])

            # Extract upserted_id from returned array
            if raw_response.get("upserted"):
                raw_response["upserted"] = raw_response["upserted"][0]["_id"]

        else:
            yield self.update(filter, update, upsert=upsert, multi=multi)
            raw_response = None

        defer.returnValue(raw_response)
Example #31
    def __init__(self,
                 collection,
                 filter=None,
                 projection=None,
                 skip=0,
                 limit=0,
                 no_cursor_timeout=False,
                 cursor_type=CursorType.NON_TAILABLE,
                 sort=None,
                 allow_partial_results=False,
                 oplog_replay=False,
                 modifiers=None,
                 batch_size=0,
                 manipulate=True):
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. mongodoc:: cursors
        """
        self.__id = None

        spec = filter
        if spec is None:
            spec = {}

        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                               CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if modifiers is not None:
            validate_is_mapping("modifiers", modifiers)
        if not isinstance(batch_size, integer_types):
            raise TypeError("batch_size must be an integer")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")

        if projection is not None:
            if not projection:
                projection = {"_id": 1}
            projection = helpers._fields_list_to_dict(projection, "projection")

        self.__collection = collection
        self.__spec = spec
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__modifiers = modifiers and modifiers.copy() or {}
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = None
        self.__explain = False
        self.__hint = None
        self.__comment = None
        self.__max_time_ms = None
        self.__max_await_time_ms = None
        self.__max = None
        self.__min = None
        self.__manipulate = manipulate

        # Exhaust cursor support
        self.__exhaust = False
        self.__exhaust_mgr = None
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation('Exhaust cursors are '
                                       'not supported by mongos')
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        self.__data = deque()
        self.__address = None
        self.__retrieved = 0
        self.__killed = False

        self.__codec_options = collection.codec_options
        self.__read_preference = collection.read_preference
        self.__read_concern = collection.read_concern

        self.__query_flags = cursor_type
        if self.__read_preference != ReadPreference.PRIMARY:
            self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
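A short usage sketch of Collection.find(), which constructs this cursor, assuming a local MongoDB instance; names and the query are illustrative:

from pymongo import DESCENDING, MongoClient

logs = MongoClient().app.logs                   # illustrative names
cursor = logs.find(
    {"level": "ERROR"},
    projection={"_id": 0, "msg": 1, "ts": 1},
    sort=[("ts", DESCENDING)],
    limit=50,
    batch_size=25,
)
for doc in cursor:
    print(doc["ts"], doc["msg"])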
Example #32
    def __init__(self,
                 collection,
                 filter=None,
                 projection=None,
                 skip=0,
                 limit=0,
                 no_cursor_timeout=False,
                 cursor_type=CursorType.NON_TAILABLE,
                 sort=None,
                 allow_partial_results=False,
                 oplog_replay=False,
                 modifiers=None,
                 batch_size=0,
                 collation=None,
                 hint=None,
                 max_scan=None,
                 max_time_ms=None,
                 max=None,
                 min=None,
                 return_key=False,
                 show_record_id=False,
                 snapshot=False,
                 comment=None,
                 session=None,
                 allow_disk_use=None):
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. mongodoc:: cursors
        """
        # Initialize all attributes used in __del__ before possibly raising
        # an error to avoid attribute errors during garbage collection.
        self.__id = None
        self.__exhaust = False
        self.__exhaust_mgr = None
        self.__killed = False

        if session:
            self.__session = session
            self.__explicit_session = True
        else:
            self.__session = None
            self.__explicit_session = False

        spec = filter
        if spec is None:
            spec = {}

        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        if no_cursor_timeout and not self.__explicit_session:
            warnings.warn(
                "use an explicit session with no_cursor_timeout=True "
                "otherwise the cursor may still timeout after "
                "30 minutes, for more info see "
                "https://docs.mongodb.com/v4.4/reference/method/"
                "cursor.noCursorTimeout/"
                "#session-idle-timeout-overrides-nocursortimeout",
                UserWarning,
                stacklevel=2)
        if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                               CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if modifiers is not None:
            warnings.warn("the 'modifiers' parameter is deprecated",
                          DeprecationWarning,
                          stacklevel=2)
            validate_is_mapping("modifiers", modifiers)
        if not isinstance(batch_size, int):
            raise TypeError("batch_size must be an integer")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")
        # Only set if allow_disk_use is provided by the user, else None.
        if allow_disk_use is not None:
            allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

        if projection is not None:
            if not projection:
                projection = {"_id": 1}
            projection = helpers._fields_list_to_dict(projection, "projection")

        self.__collection = collection
        self.__spec = spec
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__modifiers = modifiers and modifiers.copy() or {}
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = max_scan
        self.__explain = False
        self.__comment = comment
        self.__max_time_ms = max_time_ms
        self.__max_await_time_ms = None
        self.__max = max
        self.__min = min
        self.__collation = validate_collation_or_none(collation)
        self.__return_key = return_key
        self.__show_record_id = show_record_id
        self.__allow_disk_use = allow_disk_use
        self.__snapshot = snapshot
        self.__set_hint(hint)

        # Exhaust cursor support
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation('Exhaust cursors are '
                                       'not supported by mongos')
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        self.__data = deque()
        self.__address = None
        self.__retrieved = 0

        self.__codec_options = collection.codec_options
        # Read preference is set when the initial find is sent.
        self.__read_preference = None
        self.__read_concern = collection.read_concern

        self.__query_flags = cursor_type
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

        # The namespace to use for find/getMore commands.
        self.__dbname = collection.database.name
        self.__collname = collection.name
Example #33
    def __init__(
        self,
        collection: "Collection[_DocumentType]",
        filter: Optional[Mapping[str, Any]] = None,
        projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
        skip: int = 0,
        limit: int = 0,
        no_cursor_timeout: bool = False,
        cursor_type: int = CursorType.NON_TAILABLE,
        sort: Optional[_Sort] = None,
        allow_partial_results: bool = False,
        oplog_replay: bool = False,
        batch_size: int = 0,
        collation: Optional[_CollationIn] = None,
        hint: Optional[_Hint] = None,
        max_scan: Optional[int] = None,
        max_time_ms: Optional[int] = None,
        max: Optional[_Sort] = None,
        min: Optional[_Sort] = None,
        return_key: Optional[bool] = None,
        show_record_id: Optional[bool] = None,
        snapshot: Optional[bool] = None,
        comment: Optional[Any] = None,
        session: Optional["ClientSession"] = None,
        allow_disk_use: Optional[bool] = None,
        let: Optional[bool] = None,
    ) -> None:
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        # Initialize all attributes used in __del__ before possibly raising
        # an error to avoid attribute errors during garbage collection.
        self.__collection: Collection[_DocumentType] = collection
        self.__id: Any = None
        self.__exhaust = False
        self.__sock_mgr: Any = None
        self.__killed = False
        self.__session: Optional["ClientSession"]

        if session:
            self.__session = session
            self.__explicit_session = True
        else:
            self.__session = None
            self.__explicit_session = False

        spec: Mapping[str, Any] = filter or {}
        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        if no_cursor_timeout and not self.__explicit_session:
            warnings.warn(
                "use an explicit session with no_cursor_timeout=True "
                "otherwise the cursor may still timeout after "
                "30 minutes, for more info see "
                "https://docs.mongodb.com/v4.4/reference/method/"
                "cursor.noCursorTimeout/"
                "#session-idle-timeout-overrides-nocursortimeout",
                UserWarning,
                stacklevel=2,
            )
        if cursor_type not in (
                CursorType.NON_TAILABLE,
                CursorType.TAILABLE,
                CursorType.TAILABLE_AWAIT,
                CursorType.EXHAUST,
        ):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if not isinstance(batch_size, int):
            raise TypeError("batch_size must be an integer")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")
        # Only set if allow_disk_use is provided by the user, else None.
        if allow_disk_use is not None:
            allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

        if projection is not None:
            projection = helpers._fields_list_to_dict(projection, "projection")

        if let is not None:
            validate_is_document_type("let", let)

        self.__let = let
        self.__spec = spec
        self.__has_filter = filter is not None
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = max_scan
        self.__explain = False
        self.__comment = comment
        self.__max_time_ms = max_time_ms
        self.__max_await_time_ms: Optional[int] = None
        self.__max: Optional[Union[SON[Any, Any], _Sort]] = max
        self.__min: Optional[Union[SON[Any, Any], _Sort]] = min
        self.__collation = validate_collation_or_none(collation)
        self.__return_key = return_key
        self.__show_record_id = show_record_id
        self.__allow_disk_use = allow_disk_use
        self.__snapshot = snapshot
        self.__set_hint(hint)

        # Exhaust cursor support
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation(
                    "Exhaust cursors are not supported by mongos")
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        self.__data: deque = deque()
        self.__address = None
        self.__retrieved = 0

        self.__codec_options = collection.codec_options
        # Read preference is set when the initial find is sent.
        self.__read_preference = None
        self.__read_concern = collection.read_concern

        self.__query_flags = cursor_type
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

        # The namespace to use for find/getMore commands.
        self.__dbname = collection.database.name
        self.__collname = collection.name
Example #34
    def __init__(self, collection, filter=None, projection=None, skip=0,
                 limit=0, no_cursor_timeout=False,
                 cursor_type=CursorType.NON_TAILABLE,
                 sort=None, allow_partial_results=False, oplog_replay=False,
                 modifiers=None, batch_size=0, manipulate=True,
                 collation=None, hint=None, max_scan=None, max_time_ms=None,
                 max=None, min=None, return_key=False, show_record_id=False,
                 snapshot=False, comment=None, session=None):
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. mongodoc:: cursors
        """
        # Initialize all attributes used in __del__ before possibly raising
        # an error to avoid attribute errors during garbage collection.
        self.__id = None
        self.__exhaust = False
        self.__exhaust_mgr = None
        self.__killed = False

        if session:
            self.__session = session
            self.__explicit_session = True
        else:
            self.__session = None
            self.__explicit_session = False

        spec = filter
        if spec is None:
            spec = {}

        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                               CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if modifiers is not None:
            warnings.warn("the 'modifiers' parameter is deprecated",
                          DeprecationWarning, stacklevel=2)
            validate_is_mapping("modifiers", modifiers)
        if not isinstance(batch_size, integer_types):
            raise TypeError("batch_size must be an integer")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")

        if projection is not None:
            if not projection:
                projection = {"_id": 1}
            projection = helpers._fields_list_to_dict(projection, "projection")

        self.__collection = collection
        self.__spec = spec
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__modifiers = modifiers and modifiers.copy() or {}
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = max_scan
        self.__explain = False
        self.__comment = comment
        self.__max_time_ms = max_time_ms
        self.__max_await_time_ms = None
        self.__max = max
        self.__min = min
        self.__manipulate = manipulate
        self.__collation = validate_collation_or_none(collation)
        self.__return_key = return_key
        self.__show_record_id = show_record_id
        self.__snapshot = snapshot
        self.__set_hint(hint)

        # Exhaust cursor support
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation('Exhaust cursors are '
                                       'not supported by mongos')
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        self.__data = deque()
        self.__address = None
        self.__retrieved = 0

        self.__codec_options = collection.codec_options
        self.__read_preference = collection.read_preference
        self.__read_concern = collection.read_concern

        self.__query_flags = cursor_type
        if self.__read_preference != ReadPreference.PRIMARY:
            self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
Example #35
    def __init__(self, collection: 'aiomongo.Collection',
                 filter: Optional[dict] = None, projection: Optional[Union[dict, list]] = None,
                 skip: int = 0, limit: int = 0, sort: Optional[List[tuple]] = None,
                 modifiers: Optional[dict] = None, batch_size: int = 0, no_cursor_timeout: bool = False,
                 collation: Optional[Union[Collation, dict]]=None) -> None:

        spec = filter
        if spec is None:
            spec = {}
        validate_is_mapping('filter', spec)
        if not isinstance(skip, int):
            raise TypeError('skip must be an instance of int')
        if not isinstance(limit, int):
            raise TypeError('limit must be an instance of int')

        if modifiers is not None:
            validate_is_mapping('modifiers', modifiers)

        if not isinstance(batch_size, int):
            raise TypeError('batch_size must be an integer')
        if batch_size < 0:
            raise ValueError('batch_size must be >= 0')

        if projection is not None:
            if not projection:
                projection = {'_id': 1}
            projection = helpers._fields_list_to_dict(projection, 'projection')

        self.__id = None
        self.__codec_options = DEFAULT_CODEC_OPTIONS
        self.__collation = validate_collation_or_none(collation)
        self.__collection = collection
        self.__connection = None
        self.__data = deque()
        self.__explain = False
        self.__max_scan = None
        self.__spec = spec
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        self.__modifiers = modifiers or {}
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__hint = None
        self.__comment = None
        self.__max_time_ms = None
        self.__max_await_time_ms = None
        self.__max = None
        self.__min = None
        self.__killed = False

        self.__codec_options = collection.codec_options
        self.__read_preference = collection.read_preference
        self.__read_concern = collection.read_concern
        self.__retrieved = 0

        self.__query_flags = 0
        if self.__read_preference != ReadPreference.PRIMARY:
            self.__query_flags |= _QUERY_OPTIONS['slave_okay']
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS['no_timeout']
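A rough asyncio-side sketch of iterating such a cursor; the create_client factory, get_default_database(), and async-for iteration are assumptions about aiomongo's API, and all names are illustrative:

import asyncio

import aiomongo

async def main():
    # Connection factory and database accessor are assumed here.
    client = await aiomongo.create_client("mongodb://127.0.0.1:27017/test")
    items = client.get_default_database().items
    async for doc in items.find({"qty": {"$gt": 0}}):   # cursors are assumed async-iterable
        print(doc)

asyncio.run(main())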
Example #36
    def replace_one(self, filter):
        common.validate_is_mapping("filter", filter)