def __init__(self, target, pipeline, full_document, resume_after,
             max_await_time_ms, batch_size, collation,
             start_at_operation_time, session):
    if pipeline is None:
        pipeline = []
    elif not isinstance(pipeline, list):
        raise TypeError("pipeline must be a list")
    common.validate_string_or_none('full_document', full_document)
    validate_collation_or_none(collation)
    common.validate_non_negative_integer_or_none("batchSize", batch_size)

    self._decode_custom = False
    self._orig_codec_options = target.codec_options
    if target.codec_options.type_registry._decoder_map:
        self._decode_custom = True
        # Keep the type registry so that we support encoding custom types
        # in the pipeline.
        self._target = target.with_options(
            codec_options=target.codec_options.with_options(
                document_class=RawBSONDocument))
    else:
        self._target = target

    self._pipeline = copy.deepcopy(pipeline)
    self._full_document = full_document
    self._resume_token = copy.deepcopy(resume_after)
    self._max_await_time_ms = max_await_time_ms
    self._batch_size = batch_size
    self._collation = collation
    self._start_at_operation_time = start_at_operation_time
    self._session = session
    self._cursor = self._create_cursor()

def update(self, spec, document, upsert=False, manipulate=False,
           multi=False, check_keys=True, **kwargs):
    warnings.warn("update is deprecated. Use replace_one, update_one or "
                  "update_many instead.", DeprecationWarning, stacklevel=2)
    common.validate_is_mapping("spec", spec)
    common.validate_is_mapping("document", document)
    if document:
        # If the first top-level key starts with '$' this is a
        # modifier-style update, so key validation must be skipped.
        first = next(iter(document))
        if first.startswith('$'):
            check_keys = False

    write_concern = None
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    if kwargs:
        write_concern = WriteConcern(**kwargs)
    return self._update_retryable(
        spec, document, upsert, check_keys, multi,
        manipulate, write_concern, collation=collation)

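# Migration sketch for the deprecated update() above onto the modern CRUD
# API; a minimal example assuming a running mongod (database, collection,
# and field names are hypothetical).
from pymongo import MongoClient
from pymongo.collation import Collation

coll = MongoClient().test.people
# update(spec, doc)              -> update_one()
coll.update_one({"name": "muller"}, {"$set": {"city": "Berlin"}},
                collation=Collation(locale="de", strength=1))
# update(spec, doc, multi=True)  -> update_many()
coll.update_many({"active": False}, {"$set": {"archived": True}})
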
def add_update(self, selector, update, multi=False, upsert=False,
               collation=None, array_filters=None, hint=None):
    """Create an update document and add it to the list of ops."""
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    if array_filters is not None:
        self.uses_array_filters = True
        cmd['arrayFilters'] = array_filters
    if hint is not None:
        self.uses_hint = True
        cmd['hint'] = hint
    if multi:
        # A bulk_write containing an update_many is not retryable.
        self.is_retryable = False
    self.ops.append((_UPDATE, cmd))

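# Callers reach add_update() through Collection.bulk_write(); a sketch of
# the per-op options that flow into the command document above. Assumes a
# driver recent enough to support bulk update hints (pymongo >= 3.11);
# the index name passed as hint is hypothetical.
from pymongo import MongoClient, UpdateOne, UpdateMany

coll = MongoClient().test.scores
coll.bulk_write([
    UpdateOne({"grades": {"$gte": 90}}, {"$set": {"grades.$[g]": 90}},
              array_filters=[{"g": {"$gt": 90}}]),
    # multi=True op: makes the whole bulk non-retryable, per the code above.
    UpdateMany({"year": 2020}, {"$inc": {"n": 1}}, hint="year_1"),
])
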
def __init__(self, database, name, create=False, **kwargs):
    super(Collection, self).__init__()
    if not isinstance(name, string_type):
        raise TypeError("name must be an instance "
                        "of %s" % (string_type.__name__,))

    if not name or ".." in name:
        raise InvalidName("collection names cannot be empty")
    if "$" in name and not (name.startswith("oplog.$main") or
                            name.startswith("$cmd")):
        raise InvalidName("collection names must not "
                          "contain '$': %r" % name)
    if name[0] == "." or name[-1] == ".":
        raise InvalidName("collection names must not start "
                          "or end with '.': %r" % name)
    if "\x00" in name:
        raise InvalidName("collection names must not contain the "
                          "null character")
    collation = validate_collation_or_none(kwargs.pop('collation', None))

    self.__database = database
    self.__name = _unicode(name)
    self.__full_name = _UJOIN % (self.__database.name, self.__name)
    if create or kwargs or collation:
        self.__create(kwargs, collation)

def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
    warnings.warn("save is deprecated. Use insert_one or replace_one "
                  "instead", DeprecationWarning, stacklevel=2)
    common.validate_is_document_type("to_save", to_save)

    write_concern = None
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    if kwargs:
        write_concern = WriteConcern(**kwargs)

    if not (isinstance(to_save, RawBSONDocument) or "_id" in to_save):
        return self._insert(to_save, True, check_keys, manipulate,
                            write_concern)
    else:
        self._update_retryable(
            {"_id": to_save["_id"]}, to_save, True, check_keys,
            False, manipulate, write_concern, collation=collation)
        return to_save.get("_id")

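# save() is an upsert-by-_id; replace_one(..., upsert=True) is its
# documented replacement. A minimal sketch (names are hypothetical).
from pymongo import MongoClient

coll = MongoClient().test.docs
doc = {"_id": 1, "status": "ok"}
coll.replace_one({"_id": doc["_id"]}, doc, upsert=True)
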
def __init__(
    self,
    target: Union["MongoClient[_DocumentType]", "Database[_DocumentType]",
                  "Collection[_DocumentType]"],
    pipeline: Optional[_Pipeline],
    full_document: Optional[str],
    resume_after: Optional[Mapping[str, Any]],
    max_await_time_ms: Optional[int],
    batch_size: Optional[int],
    collation: Optional[_CollationIn],
    start_at_operation_time: Optional[Timestamp],
    session: Optional["ClientSession"],
    start_after: Optional[Mapping[str, Any]],
    comment: Optional[Any] = None,
) -> None:
    if pipeline is None:
        pipeline = []
    pipeline = common.validate_list("pipeline", pipeline)
    common.validate_string_or_none("full_document", full_document)
    validate_collation_or_none(collation)
    common.validate_non_negative_integer_or_none("batchSize", batch_size)

    self._decode_custom = False
    self._orig_codec_options = target.codec_options
    if target.codec_options.type_registry._decoder_map:
        self._decode_custom = True
        # Keep the type registry so that we support encoding custom types
        # in the pipeline.
        self._target = target.with_options(  # type: ignore
            codec_options=target.codec_options.with_options(
                document_class=RawBSONDocument))
    else:
        self._target = target

    self._pipeline = copy.deepcopy(pipeline)
    self._full_document = full_document
    self._uses_start_after = start_after is not None
    self._uses_resume_after = resume_after is not None
    self._resume_token = copy.deepcopy(start_after or resume_after)
    self._max_await_time_ms = max_await_time_ms
    self._batch_size = batch_size
    self._collation = collation
    self._start_at_operation_time = start_at_operation_time
    self._session = session
    self._comment = comment

    # Initialize cursor.
    self._cursor = self._create_cursor()

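# Applications construct a ChangeStream via watch() rather than calling
# this constructor directly. A resume sketch, assuming a replica set or
# sharded cluster (collection name is hypothetical).
from pymongo import MongoClient

coll = MongoClient().test.orders
with coll.watch(full_document="updateLookup") as stream:
    change = next(stream)           # blocks until a change arrives
    token = stream.resume_token     # save this somewhere durable
# Later, pick up exactly after the last event seen above.
with coll.watch(start_after=token) as stream:
    for change in stream:
        print(change["operationType"])
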
def add_delete(self, selector, limit, collation=None):
    """Create a delete document and add it to the list of ops."""
    cmd = SON([('q', selector), ('limit', limit)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_DELETE, cmd))

def __init__(
    self,
    target,
    cursor_class,
    pipeline,
    options,
    explicit_session,
    let=None,
    user_fields=None,
    result_processor=None,
    comment=None,
):
    if "explain" in options:
        raise ConfigurationError(
            "The explain option is not supported. Use Database.command instead."
        )

    self._target = target

    pipeline = common.validate_list("pipeline", pipeline)
    self._pipeline = pipeline
    self._performs_write = False
    if pipeline and ("$out" in pipeline[-1] or "$merge" in pipeline[-1]):
        self._performs_write = True

    common.validate_is_mapping("options", options)
    if let is not None:
        common.validate_is_mapping("let", let)
        options["let"] = let
    if comment is not None:
        options["comment"] = comment

    self._options = options

    # This is the batchSize that will be used for setting the initial
    # batchSize for the cursor, as well as the subsequent getMores.
    self._batch_size = common.validate_non_negative_integer_or_none(
        "batchSize", self._options.pop("batchSize", None))

    # If the cursor option is already specified, avoid overriding it.
    self._options.setdefault("cursor", {})
    # If the pipeline performs a write, we ignore the initial batchSize
    # since the server doesn't return results in this case.
    if self._batch_size is not None and not self._performs_write:
        self._options["cursor"]["batchSize"] = self._batch_size

    self._cursor_class = cursor_class
    self._explicit_session = explicit_session
    self._user_fields = user_fields
    self._result_processor = result_processor

    self._collation = validate_collation_or_none(
        options.pop("collation", None))

    self._max_await_time_ms = options.pop("maxAwaitTimeMS", None)
    self._write_preference = None

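# The options mapping above is populated from aggregate() keyword
# arguments; a sketch of the corresponding public call (names are
# hypothetical).
from pymongo import MongoClient
from pymongo.collation import Collation

coll = MongoClient().test.sales
cursor = coll.aggregate(
    [{"$match": {"region": "EU"}},
     {"$group": {"_id": "$sku", "n": {"$sum": 1}}}],
    batchSize=100,
    collation=Collation(locale="en_US", strength=2),
)
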
def add_replace(self, selector, replacement, upsert=False, collation=None):
    """Create a replace document and add it to the list of ops."""
    validate_ok_for_replace(replacement)
    cmd = SON([('q', selector), ('u', replacement),
               ('multi', False), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_UPDATE, cmd))

def count(self, filter=None, session=None, **kwargs):
    # Build the count command, folding the filter and any index hint
    # into the command document before dispatching it.
    cmd = SON([("count", self.__name)])
    if filter is not None:
        if "query" in kwargs:
            raise ConfigurationError("can't pass both filter and query")
        kwargs["query"] = filter
    if "hint" in kwargs and not isinstance(kwargs["hint"], string_type):
        kwargs["hint"] = helpers._index_document(kwargs["hint"])
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)
    return self._count(cmd, collation, session)

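# count() is deprecated in modern drivers; count_documents() accepts the
# same collation option. A minimal sketch (names are hypothetical).
from pymongo import MongoClient
from pymongo.collation import Collation

coll = MongoClient().test.people
n = coll.count_documents({"name": "muller"},
                         collation=Collation(locale="de", strength=1))
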
def add_delete(self, selector, limit, collation=None):
    """Create a delete document and add it to the list of ops."""
    cmd = SON([('q', selector), ('limit', limit)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    if limit == _DELETE_ALL:
        # A bulk_write containing a delete_many is not retryable.
        self.is_retryable = False
    self.ops.append((_DELETE, cmd))

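# These delete ops are queued by bulk_write(). A DeleteMany maps to
# limit=0 (_DELETE_ALL) and disables retryability for the whole batch;
# a DeleteOne (limit=1) does not. Names are hypothetical.
from pymongo import MongoClient, DeleteOne, DeleteMany

coll = MongoClient().test.logs
coll.bulk_write([
    DeleteOne({"level": "debug"}),
    DeleteMany({"expired": True}),  # limit=0 -> bulk not retryable
])
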
def __init__(self, target, pipeline, full_document, resume_after,
             max_await_time_ms, batch_size, collation,
             start_at_operation_time, session, start_after):
    if pipeline is None:
        pipeline = []
    elif not isinstance(pipeline, list):
        raise TypeError("pipeline must be a list")
    common.validate_string_or_none('full_document', full_document)
    validate_collation_or_none(collation)
    common.validate_non_negative_integer_or_none("batchSize", batch_size)

    self._decode_custom = False
    self._orig_codec_options = target.codec_options
    if target.codec_options.type_registry._decoder_map:
        self._decode_custom = True
        # Keep the type registry so that we support encoding custom types
        # in the pipeline.
        self._target = target.with_options(
            codec_options=target.codec_options.with_options(
                document_class=RawBSONDocument))
    else:
        self._target = target

    self._pipeline = copy.deepcopy(pipeline)
    self._full_document = full_document
    self._uses_start_after = start_after is not None
    self._uses_resume_after = resume_after is not None
    self._resume_token = copy.deepcopy(start_after or resume_after)
    self._max_await_time_ms = max_await_time_ms
    self._batch_size = batch_size
    self._collation = collation
    self._start_at_operation_time = start_at_operation_time
    self._session = session

    # Initialize cursor.
    self._cursor = self._create_cursor()

def add_update(self, selector, update, multi=False, upsert=False,
               collation=None, array_filters=None):
    """Create an update document and add it to the list of ops."""
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    if array_filters is not None:
        self.uses_array_filters = True
        cmd['arrayFilters'] = array_filters
    self.ops.append((_UPDATE, cmd))

def collation(self, collation):
    """Adds a :class:`~pymongo.collation.Collation` to this query.

    Raises :exc:`TypeError` if `collation` is not an instance of
    :class:`~pymongo.collation.Collation` or a ``dict``. Raises
    :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
    already been used. Only the last collation applied to this cursor has
    any effect.

    :Parameters:
      - `collation`: An instance of :class:`~pymongo.collation.Collation`.
    """
    self.__check_okay_to_chain()
    self.__collation = validate_collation_or_none(collation)
    return self

def collation(self, collation):
    """Adds a :class:`~pymongo.collation.Collation` to this query.

    This option is only supported on MongoDB 3.4 and above.

    Raises :exc:`TypeError` if `collation` is not an instance of
    :class:`~pymongo.collation.Collation` or a ``dict``. Raises
    :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
    already been used. Only the last collation applied to this cursor has
    any effect.

    :Parameters:
      - `collation`: An instance of :class:`~pymongo.collation.Collation`.
    """
    self.__check_okay_to_chain()
    self.__collation = validate_collation_or_none(collation)
    return self

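# collation() returns the cursor, so it chains like sort()/limit().
# A sketch of a diacritic-insensitive query (names are hypothetical).
from pymongo import MongoClient
from pymongo.collation import Collation

coll = MongoClient().test.people
for doc in (coll.find({"name": "muller"})
                .collation(Collation(locale="de", strength=1))
                .limit(10)):
    print(doc)
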
def add_update(self, selector: dict, update: dict, multi: bool = False,
               upsert: bool = False,
               collation: Optional[Collation] = None) -> None:
    """Create an update document and add it to the list of ops."""
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_UPDATE, cmd))

def add_update(self, selector, update, multi=False, upsert=False,
               collation=None, array_filters=None):
    """Create an update document and add it to the list of ops."""
    validate_ok_for_update(update)
    cmd = SON([('q', selector), ('u', update),
               ('multi', multi), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    if array_filters is not None:
        self.uses_array_filters = True
        cmd['arrayFilters'] = array_filters
    if multi:
        # A bulk_write containing an update_many is not retryable.
        self.is_retryable = False
    self.ops.append((_UPDATE, cmd))

def add_replace(self, selector, replacement, upsert=False,
                collation=None, hint=None):
    """Create a replace document and add it to the list of ops."""
    validate_ok_for_replace(replacement)
    cmd = SON([("q", selector), ("u", replacement),
               ("multi", False), ("upsert", upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd["collation"] = collation
    if hint is not None:
        self.uses_hint_update = True
        cmd["hint"] = hint
    self.ops.append((_UPDATE, cmd))

def remove(self, spec_or_id=None, multi=True, **kwargs):
    warnings.warn("remove is deprecated. Use delete_one or delete_many "
                  "instead.", DeprecationWarning, stacklevel=2)
    if spec_or_id is None:
        spec_or_id = {}
    if not isinstance(spec_or_id, abc.Mapping):
        spec_or_id = {"_id": spec_or_id}
    write_concern = None
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    if kwargs:
        write_concern = WriteConcern(**kwargs)
    return self._delete_retryable(spec_or_id, multi, write_concern,
                                  collation=collation)

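# remove() migration sketch: a bare _id maps to delete_one() and
# multi=True maps to delete_many() (names are hypothetical).
from pymongo import MongoClient

coll = MongoClient().test.sessions
coll.delete_one({"_id": "abc123"})    # was: remove("abc123")
coll.delete_many({"expired": True})   # was: remove({...}, multi=True)
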
def __init__(self, collection, filter=None, projection=None, skip=0,
             limit=0, no_cursor_timeout=False,
             cursor_type=CursorType.NON_TAILABLE,
             sort=None, allow_partial_results=False, oplog_replay=False,
             modifiers=None, batch_size=0, collation=None, hint=None,
             max_scan=None, max_time_ms=None, max=None, min=None,
             return_key=False, show_record_id=False, snapshot=False,
             comment=None, session=None, allow_disk_use=None):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    # Initialize all attributes used in __del__ before possibly raising
    # an error to avoid attribute errors during garbage collection.
    self.__id = None
    self.__exhaust = False
    self.__exhaust_mgr = None
    self.__killed = False

    if session:
        self.__session = session
        self.__explicit_session = True
    else:
        self.__session = None
        self.__explicit_session = False

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if no_cursor_timeout and not self.__explicit_session:
        warnings.warn(
            "use an explicit session with no_cursor_timeout=True "
            "otherwise the cursor may still timeout after "
            "30 minutes, for more info see "
            "https://docs.mongodb.com/v4.4/reference/method/"
            "cursor.noCursorTimeout/"
            "#session-idle-timeout-overrides-nocursortimeout",
            UserWarning, stacklevel=2)
    if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                           CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if modifiers is not None:
        warnings.warn("the 'modifiers' parameter is deprecated",
                      DeprecationWarning, stacklevel=2)
        validate_is_mapping("modifiers", modifiers)
    if not isinstance(batch_size, int):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")
    # Only set if allow_disk_use is provided by the user, else None.
    if allow_disk_use is not None:
        allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

    if projection is not None:
        if not projection:
            projection = {"_id": 1}
        projection = helpers._fields_list_to_dict(projection, "projection")

    self.__collection = collection
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers and modifiers.copy() or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms = None
    self.__max = max
    self.__min = min
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__allow_disk_use = allow_disk_use
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are '
                                   'not supported by mongos')
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data = deque()
    self.__address = None
    self.__retrieved = 0

    self.__codec_options = collection.codec_options
    # Read preference is set when the initial find is sent.
    self.__read_preference = None
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # The namespace to use for find/getMore commands.
    self.__dbname = collection.database.name
    self.__collname = collection.name

def find_and_modify(self, query={}, update=None, upsert=False, sort=None,
                    full_response=False, manipulate=False, **kwargs):
    warnings.warn("find_and_modify is deprecated, use find_one_and_delete"
                  ", find_one_and_replace, or find_one_and_update instead",
                  DeprecationWarning, stacklevel=2)

    if not update and not kwargs.get('remove', None):
        raise ValueError("Must either update or remove")

    if update and kwargs.get('remove', None):
        raise ValueError("Can't do both update and remove")

    # No need to include empty args
    if query:
        kwargs['query'] = query
    if update:
        kwargs['update'] = update
    if upsert:
        kwargs['upsert'] = upsert
    if sort:
        # Accept a list of tuples to match Cursor's sort parameter.
        if isinstance(sort, list):
            kwargs['sort'] = helpers._index_document(sort)
        # Accept OrderedDict, SON, and dict with len == 1 so we
        # don't break existing code already using find_and_modify.
        elif (isinstance(sort, ORDERED_TYPES) or
              isinstance(sort, dict) and len(sort) == 1):
            warnings.warn("Passing mapping types for `sort` is deprecated,"
                          " use a list of (key, direction) pairs instead",
                          DeprecationWarning, stacklevel=2)
            kwargs['sort'] = sort
        else:
            raise TypeError("sort must be a list of (key, direction) "
                            "pairs, a dict of len 1, or an instance of "
                            "SON or OrderedDict")

    fields = kwargs.pop("fields", None)
    if fields is not None:
        kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")

    collation = validate_collation_or_none(kwargs.pop('collation', None))

    cmd = SON([("findAndModify", self.__name)])
    cmd.update(kwargs)

    write_concern = self._write_concern_for_cmd(cmd, None)

    def _find_and_modify(session, sock_info, retryable_write):
        if (sock_info.max_wire_version >= 4 and
                not write_concern.is_server_default):
            cmd['writeConcern'] = write_concern.document
        result = self._command(sock_info, cmd,
                               read_preference=ReadPreference.PRIMARY,
                               collation=collation, session=session,
                               retryable_write=retryable_write,
                               user_fields=_FIND_AND_MODIFY_DOC_FIELDS)

        _check_write_command_response(result)
        return result

    out = self.__database.client._retryable_write(
        write_concern.acknowledged, _find_and_modify, None)

    if full_response:
        return out
    else:
        document = out.get('value')
        if manipulate:
            document = self.__database._fix_outgoing(document, self)
        return document

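# find_and_modify() migration sketch using the modern single-document
# API (names are hypothetical).
from pymongo import MongoClient, ReturnDocument
from pymongo.collation import Collation

coll = MongoClient().test.counters
doc = coll.find_one_and_update(
    {"_id": "page_views"},
    {"$inc": {"n": 1}},
    upsert=True,
    return_document=ReturnDocument.AFTER,
    collation=Collation(locale="en_US"),
)
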
def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None:
    """Create an Index instance.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.

    Takes either a single key or a list of (key, direction) pairs. The
    key(s) must be an instance of :class:`basestring` (:class:`str` in
    python 3), and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`,
    :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`).

    Valid options include, but are not limited to:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated.
      - `unique`: if ``True``, creates a uniqueness constraint on the
        index.
      - `background`: if ``True``, this index should be created in the
        background.
      - `sparse`: if ``True``, omit from the index any documents that lack
        the indexed field.
      - `bucketSize`: for use with geoHaystack indexes.
        Number of documents to group together within a certain proximity
        to a given longitude and latitude.
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index.
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index.
      - `expireAfterSeconds`: <int> Used to create an expiring (TTL)
        collection. MongoDB will automatically delete documents from
        this collection after <int> seconds. The indexed field must
        be a UTC datetime or the data will not expire.
      - `partialFilterExpression`: A document that specifies a filter for
        a partial index.
      - `collation`: An instance of :class:`~pymongo.collation.Collation`
        that specifies the collation to use.
      - `wildcardProjection`: Allows users to include or exclude specific
        field paths from a `wildcard index`_ using the { "$**" : 1} key
        pattern. Requires MongoDB >= 4.2.
      - `hidden`: if ``True``, this index will be hidden from the query
        planner and will not be evaluated as part of query plan
        selection. Requires MongoDB >= 4.4.

    See the MongoDB documentation for a full list of supported options by
    server version.

    :Parameters:
      - `keys`: a single key or a list of (key, direction) pairs
        specifying the index to create
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 3.11
       Added the ``hidden`` option.
    .. versionchanged:: 3.2
       Added the ``partialFilterExpression`` option to support partial
       indexes.

    .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/
    """
    keys = _index_list(keys)
    if "name" not in kwargs:
        kwargs["name"] = _gen_index_name(keys)
    kwargs["key"] = _index_document(keys)
    collation = validate_collation_or_none(kwargs.pop("collation", None))
    self.__document = kwargs
    if collation is not None:
        self.__document["collation"] = collation

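# IndexModel usage sketch: the collation popped above ends up in the
# index document sent with createIndexes (names are hypothetical).
from pymongo import MongoClient, IndexModel, ASCENDING
from pymongo.collation import Collation

coll = MongoClient().test.people
coll.create_indexes([
    IndexModel([("name", ASCENDING)], name="name_de_ci",
               collation=Collation(locale="de", strength=2)),
])
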
def __init__(self, keys, **kwargs):
    """Create an Index instance.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.

    Takes either a single key or a list of (key, direction) pairs. The
    key(s) must be an instance of :class:`basestring` (:class:`str` in
    python 3), and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
    :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
    :data:`~pymongo.TEXT`).

    Valid options include, but are not limited to:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated.
      - `unique`: if ``True`` creates a uniqueness constraint on the index.
      - `background`: if ``True`` this index should be created in the
        background.
      - `sparse`: if ``True``, omit from the index any documents that lack
        the indexed field.
      - `bucketSize`: for use with geoHaystack indexes.
        Number of documents to group together within a certain proximity
        to a given longitude and latitude.
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index.
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index.
      - `expireAfterSeconds`: <int> Used to create an expiring (TTL)
        collection. MongoDB will automatically delete documents from
        this collection after <int> seconds. The indexed field must
        be a UTC datetime or the data will not expire.
      - `partialFilterExpression`: A document that specifies a filter for
        a partial index.
      - `collation`: An instance of :class:`~pymongo.collation.Collation`
        that specifies the collation to use in MongoDB >= 3.4.

    See the MongoDB documentation for a full list of supported options by
    server version.

    .. note:: `partialFilterExpression` requires server version **>= 3.2**

    :Parameters:
      - `keys`: a single key or a list of (key, direction) pairs
        specifying the index to create
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 3.2
       Added partialFilterExpression to support partial indexes.
    """
    keys = _index_list(keys)
    if "name" not in kwargs:
        kwargs["name"] = _gen_index_name(keys)
    kwargs["key"] = _index_document(keys)
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    self.__document = kwargs
    if collation is not None:
        self.__document['collation'] = collation

def __init__(self, collection, filter=None, projection=None, skip=0,
             limit=0, no_cursor_timeout=False,
             cursor_type=CursorType.NON_TAILABLE,
             sort=None, allow_partial_results=False, oplog_replay=False,
             modifiers=None, batch_size=0, manipulate=True,
             collation=None, hint=None, max_scan=None, max_time_ms=None,
             max=None, min=None, return_key=False, show_record_id=False,
             snapshot=False, comment=None, session=None):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    # Initialize all attributes used in __del__ before possibly raising
    # an error to avoid attribute errors during garbage collection.
    self.__id = None
    self.__exhaust = False
    self.__exhaust_mgr = None
    self.__killed = False

    if session:
        self.__session = session
        self.__explicit_session = True
    else:
        self.__session = None
        self.__explicit_session = False

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                           CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if modifiers is not None:
        warnings.warn("the 'modifiers' parameter is deprecated",
                      DeprecationWarning, stacklevel=2)
        validate_is_mapping("modifiers", modifiers)
    if not isinstance(batch_size, integer_types):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")

    if projection is not None:
        if not projection:
            projection = {"_id": 1}
        projection = helpers._fields_list_to_dict(projection, "projection")

    self.__collection = collection
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers and modifiers.copy() or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms = None
    self.__max = max
    self.__min = min
    self.__manipulate = manipulate
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are '
                                   'not supported by mongos')
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data = deque()
    self.__address = None
    self.__retrieved = 0

    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

def __init__(
    self,
    collection: "Collection[_DocumentType]",
    filter: Optional[Mapping[str, Any]] = None,
    projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
    skip: int = 0,
    limit: int = 0,
    no_cursor_timeout: bool = False,
    cursor_type: int = CursorType.NON_TAILABLE,
    sort: Optional[_Sort] = None,
    allow_partial_results: bool = False,
    oplog_replay: bool = False,
    batch_size: int = 0,
    collation: Optional[_CollationIn] = None,
    hint: Optional[_Hint] = None,
    max_scan: Optional[int] = None,
    max_time_ms: Optional[int] = None,
    max: Optional[_Sort] = None,
    min: Optional[_Sort] = None,
    return_key: Optional[bool] = None,
    show_record_id: Optional[bool] = None,
    snapshot: Optional[bool] = None,
    comment: Optional[Any] = None,
    session: Optional["ClientSession"] = None,
    allow_disk_use: Optional[bool] = None,
    let: Optional[bool] = None,
) -> None:
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. seealso:: The MongoDB documentation on `cursors
       <https://dochub.mongodb.org/core/cursors>`_.
    """
    # Initialize all attributes used in __del__ before possibly raising
    # an error to avoid attribute errors during garbage collection.
    self.__collection: Collection[_DocumentType] = collection
    self.__id: Any = None
    self.__exhaust = False
    self.__sock_mgr: Any = None
    self.__killed = False
    self.__session: Optional["ClientSession"]

    if session:
        self.__session = session
        self.__explicit_session = True
    else:
        self.__session = None
        self.__explicit_session = False

    spec: Mapping[str, Any] = filter or {}
    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if no_cursor_timeout and not self.__explicit_session:
        warnings.warn(
            "use an explicit session with no_cursor_timeout=True "
            "otherwise the cursor may still timeout after "
            "30 minutes, for more info see "
            "https://docs.mongodb.com/v4.4/reference/method/"
            "cursor.noCursorTimeout/"
            "#session-idle-timeout-overrides-nocursortimeout",
            UserWarning,
            stacklevel=2,
        )
    if cursor_type not in (
        CursorType.NON_TAILABLE,
        CursorType.TAILABLE,
        CursorType.TAILABLE_AWAIT,
        CursorType.EXHAUST,
    ):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if not isinstance(batch_size, int):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")
    # Only set if allow_disk_use is provided by the user, else None.
    if allow_disk_use is not None:
        allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

    if projection is not None:
        projection = helpers._fields_list_to_dict(projection, "projection")

    if let is not None:
        validate_is_document_type("let", let)

    self.__let = let
    self.__spec = spec
    self.__has_filter = filter is not None
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms: Optional[int] = None
    self.__max: Optional[Union[SON[Any, Any], _Sort]] = max
    self.__min: Optional[Union[SON[Any, Any], _Sort]] = min
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__allow_disk_use = allow_disk_use
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation(
                "Exhaust cursors are not supported by mongos")
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data: deque = deque()
    self.__address = None
    self.__retrieved = 0

    self.__codec_options = collection.codec_options
    # Read preference is set when the initial find is sent.
    self.__read_preference = None
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # The namespace to use for find/getMore commands.
    self.__dbname = collection.database.name
    self.__collname = collection.name

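# find() usage sketch showing options that feed this constructor
# (names are hypothetical; allow_disk_use needs MongoDB >= 4.4).
from pymongo import MongoClient
from pymongo.collation import Collation

coll = MongoClient().test.people
cursor = coll.find(
    {"age": {"$gte": 21}},
    projection={"_id": 0, "name": 1},
    sort=[("name", 1)],
    collation=Collation(locale="en_US", strength=2),
    allow_disk_use=True,
)
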
def __init__(self, collection: 'aiomongo.Collection',
             filter: Optional[dict] = None,
             projection: Optional[Union[dict, list]] = None,
             skip: int = 0, limit: int = 0,
             sort: Optional[List[tuple]] = None,
             modifiers: Optional[dict] = None, batch_size: int = 0,
             no_cursor_timeout: bool = False,
             collation: Optional[Union[Collation, dict]] = None) -> None:

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping('filter', spec)
    if not isinstance(skip, int):
        raise TypeError('skip must be an instance of int')
    if not isinstance(limit, int):
        raise TypeError('limit must be an instance of int')

    if modifiers is not None:
        validate_is_mapping('modifiers', modifiers)

    if not isinstance(batch_size, int):
        raise TypeError('batch_size must be an integer')
    if batch_size < 0:
        raise ValueError('batch_size must be >= 0')

    if projection is not None:
        if not projection:
            projection = {'_id': 1}
        projection = helpers._fields_list_to_dict(projection, 'projection')

    self.__id = None
    self.__codec_options = DEFAULT_CODEC_OPTIONS
    self.__collation = validate_collation_or_none(collation)
    self.__collection = collection
    self.__connection = None
    self.__data = deque()
    self.__explain = False
    self.__max_scan = None
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__hint = None
    self.__comment = None
    self.__max_time_ms = None
    self.__max_await_time_ms = None
    self.__max = None
    self.__min = None
    self.__killed = False
    # Overrides the default codec options set above.
    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern
    self.__retrieved = 0

    self.__query_flags = 0
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS['slave_okay']
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS['no_timeout']

def __init__(self, collection, filter=None, projection=None, skip=0,
             limit=0, no_cursor_timeout=False,
             cursor_type=CursorType.NON_TAILABLE,
             sort=None, allow_partial_results=False, oplog_replay=False,
             modifiers=None, batch_size=0, manipulate=True,
             collation=None, hint=None, max_scan=None, max_time_ms=None,
             max=None, min=None, return_key=False, show_record_id=False,
             snapshot=False, comment=None):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None
    self.__exhaust = False
    self.__exhaust_mgr = None

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                           CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if modifiers is not None:
        warnings.warn("the 'modifiers' parameter is deprecated",
                      DeprecationWarning, stacklevel=2)
        validate_is_mapping("modifiers", modifiers)
    if not isinstance(batch_size, integer_types):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")

    if projection is not None:
        if not projection:
            projection = {"_id": 1}
        projection = helpers._fields_list_to_dict(projection, "projection")

    self.__collection = collection
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers and modifiers.copy() or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms = None
    self.__max = max
    self.__min = min
    self.__manipulate = manipulate
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are '
                                   'not supported by mongos')
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data = deque()
    self.__address = None
    self.__retrieved = 0
    self.__killed = False

    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
