def hint(self, index):
    """Adds a 'hint', telling Mongo the proper index to use for the query.

    Judicious use of hints can greatly improve query
    performance. When doing a query on multiple fields (at least
    one of which is indexed) pass the indexed field as a hint to
    the query. Hinting will not do anything if the corresponding
    index does not exist. Raises
    :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used.

    `index` should be an index as passed to
    :meth:`~pymongo.collection.Collection.create_index`
    (e.g. ``[('field', ASCENDING)]``). If `index`
    is ``None`` any existing hints for this query are cleared. The
    last hint applied to this cursor takes precedence over all
    others.

    :Parameters:
      - `index`: index to hint on (as an index specifier)
    """
    self.__check_okay_to_chain()
    if index is None:
        self.__hint = None
        return self

    self.__hint = helpers._index_document(index)
    return self
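# Usage sketch for hint() (illustrative names; assumes a running mongod
# reachable through pymongo's MongoClient and a collection that actually
# has the hinted index).
from pymongo import MongoClient, ASCENDING

_coll = MongoClient().example_db.users
_coll.create_index([("age", ASCENDING)])
_cursor = _coll.find({"age": {"$gt": 21}, "name": "mike"})
_cursor = _cursor.hint([("age", ASCENDING)])  # force the 'age' index
_cursor = _cursor.hint(None)                  # clear any previous hint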
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None,
             _sock=None, _must_use_master=False, _is_command=False):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__timeout = timeout
    self.__tailable = tailable
    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__as_class = as_class
    self.__socket = _sock
    self.__must_use_master = _must_use_master
    self.__is_command = _is_command

    self.__data = []
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False
def __set_hint(self, index):
    if index is None:
        self.__hint = None
        return

    if isinstance(index, string_type):
        self.__hint = index
    else:
        self.__hint = helpers._index_document(index)
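# A simplified sketch of the helpers._index_document conversion that
# __set_hint relies on: a list of (key, direction) pairs becomes an
# order-preserving SON document. This is an illustrative approximation,
# not pymongo's actual implementation (which also validates its input).
from bson.son import SON

def _index_document_sketch(index_list):
    """Convert [('field', 1), ...] into SON([('field', 1), ...])."""
    doc = SON()
    for key, direction in index_list:
        doc[key] = direction
    return doc

assert (_index_document_sketch([("age", 1), ("name", -1)])
        == SON([("age", 1), ("name", -1)]))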
def __init__(self, keys, **kwargs):
    """Create an Index instance.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`
    (:class:`str` in python 3), and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
    :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
    :data:`~pymongo.TEXT`).

    Valid options include, but are not limited to:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated.
      - `unique`: if ``True`` creates a uniqueness constraint on the index.
      - `background`: if ``True`` this index should be created in the
        background.
      - `sparse`: if ``True``, omit from the index any documents that lack
        the indexed field.
      - `bucketSize`: for use with geoHaystack indexes.
        Number of documents to group together within a certain proximity
        to a given longitude and latitude.
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index.
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index.
      - `expireAfterSeconds`: <int> Used to create an expiring (TTL)
        collection. MongoDB will automatically delete documents from
        this collection after <int> seconds. The indexed field must
        be a UTC datetime or the data will not expire.
      - `partialFilterExpression`: A document that specifies a filter for
        a partial index.

    See the MongoDB documentation for a full list of supported options by
    server version.

    .. note:: `partialFilterExpression` requires server version **>= 3.2**

    :Parameters:
      - `keys`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 3.2
        Added partialFilterExpression to support partial indexes.
    """
    keys = _index_list(keys)
    if "name" not in kwargs:
        kwargs["name"] = _gen_index_name(keys)
    kwargs["key"] = _index_document(keys)
    self.__document = kwargs
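# Usage sketch for IndexModel with create_indexes() (illustrative names;
# assumes a running mongod). create_indexes() takes a list of IndexModel
# instances and builds them in a single call.
from pymongo import MongoClient, ASCENDING, DESCENDING, IndexModel

_coll = MongoClient().example_db.events
_coll.create_indexes([
    IndexModel([("user_id", ASCENDING)], name="user_idx", unique=True),
    IndexModel([("created", DESCENDING)], expireAfterSeconds=3600),
])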
def create_index(self, key_or_list, unique=False, ttl=300):
    """Creates an index on this collection.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`, and the
    directions must be one of (:data:`~pymongo.ASCENDING`,
    :data:`~pymongo.DESCENDING`). Returns the name of the created
    index.

    To create a single key index on the key ``'mike'`` we just use
    a string argument:

    >>> my_collection.create_index("mike")

    For a `compound index`_ on ``'mike'`` descending and ``'eliot'``
    ascending we need to use a list of tuples:

    >>> my_collection.create_index([("mike", pymongo.DESCENDING),
    ...                             ("eliot", pymongo.ASCENDING)])

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `unique` (optional): should this index guarantee
        uniqueness?
      - `ttl` (optional): time window (in seconds) during which
        this index will be recognized by subsequent calls to
        :meth:`ensure_index` - see documentation for
        :meth:`ensure_index` for details

    .. seealso:: :meth:`ensure_index`

    .. _compound index: http://www.mongodb.org/display/DOCS/Indexes#Indexes-CompoundKeysIndexes

    .. mongodoc:: indexes
    """
    if not isinstance(key_or_list, (str, unicode, list)):
        raise TypeError("key_or_list must either be a single key "
                        "or a list of (key, direction) pairs")

    to_save = SON()
    keys = helpers._index_list(key_or_list)
    name = self._gen_index_name(keys)
    to_save["name"] = name
    to_save["ns"] = self.__full_name
    to_save["key"] = helpers._index_document(keys)
    to_save["unique"] = unique

    self.__database.connection._cache_index(self.__database.name,
                                            self.__name, name, ttl)

    self.__database.system.indexes.insert(to_save, manipulate=False,
                                          check_keys=False)
    return to_save["name"]
def __init__(
    self,
    collection,
    spec,
    fields,
    skip,
    limit,
    slave_okay,
    timeout,
    tailable,
    snapshot=False,
    sort=None,
    max_scan=None,
    _sock=None,
    _must_use_master=False,
    _is_command=False,
):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__slave_okay = slave_okay
    self.__timeout = timeout
    self.__tailable = tailable
    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__socket = _sock
    self.__must_use_master = _must_use_master
    self.__is_command = _is_command

    self.__data = []
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False
def sort(self, key_or_list, direction=None):
    """Sorts this cursor's results.

    Pass a field name and a direction, either
    :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`::

        for doc in collection.find().sort('field', pymongo.ASCENDING):
            print(doc)

    To sort by multiple fields, pass a list of (key, direction) pairs::

        for doc in collection.find().sort([
                ('field1', pymongo.ASCENDING),
                ('field2', pymongo.DESCENDING)]):
            print(doc)

    Beginning with MongoDB version 2.6, text search results can be
    sorted by relevance::

        cursor = db.test.find(
            {'$text': {'$search': 'some words'}},
            {'score': {'$meta': 'textScore'}})

        # Sort by 'score' field.
        cursor.sort([('score', {'$meta': 'textScore'})])

        for doc in cursor:
            print(doc)

    Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used. Only the last :meth:`sort` applied to this
    cursor has any effect.

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the keys to sort on
      - `direction` (optional): only used if `key_or_list` is a single
        key, if not given :data:`~pymongo.ASCENDING` is assumed
    """
    self.__check_okay_to_chain()
    keys = helpers._index_list(key_or_list, direction)
    self.__ordering = helpers._index_document(keys)
    return self
def __init__(self, collection, spec, fields, skip, limit, slave_okay,
             timeout, tailable, snapshot=False, _IOStream=None,
             _must_use_master=False, _is_command=False, as_class=None,
             sort=None):
    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__slave_okay = slave_okay
    self.__timeout = timeout
    self.__tailable = tailable
    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__explain = False
    self.__hint = None
    self.__IOStream = _IOStream
    self.__must_use_master = _must_use_master
    self.__is_command = _is_command
    self.__as_class = as_class

    self.__data = []
    self.__id = None
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False
def sort(self, key_or_list, direction=None):
    """Sorts this cursor's results.

    Takes either a single key and a direction, or a list of (key,
    direction) pairs. The key(s) must be an instance of ``(str,
    unicode)``, and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`). Raises
    :class:`~pymongo.errors.InvalidOperation` if this cursor has
    already been used. Only the last :meth:`sort` applied to this
    cursor has any effect.

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the keys to sort on
      - `direction` (optional): only used if `key_or_list` is a single
        key, if not given :data:`~pymongo.ASCENDING` is assumed
    """
    self.__check_okay_to_chain()
    keys = helpers._index_list(key_or_list, direction)
    self.__ordering = helpers._index_document(keys)
    return self
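# Usage sketch for sort() (illustrative names; assumes a running mongod).
# A bare key sorts ASCENDING; pass (key, direction) pairs for compound
# sorts.
from pymongo import MongoClient, ASCENDING, DESCENDING

_coll = MongoClient().example_db.people
list(_coll.find().sort("age"))              # single key, ASCENDING default
list(_coll.find().sort("age", DESCENDING))  # single key, explicit direction
list(_coll.find().sort([("age", DESCENDING), ("name", ASCENDING)]))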
def create_index(self, key_or_list, cache_for=300, **kwargs):
    """Creates an index on this collection.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`
    (:class:`str` in python 3), and the directions must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`). Returns the name of the created index.

    All optional index creation parameters should be passed as
    keyword arguments to this method. For the list of valid options see
    http://docs.mongodb.org/manual/reference/method/db.collection.ensureIndex/#db.collection.ensureIndex
    """
    keys = _index_list(key_or_list)
    index = {"key": _index_document(keys), "ns": self._collection_name}
    name = "name" in kwargs and kwargs["name"] or helpers._gen_index_name(keys)
    index["name"] = name
    index.update(kwargs)

    Client(self._database, 'system.indexes').insert(index, check_keys=False)
    self._database._cache_index(self._collection, name, cache_for)

    return name
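# Hypothetical call sites for this create_index() variant (the enclosing
# driver class is not shown here, so these lines are illustrative only):
# cache_for controls how long ensure_index-style caching treats the index
# as already created.
#
# collection.create_index("username", unique=True)
# collection.create_index([("loc", "2d")], name="loc_2d", cache_for=600)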
def create_index(self, key_or_list, unique=False, ttl=300):
    """Creates an index on this collection.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`, and the
    directions must be one of (:data:`~pymongo.ASCENDING`,
    :data:`~pymongo.DESCENDING`). Returns the name of the created
    index.

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `unique` (optional): should this index guarantee
        uniqueness?
      - `ttl` (optional): time window (in seconds) during which
        this index will be recognized by subsequent calls to
        :meth:`ensure_index` - see documentation for
        :meth:`ensure_index` for details
    """
    if not isinstance(key_or_list, (str, unicode, list)):
        raise TypeError("key_or_list must either be a single key "
                        "or a list of (key, direction) pairs")

    to_save = SON()
    keys = helpers._index_list(key_or_list)
    name = self._gen_index_name(keys)
    to_save["name"] = name
    to_save["ns"] = self.__full_name
    to_save["key"] = helpers._index_document(keys)
    to_save["unique"] = unique

    self.__database.connection._cache_index(self.__database.name,
                                            self.__name, name, ttl)

    self.__database.system.indexes.insert(to_save, manipulate=False,
                                          check_keys=False)
    return to_save["name"]
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY,
             tag_sets=[{}], secondary_acceptable_latency_ms=None,
             exhaust=False, compile_re=True, oplog_replay=False,
             modifiers=None, _must_use_master=False, _codec_options=None,
             **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    # Backport aliases.
    if 'filter' in kwargs:
        spec = kwargs['filter']
    if 'projection' in kwargs:
        fields = kwargs['projection']
    if 'no_cursor_timeout' in kwargs:
        timeout = not kwargs['no_cursor_timeout']
    if 'allow_partial_results' in kwargs:
        partial = kwargs['allow_partial_results']
    if 'cursor_type' in kwargs:
        crt = kwargs['cursor_type']
        if crt not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                       CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        exhaust = crt == CursorType.EXHAUST
        tailable = crt == CursorType.TAILABLE
        if crt == CursorType.TAILABLE_AWAIT:
            await_data = True
            tailable = True

    if modifiers is not None:
        if not isinstance(modifiers, dict):
            raise TypeError("%s must be an instance of dict or subclass"
                            % (modifiers,))
        if '$snapshot' in modifiers:
            snapshot = modifiers['$snapshot']
        if '$maxScan' in modifiers:
            max_scan = modifiers['$maxScan']

    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")
    if not isinstance(oplog_replay, bool):
        raise TypeError("oplog_replay must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__max_time_ms = None
    self.__batch_size = 0
    self.__max = None
    self.__min = None
    self.__modifiers = modifiers and modifiers.copy() or {}

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__comment = None
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__compile_re = compile_re
    self.__must_use_master = _must_use_master

    copts = _codec_options or collection.codec_options
    if as_class is not None:
        copts = _CodecOptions(as_class, copts.tz_aware,
                              copts.uuid_representation)
    self.__codec_options = copts

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
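# A small sketch of the cursor_type backport mapping implemented above:
# each CursorType value decomposes into the legacy tailable/await_data/
# exhaust flags (CursorType as shipped in pymongo.cursor).
from pymongo.cursor import CursorType

def _flags_for_cursor_type(crt):
    """Return (tailable, await_data, exhaust) for a CursorType value."""
    tailable = crt in (CursorType.TAILABLE, CursorType.TAILABLE_AWAIT)
    await_data = crt == CursorType.TAILABLE_AWAIT
    exhaust = crt == CursorType.EXHAUST
    return tailable, await_data, exhaust

assert _flags_for_cursor_type(CursorType.TAILABLE_AWAIT) == (True, True, False)
assert _flags_for_cursor_type(CursorType.EXHAUST) == (False, False, True)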
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             _must_use_master=False, _is_command=False, **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = 0

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__timeout = timeout
    self.__tailable = tailable
    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__as_class = as_class
    self.__slave_okay = slave_okay
    self.__tz_aware = collection.database.connection.tz_aware
    self.__must_use_master = _must_use_master
    self.__is_command = _is_command

    self.__data = []
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
def __init__(self, collection: 'aiomongo.Collection',
             filter: Optional[dict] = None,
             projection: Optional[Union[dict, list]] = None,
             skip: int = 0, limit: int = 0,
             sort: Optional[List[tuple]] = None,
             modifiers: Optional[dict] = None,
             batch_size: int = 0,
             no_cursor_timeout: bool = False) -> None:

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping('filter', spec)
    if not isinstance(skip, int):
        raise TypeError('skip must be an instance of int')
    if not isinstance(limit, int):
        raise TypeError('limit must be an instance of int')

    if modifiers is not None:
        validate_is_mapping('modifiers', modifiers)

    if not isinstance(batch_size, int):
        raise TypeError('batch_size must be an integer')
    if batch_size < 0:
        raise ValueError('batch_size must be >= 0')

    if projection is not None:
        if not projection:
            projection = {'_id': 1}
        projection = helpers._fields_list_to_dict(projection, 'projection')

    self.__id = None
    self.__codec_options = DEFAULT_CODEC_OPTIONS
    self.__collection = collection
    self.__connection = None
    self.__data = deque()
    self.__explain = False
    self.__max_scan = None
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__hint = None
    self.__comment = None
    self.__max_time_ms = None
    self.__max_await_time_ms = None
    self.__max = None
    self.__min = None
    self.__killed = False

    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern

    self.__retrieved = 0

    self.__query_flags = 0
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS['slave_okay']
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS['no_timeout']
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY,
             tag_sets=[{}], secondary_acceptable_latency_ms=None,
             exhaust=False, compile_re=True, oplog_replay=False,
             modifiers=None, _must_use_master=False, _codec_options=None,
             **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    # Backport aliases.
    if 'filter' in kwargs:
        spec = kwargs['filter']
    if 'projection' in kwargs:
        fields = kwargs['projection']
    if 'no_cursor_timeout' in kwargs:
        timeout = not kwargs['no_cursor_timeout']
    if 'allow_partial_results' in kwargs:
        partial = kwargs['allow_partial_results']
    if 'cursor_type' in kwargs:
        crt = kwargs['cursor_type']
        if crt not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                       CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        exhaust = crt == CursorType.EXHAUST
        tailable = crt == CursorType.TAILABLE
        if crt == CursorType.TAILABLE_AWAIT:
            await_data = True
            tailable = True

    if modifiers is not None:
        if not isinstance(modifiers, dict):
            raise TypeError("%s must be an instance of dict or subclass"
                            % (modifiers,))
        if '$snapshot' in modifiers:
            snapshot = modifiers['$snapshot']
        if '$maxScan' in modifiers:
            max_scan = modifiers['$maxScan']

    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")
    if not isinstance(oplog_replay, bool):
        raise TypeError("oplog_replay must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__max_time_ms = None
    self.__batch_size = 0
    self.__max = None
    self.__min = None
    self.__modifiers = modifiers and modifiers.copy() or {}

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__comment = None
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__compile_re = compile_re
    self.__must_use_master = _must_use_master

    copts = _codec_options or collection.codec_options
    if as_class is not None:
        copts = _CodecOptions(
            as_class, copts.tz_aware, copts.uuid_representation)
    self.__codec_options = copts

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None,
             await_data=False, partial=False, manipulate=True,
             read_preference=None, exhaust=False, compile_re=True,
             _uuid_subtype=None):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__max_time_ms = None
    self.__batch_size = 0
    self.__max = None
    self.__min = None

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__comment = None
    self.__as_class = as_class
    self.__manipulate = manipulate
    self.__read_preference = read_preference or collection.read_preference
    self.__tz_aware = collection.database.connection.tz_aware
    self.__compile_re = compile_re
    self.__uuid_subtype = _uuid_subtype or collection.uuid_subtype

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
def __init__(
    self,
    collection: "Collection[_DocumentType]",
    filter: Optional[Mapping[str, Any]] = None,
    projection: Optional[Union[Mapping[str, Any], Iterable[str]]] = None,
    skip: int = 0,
    limit: int = 0,
    no_cursor_timeout: bool = False,
    cursor_type: int = CursorType.NON_TAILABLE,
    sort: Optional[_Sort] = None,
    allow_partial_results: bool = False,
    oplog_replay: bool = False,
    batch_size: int = 0,
    collation: Optional[_CollationIn] = None,
    hint: Optional[_Hint] = None,
    max_scan: Optional[int] = None,
    max_time_ms: Optional[int] = None,
    max: Optional[_Sort] = None,
    min: Optional[_Sort] = None,
    return_key: Optional[bool] = None,
    show_record_id: Optional[bool] = None,
    snapshot: Optional[bool] = None,
    comment: Optional[Any] = None,
    session: Optional["ClientSession"] = None,
    allow_disk_use: Optional[bool] = None,
    let: Optional[bool] = None,
) -> None:
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. seealso:: The MongoDB documentation on `cursors
       <https://dochub.mongodb.org/core/cursors>`_.
    """
    # Initialize all attributes used in __del__ before possibly raising
    # an error to avoid attribute errors during garbage collection.
    self.__collection: Collection[_DocumentType] = collection
    self.__id: Any = None
    self.__exhaust = False
    self.__sock_mgr: Any = None
    self.__killed = False
    self.__session: Optional["ClientSession"]

    if session:
        self.__session = session
        self.__explicit_session = True
    else:
        self.__session = None
        self.__explicit_session = False

    spec: Mapping[str, Any] = filter or {}
    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if no_cursor_timeout and not self.__explicit_session:
        warnings.warn(
            "use an explicit session with no_cursor_timeout=True "
            "otherwise the cursor may still timeout after "
            "30 minutes, for more info see "
            "https://docs.mongodb.com/v4.4/reference/method/"
            "cursor.noCursorTimeout/"
            "#session-idle-timeout-overrides-nocursortimeout",
            UserWarning,
            stacklevel=2,
        )
    if cursor_type not in (
        CursorType.NON_TAILABLE,
        CursorType.TAILABLE,
        CursorType.TAILABLE_AWAIT,
        CursorType.EXHAUST,
    ):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if not isinstance(batch_size, int):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")
    # Only set if allow_disk_use is provided by the user, else None.
    if allow_disk_use is not None:
        allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

    if projection is not None:
        projection = helpers._fields_list_to_dict(projection, "projection")

    if let is not None:
        validate_is_document_type("let", let)

    self.__let = let
    self.__spec = spec
    self.__has_filter = filter is not None
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms: Optional[int] = None
    self.__max: Optional[Union[SON[Any, Any], _Sort]] = max
    self.__min: Optional[Union[SON[Any, Any], _Sort]] = min
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__allow_disk_use = allow_disk_use
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation("Exhaust cursors are not supported by mongos")
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data: deque = deque()
    self.__address = None
    self.__retrieved = 0

    self.__codec_options = collection.codec_options
    # Read preference is set when the initial find is sent.
    self.__read_preference = None
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # The namespace to use for find/getMore commands.
    self.__dbname = collection.database.name
    self.__collname = collection.name
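# Usage sketch: application code reaches this constructor through
# Collection.find(); the keyword arguments below map one-to-one onto the
# parameters above (illustrative filter/projection; assumes a running
# mongod).
from pymongo import MongoClient, DESCENDING
from pymongo.cursor import CursorType

_coll = MongoClient().example_db.example_coll
_cursor = _coll.find(
    {"status": "active"},              # filter
    {"_id": 0, "status": 1},           # projection
    skip=10,
    limit=50,
    sort=[("created", DESCENDING)],
    cursor_type=CursorType.NON_TAILABLE,
    batch_size=100,
)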
def __init__(self, collection, filter=None, projection=None, skip=0,
             limit=0, no_cursor_timeout=False,
             cursor_type=CursorType.NON_TAILABLE,
             sort=None, allow_partial_results=False, oplog_replay=False,
             modifiers=None, batch_size=0, manipulate=True,
             collation=None, hint=None, max_scan=None, max_time_ms=None,
             max=None, min=None, return_key=False, show_record_id=False,
             snapshot=False, comment=None, session=None):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    # Initialize all attributes used in __del__ before possibly raising
    # an error to avoid attribute errors during garbage collection.
    self.__id = None
    self.__exhaust = False
    self.__exhaust_mgr = None
    self.__killed = False

    if session:
        self.__session = session
        self.__explicit_session = True
    else:
        self.__session = None
        self.__explicit_session = False

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                           CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if modifiers is not None:
        warnings.warn("the 'modifiers' parameter is deprecated",
                      DeprecationWarning, stacklevel=2)
        validate_is_mapping("modifiers", modifiers)
    if not isinstance(batch_size, integer_types):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")

    if projection is not None:
        if not projection:
            projection = {"_id": 1}
        projection = helpers._fields_list_to_dict(projection, "projection")

    self.__collection = collection
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers and modifiers.copy() or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms = None
    self.__max = max
    self.__min = min
    self.__manipulate = manipulate
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are '
                                   'not supported by mongos')
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data = deque()
    self.__address = None
    self.__retrieved = 0

    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
def create_index(self, key_or_list, deprecated_unique=None,
                 ttl=300, **kwargs):
    """Creates an index on this collection.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`, and the
    directions must be one of (:data:`~pymongo.ASCENDING`,
    :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`). Returns
    the name of the created index.

    To create a single key index on the key ``'mike'`` we just use
    a string argument:

    >>> my_collection.create_index("mike")

    For a compound index on ``'mike'`` descending and ``'eliot'``
    ascending we need to use a list of tuples:

    >>> my_collection.create_index([("mike", pymongo.DESCENDING),
    ...                             ("eliot", pymongo.ASCENDING)])

    All optional index creation parameters should be passed as
    keyword arguments to this method. Valid options include:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated
      - `unique`: should this index guarantee uniqueness?
      - `dropDups` or `drop_dups`: should we drop duplicates
        during index creation when creating a unique index?
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
        index
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
        index

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `deprecated_unique`: DEPRECATED - use `unique` as a kwarg
      - `ttl` (optional): time window (in seconds) during which
        this index will be recognized by subsequent calls to
        :meth:`ensure_index` - see documentation for
        :meth:`ensure_index` for details
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 1.5.1
       Accept kwargs to support all index creation options.

    .. versionadded:: 1.5
       The `name` parameter.

    .. seealso:: :meth:`ensure_index`

    .. mongodoc:: indexes
    """
    keys = helpers._index_list(key_or_list)
    index_doc = helpers._index_document(keys)

    index = {"key": index_doc, "ns": self.__full_name}

    if deprecated_unique is not None:
        warnings.warn("using a positional arg to specify unique is "
                      "deprecated, please use kwargs",
                      DeprecationWarning)
        index["unique"] = deprecated_unique

    name = "name" in kwargs and kwargs["name"] or _gen_index_name(keys)
    index["name"] = name

    if "drop_dups" in kwargs:
        kwargs["dropDups"] = kwargs.pop("drop_dups")

    index.update(kwargs)

    self.__database.system.indexes.insert(index, manipulate=False,
                                          check_keys=False, safe=True)

    self.__database.connection._cache_index(self.__database.name,
                                            self.__name, name, ttl)

    return name
def __init__(self, keys: _IndexKeyHint, **kwargs: Any) -> None:
    """Create an Index instance.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`
    (:class:`str` in python 3), and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOSPHERE`,
    :data:`~pymongo.HASHED`, :data:`~pymongo.TEXT`).

    Valid options include, but are not limited to:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated.
      - `unique`: if ``True``, creates a uniqueness constraint on the index.
      - `background`: if ``True``, this index should be created in the
        background.
      - `sparse`: if ``True``, omit from the index any documents that lack
        the indexed field.
      - `bucketSize`: for use with geoHaystack indexes.
        Number of documents to group together within a certain proximity
        to a given longitude and latitude.
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index.
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index.
      - `expireAfterSeconds`: <int> Used to create an expiring (TTL)
        collection. MongoDB will automatically delete documents from
        this collection after <int> seconds. The indexed field must
        be a UTC datetime or the data will not expire.
      - `partialFilterExpression`: A document that specifies a filter for
        a partial index.
      - `collation`: An instance of :class:`~pymongo.collation.Collation`
        that specifies the collation to use.
      - `wildcardProjection`: Allows users to include or exclude specific
        field paths from a `wildcard index`_ using the { "$**" : 1} key
        pattern. Requires MongoDB >= 4.2.
      - `hidden`: if ``True``, this index will be hidden from the query
        planner and will not be evaluated as part of query plan
        selection. Requires MongoDB >= 4.4.

    See the MongoDB documentation for a full list of supported options by
    server version.

    :Parameters:
      - `keys`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 3.11
       Added the ``hidden`` option.
    .. versionchanged:: 3.2
       Added the ``partialFilterExpression`` option to support partial
       indexes.

    .. _wildcard index: https://docs.mongodb.com/master/core/index-wildcard/
    """
    keys = _index_list(keys)
    if "name" not in kwargs:
        kwargs["name"] = _gen_index_name(keys)
    kwargs["key"] = _index_document(keys)
    collation = validate_collation_or_none(kwargs.pop("collation", None))
    self.__document = kwargs
    if collation is not None:
        self.__document["collation"] = collation
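# Usage sketch for the typed IndexModel above with a collation and a
# wildcard projection (illustrative values; each option requires a
# new-enough server, e.g. wildcardProjection needs MongoDB >= 4.2).
from pymongo import MongoClient, ASCENDING, IndexModel
from pymongo.collation import Collation

_coll = MongoClient().example_db.users
_coll.create_indexes([
    IndexModel([("email", ASCENDING)], unique=True,
               collation=Collation(locale="en", strength=2)),
    IndexModel([("$**", ASCENDING)],
               wildcardProjection={"metadata.tags": 1}),
])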
def create_index(self, key_or_list, deprecated_unique=None,
                 ttl=300, **kwargs):
    """Creates an index on this collection.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`, and the
    directions must be one of (:data:`~pymongo.ASCENDING`,
    :data:`~pymongo.DESCENDING`, :data:`~pymongo.GEO2D`). Returns
    the name of the created index.

    To create a single key index on the key ``'mike'`` we just use
    a string argument:

    >>> my_collection.create_index("mike")

    For a compound index on ``'mike'`` descending and ``'eliot'``
    ascending we need to use a list of tuples:

    >>> my_collection.create_index([("mike", pymongo.DESCENDING),
    ...                             ("eliot", pymongo.ASCENDING)])

    All optional index creation parameters should be passed as
    keyword arguments to this method. Valid options include:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated
      - `unique`: should this index guarantee uniqueness?
      - `dropDups` or `drop_dups`: should we drop duplicates
        during index creation when creating a unique index?
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D`
        index
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D`
        index

    :Parameters:
      - `key_or_list`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `deprecated_unique`: DEPRECATED - use `unique` as a kwarg
      - `ttl` (optional): time window (in seconds) during which
        this index will be recognized by subsequent calls to
        :meth:`ensure_index` - see documentation for
        :meth:`ensure_index` for details
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 1.5.1
       Accept kwargs to support all index creation options.

    .. versionadded:: 1.5
       The `name` parameter.

    .. seealso:: :meth:`ensure_index`

    .. mongodoc:: indexes
    """
    keys = helpers._index_list(key_or_list)
    index_doc = helpers._index_document(keys)

    index = {"key": index_doc, "ns": self.__full_name}

    if deprecated_unique is not None:
        warnings.warn("using a positional arg to specify unique is "
                      "deprecated, please use kwargs",
                      DeprecationWarning)
        index["unique"] = deprecated_unique

    name = "name" in kwargs and kwargs["name"] or _gen_index_name(keys)
    index["name"] = name

    if "drop_dups" in kwargs:
        kwargs["dropDups"] = kwargs.pop("drop_dups")

    index.update(kwargs)

    self.__database.connection._cache_index(self.__database.name,
                                            self.__name, name, ttl)

    self.__database.system.indexes.insert(index, manipulate=False,
                                          check_keys=False)

    return name
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY,
             tag_sets=[{}], secondary_acceptable_latency_ms=None,
             exhaust=False, compile_re=True, _must_use_master=False,
             _uuid_subtype=None, **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__max_time_ms = None
    self.__batch_size = 0
    self.__max = None
    self.__min = None

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__comment = None
    self.__as_class = as_class
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__tz_aware = collection.database.connection.tz_aware
    self.__compile_re = compile_re
    self.__must_use_master = _must_use_master
    self.__uuid_subtype = _uuid_subtype or collection.uuid_subtype

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY,
             tag_sets=[{}], secondary_acceptable_latency_ms=None,
             _must_use_master=False, _uuid_subtype=None, **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None

    if spec is None:
        spec = {}

    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")

    if fields is not None:
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    if as_class is None:
        as_class = collection.database.connection.document_class

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = 0

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__timeout = timeout
    self.__tailable = tailable
    self.__await_data = tailable and await_data
    self.__partial = partial
    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__as_class = as_class
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__tz_aware = collection.database.connection.tz_aware
    self.__must_use_master = _must_use_master
    self.__uuid_subtype = _uuid_subtype or collection.uuid_subtype
    self.__query_flags = 0

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
def __init__(self, collection, filter=None, projection=None, skip=0,
             limit=0, no_cursor_timeout=False,
             cursor_type=CursorType.NON_TAILABLE,
             sort=None, allow_partial_results=False, oplog_replay=False,
             modifiers=None, batch_size=0, manipulate=True,
             collation=None, hint=None, max_scan=None, max_time_ms=None,
             max=None, min=None, return_key=False, show_record_id=False,
             snapshot=False, comment=None):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    .. mongodoc:: cursors
    """
    self.__id = None
    self.__exhaust = False
    self.__exhaust_mgr = None

    spec = filter
    if spec is None:
        spec = {}

    validate_is_mapping("filter", spec)
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    validate_boolean("no_cursor_timeout", no_cursor_timeout)
    if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                           CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
        raise ValueError("not a valid value for cursor_type")
    validate_boolean("allow_partial_results", allow_partial_results)
    validate_boolean("oplog_replay", oplog_replay)
    if modifiers is not None:
        warnings.warn("the 'modifiers' parameter is deprecated",
                      DeprecationWarning, stacklevel=2)
        validate_is_mapping("modifiers", modifiers)
    if not isinstance(batch_size, integer_types):
        raise TypeError("batch_size must be an integer")
    if batch_size < 0:
        raise ValueError("batch_size must be >= 0")

    if projection is not None:
        if not projection:
            projection = {"_id": 1}
        projection = helpers._fields_list_to_dict(projection, "projection")

    self.__collection = collection
    self.__spec = spec
    self.__projection = projection
    self.__skip = skip
    self.__limit = limit
    self.__batch_size = batch_size
    self.__modifiers = modifiers and modifiers.copy() or {}
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__comment = comment
    self.__max_time_ms = max_time_ms
    self.__max_await_time_ms = None
    self.__max = max
    self.__min = min
    self.__manipulate = manipulate
    self.__collation = validate_collation_or_none(collation)
    self.__return_key = return_key
    self.__show_record_id = show_record_id
    self.__snapshot = snapshot
    self.__set_hint(hint)

    # Exhaust cursor support
    if cursor_type == CursorType.EXHAUST:
        if self.__collection.database.client.is_mongos:
            raise InvalidOperation('Exhaust cursors are '
                                   'not supported by mongos')
        if limit:
            raise InvalidOperation("Can't use limit and exhaust together.")
        self.__exhaust = True

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__data = deque()
    self.__address = None
    self.__retrieved = 0
    self.__killed = False

    self.__codec_options = collection.codec_options
    self.__read_preference = collection.read_preference
    self.__read_concern = collection.read_concern

    self.__query_flags = cursor_type
    if self.__read_preference != ReadPreference.PRIMARY:
        self.__query_flags |= _QUERY_OPTIONS["slave_okay"]
    if no_cursor_timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if allow_partial_results:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]
def __init__(self, keys, **kwargs):
    """Create an Index instance.

    For use with :meth:`~pymongo.collection.Collection.create_indexes`.

    Takes either a single key or a list of (key, direction) pairs.
    The key(s) must be an instance of :class:`basestring`
    (:class:`str` in python 3), and the direction(s) must be one of
    (:data:`~pymongo.ASCENDING`, :data:`~pymongo.DESCENDING`,
    :data:`~pymongo.GEO2D`, :data:`~pymongo.GEOHAYSTACK`,
    :data:`~pymongo.GEOSPHERE`, :data:`~pymongo.HASHED`,
    :data:`~pymongo.TEXT`).

    Valid options include, but are not limited to:

      - `name`: custom name to use for this index - if none is
        given, a name will be generated.
      - `unique`: if ``True`` creates a uniqueness constraint on the index.
      - `background`: if ``True`` this index should be created in the
        background.
      - `sparse`: if ``True``, omit from the index any documents that lack
        the indexed field.
      - `bucketSize`: for use with geoHaystack indexes.
        Number of documents to group together within a certain proximity
        to a given longitude and latitude.
      - `min`: minimum value for keys in a :data:`~pymongo.GEO2D` index.
      - `max`: maximum value for keys in a :data:`~pymongo.GEO2D` index.
      - `expireAfterSeconds`: <int> Used to create an expiring (TTL)
        collection. MongoDB will automatically delete documents from
        this collection after <int> seconds. The indexed field must
        be a UTC datetime or the data will not expire.
      - `partialFilterExpression`: A document that specifies a filter for
        a partial index.
      - `collation`: An instance of :class:`~pymongo.collation.Collation`
        that specifies the collation to use in MongoDB >= 3.4.

    See the MongoDB documentation for a full list of supported options by
    server version.

    .. note:: `partialFilterExpression` requires server version **>= 3.2**

    :Parameters:
      - `keys`: a single key or a list of (key, direction)
        pairs specifying the index to create
      - `**kwargs` (optional): any additional index creation
        options (see the above list) should be passed as keyword
        arguments

    .. versionchanged:: 3.2
        Added partialFilterExpression to support partial indexes.
    """
    keys = _index_list(keys)
    if "name" not in kwargs:
        kwargs["name"] = _gen_index_name(keys)
    kwargs["key"] = _index_document(keys)
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    self.__document = kwargs
    if collation is not None:
        self.__document['collation'] = collation
def find_and_modify(self, query={}, update=None, upsert=False, sort=None,
                    full_response=False, manipulate=False, **kwargs):
    warnings.warn(
        "find_and_modify is deprecated, use find_one_and_delete"
        ", find_one_and_replace, or find_one_and_update instead",
        DeprecationWarning, stacklevel=2)

    if not update and not kwargs.get('remove', None):
        raise ValueError("Must either update or remove")

    if update and kwargs.get('remove', None):
        raise ValueError("Can't do both update and remove")

    # No need to include empty args
    if query:
        kwargs['query'] = query
    if update:
        kwargs['update'] = update
    if upsert:
        kwargs['upsert'] = upsert
    if sort:
        # Accept a list of tuples to match Cursor's sort parameter.
        if isinstance(sort, list):
            kwargs['sort'] = helpers._index_document(sort)
        # Accept OrderedDict, SON, and dict with len == 1 so we
        # don't break existing code already using find_and_modify.
        elif (isinstance(sort, ORDERED_TYPES) or
              isinstance(sort, dict) and len(sort) == 1):
            warnings.warn(
                "Passing mapping types for `sort` is deprecated,"
                " use a list of (key, direction) pairs instead",
                DeprecationWarning, stacklevel=2)
            kwargs['sort'] = sort
        else:
            raise TypeError("sort must be a list of (key, direction) "
                            "pairs, a dict of len 1, or an instance of "
                            "SON or OrderedDict")

    fields = kwargs.pop("fields", None)
    if fields is not None:
        kwargs["fields"] = helpers._fields_list_to_dict(fields, "fields")

    collation = validate_collation_or_none(kwargs.pop('collation', None))

    cmd = SON([("findAndModify", self.__name)])
    cmd.update(kwargs)

    write_concern = self._write_concern_for_cmd(cmd, None)

    def _find_and_modify(session, sock_info, retryable_write):
        if (sock_info.max_wire_version >= 4 and
                not write_concern.is_server_default):
            cmd['writeConcern'] = write_concern.document
        result = self._command(
            sock_info, cmd, read_preference=ReadPreference.PRIMARY,
            collation=collation, session=session,
            retryable_write=retryable_write,
            user_fields=_FIND_AND_MODIFY_DOC_FIELDS)
        _check_write_command_response(result)
        return result

    out = self.__database.client._retryable_write(
        write_concern.acknowledged, _find_and_modify, None)

    if full_response:
        return out
    else:
        document = out.get('value')
        if manipulate:
            document = self.__database._fix_outgoing(document, self)
        return document
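# Usage sketch contrasting the deprecated find_and_modify() with its
# replacement find_one_and_update() (illustrative filter/update; assumes a
# running mongod and a pymongo version that still ships the old method).
from pymongo import MongoClient, ReturnDocument

_coll = MongoClient().example_db.jobs
# Legacy call: sort passed as a list of (key, direction) pairs, as
# handled by the method above.
_doc = _coll.find_and_modify(query={"state": "queued"},
                             update={"$set": {"state": "running"}},
                             sort=[("priority", -1)])
# Preferred replacement:
_doc = _coll.find_one_and_update({"state": "queued"},
                                 {"$set": {"state": "running"}},
                                 sort=[("priority", -1)],
                                 return_document=ReturnDocument.AFTER)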