def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY,
             tag_sets=None,
             secondary_acceptable_latency_ms=None, exhaust=False,
             compile_re=True, oplog_replay=False, modifiers=None,
             _must_use_master=False, _codec_options=None, **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    :Parameters:
      - `collection`: the collection this cursor queries
      - `spec` (optional): query document (alias: ``filter`` kwarg)
      - `fields` (optional): projection (alias: ``projection`` kwarg)
      - `tag_sets` (optional): replica-set member tag sets; defaults to
        ``[{}]`` (match any member) when not given

    .. mongodoc:: cursors
    """
    # FIX: 'tag_sets' previously defaulted to the mutable literal [{}],
    # which is evaluated once and shared by every call to __init__.
    # Use None as the sentinel and build a fresh list per call.
    if tag_sets is None:
        tag_sets = [{}]

    # Backport aliases: accept PyMongo 3.x find() option names and map
    # them onto the legacy parameter names used by this class.
    if 'filter' in kwargs:
        spec = kwargs['filter']
    if 'projection' in kwargs:
        fields = kwargs['projection']
    if 'no_cursor_timeout' in kwargs:
        timeout = not kwargs['no_cursor_timeout']
    if 'allow_partial_results' in kwargs:
        partial = kwargs['allow_partial_results']
    if 'cursor_type' in kwargs:
        crt = kwargs['cursor_type']
        if crt not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                       CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        exhaust = crt == CursorType.EXHAUST
        tailable = crt == CursorType.TAILABLE
        if crt == CursorType.TAILABLE_AWAIT:
            await_data = True
            tailable = True

    # Query modifiers ($snapshot, $maxScan) may also arrive via the
    # 'modifiers' dict; they override the keyword parameters.
    if modifiers is not None:
        if not isinstance(modifiers, dict):
            raise TypeError("%s must be an instance of dict or subclass"
                            % (modifiers,))
        if '$snapshot' in modifiers:
            snapshot = modifiers['$snapshot']
        if '$maxScan' in modifiers:
            max_scan = modifiers['$maxScan']

    self.__id = None

    if spec is None:
        spec = {}

    # Eager type validation so errors surface at construction time
    # rather than when the query is sent.
    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")
    if not isinstance(oplog_replay, bool):
        raise TypeError("oplog_replay must be an instance of bool")

    if fields is not None:
        # An empty projection means "return only _id".
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__max_time_ms = None
    self.__batch_size = 0
    self.__max = None
    self.__min = None
    # Copy so later mutation of the caller's dict can't affect us.
    self.__modifiers = modifiers and modifiers.copy() or {}

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__comment = None
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__compile_re = compile_re
    self.__must_use_master = _must_use_master

    # Codec options: explicit _codec_options wins, else inherit from the
    # collection; a legacy 'as_class' overrides the document class.
    copts = _codec_options or collection.codec_options
    if as_class is not None:
        copts = _CodecOptions(
            as_class, copts.tz_aware, copts.uuid_representation)
    self.__codec_options = copts

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    # Translate the boolean options into wire-protocol query flags.
    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
def _command(self, command, value=1, check=True, allowable_errors=None,
             uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True,
             read_preference=None, codec_options=None, **kwargs):
    """Internal command helper.

    Runs *command* against this database's ``$cmd`` collection and
    returns a ``(result_document, connection_id)`` tuple.

    :Parameters:
      - `command`: command name (string) or a complete command document
      - `value`: value for the command key when `command` is a string
      - `check`: if True, raise on a failed command response
      - `allowable_errors`: server error messages to ignore when checking
      - `read_preference`: per-call read preference override
    """
    # A bare command name becomes {name: value}; SON preserves key order,
    # which matters because the command name must be the first key.
    if isinstance(command, basestring):
        command = SON([(command, value)])

    command_name = command.keys()[0].lower()
    must_use_master = kwargs.pop('_use_master', False)
    # Anything not known to be secondary-safe is routed to the primary.
    if command_name not in secondary_ok_commands:
        must_use_master = True

    # Special-case: mapreduce can go to secondaries only if inline
    if command_name == 'mapreduce':
        out = command.get('out') or kwargs.get('out')
        if not isinstance(out, dict) or not out.get('inline'):
            must_use_master = True

    # Special-case: aggregate with $out cannot go to secondaries.
    if command_name == 'aggregate':
        for stage in kwargs.get('pipeline', []):
            if '$out' in stage:
                must_use_master = True
                break

    # Build codec options when none were given, or when the legacy
    # 'as_class' kwarg must override the document class.
    if codec_options is None or 'as_class' in kwargs:
        opts = {}
        if 'as_class' in kwargs:
            opts['document_class'] = kwargs.pop('as_class')
        # 'as_class' must be popped from kwargs above so it is not sent
        # to the server as part of the command document.
        if codec_options:
            opts['tz_aware'] = codec_options.tz_aware
            opts['uuid_representation'] = codec_options.uuid_representation
        else:
            opts['uuid_representation'] = uuid_subtype
        codec_options = _CodecOptions(**opts)

    # Options forwarded to the find() call, not to the server command.
    extra_opts = {
        'slave_okay': kwargs.pop('slave_okay', self.slave_okay),
        '_codec_options': codec_options,
        '_must_use_master': must_use_master,
    }

    if isinstance(read_preference, _ServerMode):
        # Mode object carries its own tag sets.
        extra_opts['read_preference'] = read_preference.mode
        extra_opts['tag_sets'] = read_preference.tag_sets
    else:
        if read_preference is None:
            read_preference = self.read_preference
        extra_opts['read_preference'] = read_preference
        extra_opts['tag_sets'] = kwargs.pop(
            'tag_sets', self.tag_sets)
    extra_opts['secondary_acceptable_latency_ms'] = kwargs.pop(
        'secondary_acceptable_latency_ms',
        self.secondary_acceptable_latency_ms)
    extra_opts['compile_re'] = compile_re

    # Normalize a list-style projection into a dict before it is merged
    # into the command document below.
    fields = kwargs.get('fields')
    if fields is not None and not isinstance(fields, dict):
        kwargs['fields'] = helpers._fields_list_to_dict(fields)

    # NOTE: all option kwargs must have been popped by this point;
    # everything left in kwargs becomes part of the command document.
    command.update(kwargs)

    # Warn if must_use_master will override read_preference.
    if (extra_opts['read_preference'] != ReadPreference.PRIMARY and
            extra_opts['_must_use_master'] and
            self.connection._rs_client):
        warnings.warn("%s does not support %s read preference "
                      "and will be routed to the primary instead." %
                      (command_name,
                       modes[extra_opts['read_preference']]),
                      UserWarning, stacklevel=3)

    # limit(-1): ask for exactly one document and close the cursor.
    cursor = self["$cmd"].find(command, **extra_opts).limit(-1)
    # NOTE(review): 'result' would be unbound if the cursor yielded no
    # documents — presumably $cmd always returns one; confirm.
    for doc in cursor:
        result = doc

    if check:
        helpers._check_command_response(
            result, self.connection._disconnect, None, allowable_errors)

    return result, cursor.conn_id
def __init__(self, collection, spec=None, fields=None, skip=0, limit=0,
             timeout=True, snapshot=False, tailable=False, sort=None,
             max_scan=None, as_class=None, slave_okay=False,
             await_data=False, partial=False, manipulate=True,
             read_preference=ReadPreference.PRIMARY,
             tag_sets=None,
             secondary_acceptable_latency_ms=None, exhaust=False,
             compile_re=True, oplog_replay=False, modifiers=None,
             _must_use_master=False, _codec_options=None, **kwargs):
    """Create a new cursor.

    Should not be called directly by application developers - see
    :meth:`~pymongo.collection.Collection.find` instead.

    :Parameters:
      - `collection`: the collection this cursor queries
      - `spec` (optional): query document (alias: ``filter`` kwarg)
      - `fields` (optional): projection (alias: ``projection`` kwarg)
      - `tag_sets` (optional): replica-set member tag sets; when omitted
        the effective default is ``[{}]`` (match any member)

    .. mongodoc:: cursors
    """
    # Bug fix: the signature used the mutable default tag_sets=[{}],
    # a single list object shared across every construction. Replace it
    # with the None-sentinel idiom so each cursor gets its own list.
    if tag_sets is None:
        tag_sets = [{}]

    # Backport aliases: map PyMongo 3.x find() option names onto the
    # legacy parameters this class understands.
    if 'filter' in kwargs:
        spec = kwargs['filter']
    if 'projection' in kwargs:
        fields = kwargs['projection']
    if 'no_cursor_timeout' in kwargs:
        timeout = not kwargs['no_cursor_timeout']
    if 'allow_partial_results' in kwargs:
        partial = kwargs['allow_partial_results']
    if 'cursor_type' in kwargs:
        crt = kwargs['cursor_type']
        if crt not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                       CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        exhaust = crt == CursorType.EXHAUST
        tailable = crt == CursorType.TAILABLE
        if crt == CursorType.TAILABLE_AWAIT:
            await_data = True
            tailable = True

    # $snapshot / $maxScan supplied via 'modifiers' override the
    # corresponding keyword parameters.
    if modifiers is not None:
        if not isinstance(modifiers, dict):
            raise TypeError("%s must be an instance of dict or subclass"
                            % (modifiers,))
        if '$snapshot' in modifiers:
            snapshot = modifiers['$snapshot']
        if '$maxScan' in modifiers:
            max_scan = modifiers['$maxScan']

    self.__id = None

    if spec is None:
        spec = {}

    # Validate option types up front so bad values fail fast here
    # instead of at query time.
    if not isinstance(spec, dict):
        raise TypeError("spec must be an instance of dict")
    if not isinstance(skip, int):
        raise TypeError("skip must be an instance of int")
    if not isinstance(limit, int):
        raise TypeError("limit must be an instance of int")
    if not isinstance(timeout, bool):
        raise TypeError("timeout must be an instance of bool")
    if not isinstance(snapshot, bool):
        raise TypeError("snapshot must be an instance of bool")
    if not isinstance(tailable, bool):
        raise TypeError("tailable must be an instance of bool")
    if not isinstance(slave_okay, bool):
        raise TypeError("slave_okay must be an instance of bool")
    if not isinstance(await_data, bool):
        raise TypeError("await_data must be an instance of bool")
    if not isinstance(partial, bool):
        raise TypeError("partial must be an instance of bool")
    if not isinstance(exhaust, bool):
        raise TypeError("exhaust must be an instance of bool")
    if not isinstance(oplog_replay, bool):
        raise TypeError("oplog_replay must be an instance of bool")

    if fields is not None:
        # Empty projection means "only _id".
        if not fields:
            fields = {"_id": 1}
        if not isinstance(fields, dict):
            fields = helpers._fields_list_to_dict(fields)

    self.__collection = collection
    self.__spec = spec
    self.__fields = fields
    self.__skip = skip
    self.__limit = limit
    self.__max_time_ms = None
    self.__batch_size = 0
    self.__max = None
    self.__min = None
    # Defensive copy of the caller's modifiers dict.
    self.__modifiers = modifiers and modifiers.copy() or {}

    # Exhaust cursor support
    if self.__collection.database.connection.is_mongos and exhaust:
        raise InvalidOperation('Exhaust cursors are '
                               'not supported by mongos')
    if limit and exhaust:
        raise InvalidOperation("Can't use limit and exhaust together.")
    self.__exhaust = exhaust
    self.__exhaust_mgr = None

    # This is ugly. People want to be able to do cursor[5:5] and
    # get an empty result set (old behavior was an
    # exception). It's hard to do that right, though, because the
    # server uses limit(0) to mean 'no limit'. So we set __empty
    # in that case and check for it when iterating. We also unset
    # it anytime we change __limit.
    self.__empty = False

    self.__snapshot = snapshot
    self.__ordering = sort and helpers._index_document(sort) or None
    self.__max_scan = max_scan
    self.__explain = False
    self.__hint = None
    self.__comment = None
    self.__slave_okay = slave_okay
    self.__manipulate = manipulate
    self.__read_preference = read_preference
    self.__tag_sets = tag_sets
    self.__secondary_acceptable_latency_ms = secondary_acceptable_latency_ms
    self.__compile_re = compile_re
    self.__must_use_master = _must_use_master

    # Codec options: explicit argument wins over the collection's; the
    # legacy 'as_class' replaces the document class if given.
    copts = _codec_options or collection.codec_options
    if as_class is not None:
        copts = _CodecOptions(
            as_class, copts.tz_aware, copts.uuid_representation)
    self.__codec_options = copts

    self.__data = deque()
    self.__connection_id = None
    self.__retrieved = 0
    self.__killed = False

    # Fold the boolean options into the wire-protocol flag bitmask.
    self.__query_flags = 0
    if tailable:
        self.__query_flags |= _QUERY_OPTIONS["tailable_cursor"]
    if not timeout:
        self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
    if tailable and await_data:
        self.__query_flags |= _QUERY_OPTIONS["await_data"]
    if exhaust:
        self.__query_flags |= _QUERY_OPTIONS["exhaust"]
    if partial:
        self.__query_flags |= _QUERY_OPTIONS["partial"]
    if oplog_replay:
        self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

    # this is for passing network_timeout through if it's specified
    # need to use kwargs as None is a legit value for network_timeout
    self.__kwargs = kwargs
def _command(self, command, value=1, check=True, allowable_errors=None,
             uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True,
             read_preference=None, codec_options=None, **kwargs):
    """Internal command helper.

    Runs *command* against this database's ``$cmd`` collection and
    returns a ``(result_document, connection_id)`` tuple.

    :Parameters:
      - `command`: command name (string) or a complete command document
      - `value`: value for the command key when `command` is a string
      - `check`: if True, raise on a failed command response
      - `allowable_errors`: server error messages to ignore when checking
      - `read_preference`: per-call read preference override
    """
    # A bare command name becomes {name: value}; SON preserves key order,
    # which matters because the command name must be the first key.
    if isinstance(command, str):
        command = SON([(command, value)])

    command_name = list(command.keys())[0].lower()
    must_use_master = kwargs.pop('_use_master', False)
    # Anything not known to be secondary-safe is routed to the primary.
    if command_name not in secondary_ok_commands:
        must_use_master = True

    # Special-case: mapreduce can go to secondaries only if inline
    if command_name == 'mapreduce':
        out = command.get('out') or kwargs.get('out')
        if not isinstance(out, dict) or not out.get('inline'):
            must_use_master = True

    # Special-case: aggregate with $out cannot go to secondaries.
    if command_name == 'aggregate':
        for stage in kwargs.get('pipeline', []):
            if '$out' in stage:
                must_use_master = True
                break

    # Build codec options when none were given, or when the legacy
    # 'as_class' kwarg must override the document class.
    if codec_options is None or 'as_class' in kwargs:
        opts = {}
        if 'as_class' in kwargs:
            opts['document_class'] = kwargs.pop('as_class')
        # 'as_class' must be popped from kwargs above so it is not sent
        # to the server as part of the command document.
        if codec_options:
            opts['tz_aware'] = codec_options.tz_aware
            opts['uuid_representation'] = codec_options.uuid_representation
        else:
            opts['uuid_representation'] = uuid_subtype
        codec_options = _CodecOptions(**opts)

    # Options forwarded to the find() call, not to the server command.
    extra_opts = {
        'slave_okay': kwargs.pop('slave_okay', self.slave_okay),
        '_codec_options': codec_options,
        '_must_use_master': must_use_master,
    }

    if isinstance(read_preference, _ServerMode):
        # Mode object carries its own tag sets.
        extra_opts['read_preference'] = read_preference.mode
        extra_opts['tag_sets'] = read_preference.tag_sets
    else:
        if read_preference is None:
            read_preference = self.read_preference
        extra_opts['read_preference'] = read_preference
        extra_opts['tag_sets'] = kwargs.pop(
            'tag_sets', self.tag_sets)
    extra_opts['secondary_acceptable_latency_ms'] = kwargs.pop(
        'secondary_acceptable_latency_ms',
        self.secondary_acceptable_latency_ms)
    extra_opts['compile_re'] = compile_re

    # Normalize a list-style projection into a dict before it is merged
    # into the command document below.
    fields = kwargs.get('fields')
    if fields is not None and not isinstance(fields, dict):
        kwargs['fields'] = helpers._fields_list_to_dict(fields)

    # NOTE: all option kwargs must have been popped by this point;
    # everything left in kwargs becomes part of the command document.
    command.update(kwargs)

    # Warn if must_use_master will override read_preference.
    if (extra_opts['read_preference'] != ReadPreference.PRIMARY and
            extra_opts['_must_use_master'] and
            self.connection._rs_client):
        warnings.warn("%s does not support %s read preference "
                      "and will be routed to the primary instead." %
                      (command_name,
                       modes[extra_opts['read_preference']]),
                      UserWarning, stacklevel=3)

    # limit(-1): ask for exactly one document and close the cursor.
    cursor = self["$cmd"].find(command, **extra_opts).limit(-1)
    # NOTE(review): 'result' would be unbound if the cursor yielded no
    # documents — presumably $cmd always returns one; confirm.
    for doc in cursor:
        result = doc

    if check:
        # Escape any '%' already in the command repr so the later %s
        # substitution by the response checker is safe.
        msg = "command %s on namespace %s failed: %%s" % (
            repr(command).replace("%", "%%"), self.name + '.$cmd')
        helpers._check_command_response(result, self.connection.disconnect,
                                        msg, allowable_errors)

    return result, cursor.conn_id
def _command(
    self,
    command,
    value=1,
    check=True,
    allowable_errors=None,
    uuid_subtype=OLD_UUID_SUBTYPE,
    compile_re=True,
    read_preference=None,
    codec_options=None,
    **kwargs
):
    """Internal command helper.

    Runs *command* against this database's ``$cmd`` collection and
    returns a ``(result_document, connection_id)`` tuple.

    :Parameters:
      - `command`: command name (string) or a complete command document
      - `value`: value for the command key when `command` is a string
      - `check`: if True, raise on a failed command response
      - `allowable_errors`: server error messages to ignore when checking
      - `read_preference`: per-call read preference override
    """
    # A bare command name becomes {name: value}; SON preserves key order,
    # which matters because the command name must be the first key.
    if isinstance(command, basestring):
        command = SON([(command, value)])

    command_name = command.keys()[0].lower()
    must_use_master = kwargs.pop("_use_master", False)
    # Anything not known to be secondary-safe is routed to the primary.
    if command_name not in secondary_ok_commands:
        must_use_master = True

    # Special-case: mapreduce can go to secondaries only if inline
    if command_name == "mapreduce":
        out = command.get("out") or kwargs.get("out")
        if not isinstance(out, dict) or not out.get("inline"):
            must_use_master = True

    # Special-case: aggregate with $out cannot go to secondaries.
    if command_name == "aggregate":
        for stage in kwargs.get("pipeline", []):
            if "$out" in stage:
                must_use_master = True
                break

    # Build codec options when none were given, or when the legacy
    # 'as_class' kwarg must override the document class.
    if codec_options is None or "as_class" in kwargs:
        opts = {}
        if "as_class" in kwargs:
            opts["document_class"] = kwargs.pop("as_class")
        # 'as_class' must be popped from kwargs above so it is not sent
        # to the server as part of the command document.
        if codec_options:
            opts["tz_aware"] = codec_options.tz_aware
            opts["uuid_representation"] = codec_options.uuid_representation
        else:
            opts["uuid_representation"] = uuid_subtype
        codec_options = _CodecOptions(**opts)

    # Options forwarded to the find() call, not to the server command.
    extra_opts = {
        "slave_okay": kwargs.pop("slave_okay", self.slave_okay),
        "_codec_options": codec_options,
        "_must_use_master": must_use_master,
    }

    if isinstance(read_preference, _ServerMode):
        # Mode object carries its own tag sets.
        extra_opts["read_preference"] = read_preference.mode
        extra_opts["tag_sets"] = read_preference.tag_sets
    else:
        if read_preference is None:
            read_preference = self.read_preference
        extra_opts["read_preference"] = read_preference
        extra_opts["tag_sets"] = kwargs.pop("tag_sets", self.tag_sets)
    extra_opts["secondary_acceptable_latency_ms"] = kwargs.pop(
        "secondary_acceptable_latency_ms", self.secondary_acceptable_latency_ms
    )
    extra_opts["compile_re"] = compile_re

    # Normalize a list-style projection into a dict before it is merged
    # into the command document below.
    fields = kwargs.get("fields")
    if fields is not None and not isinstance(fields, dict):
        kwargs["fields"] = helpers._fields_list_to_dict(fields)

    # NOTE: all option kwargs must have been popped by this point;
    # everything left in kwargs becomes part of the command document.
    command.update(kwargs)

    # Warn if must_use_master will override read_preference.
    if (
        extra_opts["read_preference"] != ReadPreference.PRIMARY
        and extra_opts["_must_use_master"]
        and self.connection._rs_client
    ):
        warnings.warn(
            "%s does not support %s read preference "
            "and will be routed to the primary instead."
            % (command_name, modes[extra_opts["read_preference"]]),
            UserWarning,
            stacklevel=3,
        )

    # limit(-1): ask for exactly one document and close the cursor.
    cursor = self["$cmd"].find(command, **extra_opts).limit(-1)
    # NOTE(review): 'result' would be unbound if the cursor yielded no
    # documents — presumably $cmd always returns one; confirm.
    for doc in cursor:
        result = doc

    if check:
        # Escape any '%' already in the command repr so the later %s
        # substitution by the response checker is safe.
        msg = "command %s on namespace %s failed: %%s" % (repr(command).replace("%", "%%"), self.name + ".$cmd")
        helpers._check_command_response(result, self.connection.disconnect, msg, allowable_errors)

    return result, cursor.conn_id