Example #1
0
    def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
        """Save a document: insert when it has no ``_id``, otherwise upsert.

        Deprecated - use ``insert_one`` or ``replace_one`` instead.
        Returns the document's ``_id`` (or ``None`` for a RawBSONDocument
        replace with no retrievable id).
        """
        warnings.warn(
            "save is deprecated. Use insert_one or replace_one "
            "instead",
            DeprecationWarning,
            stacklevel=2)
        common.validate_is_document_type("to_save", to_save)

        collation = validate_collation_or_none(kwargs.pop('collation', None))
        # Any remaining kwargs are treated as write-concern options.
        write_concern = WriteConcern(**kwargs) if kwargs else None

        has_id = isinstance(to_save, RawBSONDocument) or "_id" in to_save
        if not has_id:
            # No _id yet: a plain insert generates and returns a new one.
            return self._insert(to_save, True, check_keys, manipulate,
                                write_concern)
        # An _id is present: replace the matching document (upserting).
        self._update_retryable({"_id": to_save["_id"]},
                               to_save,
                               True,
                               check_keys,
                               False,
                               manipulate,
                               write_concern,
                               collation=collation)
        return to_save.get("_id")
Example #2
0
 def add_insert(self, document):
     """Queue *document* as an insert op, assigning an ``_id`` if absent."""
     validate_is_document_type("document", document)
     # Raw BSON is opaque to us; only plain documents get a client-side id.
     needs_id = not isinstance(document, RawBSONDocument) and "_id" not in document
     if needs_id:
         document["_id"] = ObjectId()
     self.ops.append((_INSERT, document))
Example #3
0
 def __init__(self,
              collection,
              ordered,
              bypass_document_validation,
              comment=None,
              let=None):
     """Initialize a _Bulk instance."""
     # Decode results leniently and as plain dicts.
     options = collection.codec_options._replace(
         unicode_decode_error_handler="replace", document_class=dict)
     self.collection = collection.with_options(codec_options=options)
     if let is not None:
         common.validate_is_document_type("let", let)
     self.let = let
     self.comment = comment
     self.ordered = ordered
     self.bypass_doc_val = bypass_document_validation
     self.ops = []
     self.executed = False
     # Feature flags flipped as individual operations are added.
     self.uses_collation = False
     self.uses_array_filters = False
     self.uses_hint_update = False
     self.uses_hint_delete = False
     # Retryable-write bookkeeping.
     self.is_retryable = True
     self.retrying = False
     self.started_retryable_write = False
     # Extra state so that we know where to pick up on a retry attempt.
     self.current_run = None
     self.next_run = None
Example #4
0
File: bulk.py  Project: Yucie/Arianrhod
 def add_insert(self, document):
     """Append an insert operation for *document* to ``self.ops``."""
     validate_is_document_type("document", document)
     # Raw BSON documents are immutable, so never inject an _id into one.
     if not isinstance(document, RawBSONDocument):
         if '_id' not in document:
             # Generate the ObjectId client side.
             document['_id'] = ObjectId()
     self.ops.append((_INSERT, document))
Example #5
0
 def add_insert(self, document: dict) -> None:
     """Register *document* as a pending insert op."""
     validate_is_document_type('document', document)
     # Pre-assign the _id client side so callers can observe it.
     missing_id = '_id' not in document
     if missing_id:
         document['_id'] = ObjectId()
     self.ops.append((_INSERT, document))
Example #6
0
 def gen():
     """Yield (_INSERT, doc) pairs, validating docs and recording _ids."""
     for doc in documents:
         common.validate_is_document_type("document", doc)
         # Raw BSON documents are passed through untouched; plain
         # documents get a client-generated _id which we record.
         if not isinstance(doc, RawBSONDocument):
             if "_id" not in doc:
                 doc["_id"] = ObjectId()
             inserted_ids.append(doc["_id"])
         yield (message._INSERT, doc)
    def __init__(self,
                 collection,
                 filter=None,
                 projection=None,
                 skip=0,
                 limit=0,
                 no_cursor_timeout=False,
                 cursor_type=CursorType.NON_TAILABLE,
                 sort=None,
                 allow_partial_results=False,
                 oplog_replay=False,
                 batch_size=0,
                 collation=None,
                 hint=None,
                 max_scan=None,
                 max_time_ms=None,
                 max=None,
                 min=None,
                 return_key=None,
                 show_record_id=None,
                 snapshot=None,
                 comment=None,
                 session=None,
                 allow_disk_use=None,
                 let=None):
        """Create a new cursor.

        Should not be called directly by application developers - see
        :meth:`~pymongo.collection.Collection.find` instead.

        .. seealso:: The MongoDB documentation on `cursors <https://dochub.mongodb.org/core/cursors>`_.
        """
        # Initialize all attributes used in __del__ before possibly raising
        # an error to avoid attribute errors during garbage collection.
        self.__collection = collection
        self.__id = None
        self.__exhaust = False
        self.__sock_mgr = None
        self.__killed = False

        # Remember whether the caller supplied the session; with None here,
        # an implicit session is used instead.
        if session:
            self.__session = session
            self.__explicit_session = True
        else:
            self.__session = None
            self.__explicit_session = False

        # A missing filter is equivalent to an empty query document.
        spec = filter
        if spec is None:
            spec = {}

        # --- Validate all user-supplied options up front. ---
        validate_is_mapping("filter", spec)
        if not isinstance(skip, int):
            raise TypeError("skip must be an instance of int")
        if not isinstance(limit, int):
            raise TypeError("limit must be an instance of int")
        validate_boolean("no_cursor_timeout", no_cursor_timeout)
        # Without an explicit session the server can still reap the cursor
        # when the session idle timeout fires, despite no_cursor_timeout.
        if no_cursor_timeout and not self.__explicit_session:
            warnings.warn(
                "use an explicit session with no_cursor_timeout=True "
                "otherwise the cursor may still timeout after "
                "30 minutes, for more info see "
                "https://docs.mongodb.com/v4.4/reference/method/"
                "cursor.noCursorTimeout/"
                "#session-idle-timeout-overrides-nocursortimeout",
                UserWarning,
                stacklevel=2)
        if cursor_type not in (CursorType.NON_TAILABLE, CursorType.TAILABLE,
                               CursorType.TAILABLE_AWAIT, CursorType.EXHAUST):
            raise ValueError("not a valid value for cursor_type")
        validate_boolean("allow_partial_results", allow_partial_results)
        validate_boolean("oplog_replay", oplog_replay)
        if not isinstance(batch_size, int):
            raise TypeError("batch_size must be an integer")
        if batch_size < 0:
            raise ValueError("batch_size must be >= 0")
        # Only set if allow_disk_use is provided by the user, else None.
        if allow_disk_use is not None:
            allow_disk_use = validate_boolean("allow_disk_use", allow_disk_use)

        # Normalize a projection given as a list/tuple of field names into
        # the mapping form used for the query.
        if projection is not None:
            projection = helpers._fields_list_to_dict(projection, "projection")

        if let:
            validate_is_document_type("let", let)

        # --- Store the validated query state on the instance. ---
        self.__let = let
        self.__spec = spec
        self.__projection = projection
        self.__skip = skip
        self.__limit = limit
        self.__batch_size = batch_size
        # A falsy sort (None or empty) yields no ordering document.
        self.__ordering = sort and helpers._index_document(sort) or None
        self.__max_scan = max_scan
        self.__explain = False
        self.__comment = comment
        self.__max_time_ms = max_time_ms
        self.__max_await_time_ms = None
        self.__max = max
        self.__min = min
        self.__collation = validate_collation_or_none(collation)
        self.__return_key = return_key
        self.__show_record_id = show_record_id
        self.__allow_disk_use = allow_disk_use
        self.__snapshot = snapshot
        self.__set_hint(hint)

        # Exhaust cursor support
        if cursor_type == CursorType.EXHAUST:
            if self.__collection.database.client.is_mongos:
                raise InvalidOperation('Exhaust cursors are '
                                       'not supported by mongos')
            if limit:
                raise InvalidOperation("Can't use limit and exhaust together.")
            self.__exhaust = True

        # This is ugly. People want to be able to do cursor[5:5] and
        # get an empty result set (old behavior was an
        # exception). It's hard to do that right, though, because the
        # server uses limit(0) to mean 'no limit'. So we set __empty
        # in that case and check for it when iterating. We also unset
        # it anytime we change __limit.
        self.__empty = False

        # Buffered documents and retrieval progress for iteration.
        self.__data = deque()
        self.__address = None
        self.__retrieved = 0

        self.__codec_options = collection.codec_options
        # Read preference is set when the initial find is sent.
        self.__read_preference = None
        self.__read_concern = collection.read_concern

        # Fold the boolean options into the wire-protocol query flag bits.
        self.__query_flags = cursor_type
        if no_cursor_timeout:
            self.__query_flags |= _QUERY_OPTIONS["no_timeout"]
        if allow_partial_results:
            self.__query_flags |= _QUERY_OPTIONS["partial"]
        if oplog_replay:
            self.__query_flags |= _QUERY_OPTIONS["oplog_replay"]

        # The namespace to use for find/getMore commands.
        self.__dbname = collection.database.name
        self.__collname = collection.name
Example #8
0
    def insert_one(self, document):

        common.validate_is_document_type("document", document)
        if not (isinstance(document, RawBSONDocument) or "_id" in document):
            document["_id"] = ObjectId()