def save(self, commit=True):
    """If commit is True the embedded document is added to the parent
    document. Otherwise the parent_document is left untouched and the
    embedded document is returned as usual.
    """
    if self.errors:
        raise ValueError("The %s could not be saved because the data didn't"
                         " validate." % self.instance.__class__.__name__)

    if commit:
        field = self.parent_document._fields.get(self._meta.embedded_field)
        if isinstance(field, ListField) and self.position is None:
            # No position given, simply append to the ListField.
            try:
                self.parent_document.update(
                    **{"push__" + self._meta.embedded_field: self.instance})
            except Exception:
                raise OperationError("The %s could not be appended."
                                     % self.instance.__class__.__name__)
        elif isinstance(field, ListField) and self.position is not None:
            # Update the ListField at the given position.
            try:
                self.parent_document.update(**{
                    "__".join(("set", self._meta.embedded_field,
                               str(self.position))): self.instance})
            except Exception:
                raise OperationError(
                    "The %s could not be updated at position %d."
                    % (self.instance.__class__.__name__, self.position))
        else:
            # Not a ListField on the parent, treat as a plain embedded field.
            setattr(self.parent_document, self._meta.embedded_field,
                    self.instance)
            self.parent_document.save()

    return self.instance
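# A minimal usage sketch for the form-style save() above, assuming it belongs to an
# EmbeddedDocumentForm-like class (as in django-mongoengine forms); Comment, Post and
# CommentForm are hypothetical names used only for illustration.
from mongoengine import (Document, EmbeddedDocument, EmbeddedDocumentField,
                         ListField, StringField, connect)

connect('example_db')  # assumes a local MongoDB instance


class Comment(EmbeddedDocument):
    text = StringField(required=True)


class Post(Document):
    title = StringField(required=True)
    comments = ListField(EmbeddedDocumentField(Comment))


# post = Post(title='Hello').save()
# form = CommentForm(data={'text': 'Nice post'}, parent_document=post)
# comment = form.save()               # appends to post.comments via push__comments
# comment = form.save(commit=False)   # returns the embedded doc, parent untouched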
def to_dbref(self):
    """Returns an instance of :class:`~bson.dbref.DBRef` useful in
    `__raw__` queries."""
    if self.pk is None:
        msg = 'Only saved documents can have a valid dbref'
        raise OperationError(msg)
    return DBRef(self.__class__._get_collection_name(), self.pk)
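# Usage sketch for to_dbref() in a __raw__ query, assuming a local MongoDB;
# Author and Book are hypothetical documents. dbref=True makes the reference be
# stored as a DBRef so the raw match below lines up with what is on disk.
from mongoengine import Document, ReferenceField, StringField, connect

connect('example_db')


class Author(Document):
    name = StringField()


class Book(Document):
    title = StringField()
    author = ReferenceField(Author, dbref=True)


author = Author(name='Ada').save()
Book(title='Notes', author=author).save()

# Match the stored DBRef directly in a raw query.
books = Book.objects(__raw__={'author': author.to_dbref()})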
def delete(self, signal_kwargs=None, **write_concern):
    """Delete the :class:`~mongoengine.Document` from the database. This
    will only take effect if the document has been previously saved.

    :param signal_kwargs: (optional) kwargs dictionary to be passed to
        the signal calls.
    :param write_concern: Extra keyword arguments are passed down which
        will be used as options for the resultant ``getLastError`` command.
        For example, ``delete(..., w=2, fsync=True)`` will wait until at
        least two servers have recorded the write and will force an fsync
        on the primary server.

    .. versionchanged:: 0.10.7
        Add signal_kwargs argument
    """
    signal_kwargs = signal_kwargs or {}
    signals.pre_delete.send(self.__class__, document=self, **signal_kwargs)

    # Delete FileFields separately
    FileField = _import_class('FileField')
    for name, field in self._fields.iteritems():
        if isinstance(field, FileField):
            getattr(self, name).delete()

    try:
        self._qs.filter(**self._object_key).delete(
            write_concern=write_concern, _from_doc_delete=True)
    except pymongo.errors.OperationFailure as err:
        message = u'Could not delete document (%s)' % err.message
        raise OperationError(message)
    signals.post_delete.send(self.__class__, document=self, **signal_kwargs)
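# Usage sketch for delete() with signal kwargs and write-concern options, assuming a
# local MongoDB; Page is a hypothetical document and 'initiator' is an arbitrary key
# forwarded to pre_delete/post_delete signal receivers.
from mongoengine import Document, StringField, connect

connect('example_db')


class Page(Document):
    title = StringField()


page = Page(title='Old page').save()
# Extra keyword arguments become write-concern options for the delete.
page.delete(signal_kwargs={'initiator': 'cleanup-job'}, w=1)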
def update(self, **kwargs):
    """Performs an update on the :class:`~mongoengine.Document`
    A convenience wrapper to :meth:`~mongoengine.QuerySet.update`.

    Raises :class:`OperationError` if called on an object that has not yet
    been saved.
    """
    if not self.pk:
        raise OperationError('attempt to update a document not yet saved')

    # Need to add shard key to query, or you get an error
    return self._qs.filter(**self._object_key).update_one(**kwargs)
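# Usage sketch for the atomic update() wrapper above; it accepts the same modifiers
# as QuerySet.update(). Counter is a hypothetical document, assuming a local MongoDB.
from mongoengine import Document, IntField, StringField, connect

connect('example_db')


class Counter(Document):
    name = StringField()
    value = IntField(default=0)


counter = Counter(name='hits').save()
counter.update(inc__value=1, set__name='page hits')  # one atomic update on this document
counter.reload()  # update() does not refresh the in-memory instance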
def drop_collection(cls):
    """Drops the entire collection associated with this
    :class:`~mongoengine.Document` type from the database.

    Raises :class:`OperationError` if the document has no collection set
    (e.g. if it is `abstract`)
    """
    coll_name = cls._get_collection_name()
    if not coll_name:
        raise OperationError(
            "Document %s has no collection defined (is it abstract?)" % cls)
    cls._collection = None
    db = cls._get_db()
    db.drop_collection(coll_name)
def drop_collection(cls):
    """Drops the entire collection associated with this
    :class:`~mongoengine.Document` type from the database.

    Raises :class:`OperationError` if the document has no collection set
    (e.g. if it is `abstract`)

    .. versionchanged:: 0.10.7
        :class:`OperationError` exception raised if no collection available
    """
    col_name = cls._get_collection_name()
    if not col_name:
        raise OperationError('Document %s has no collection defined '
                             '(is it abstract?)' % cls)
    cls._collection = None
    db = cls._get_db()
    db.drop_collection(col_name)
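# Usage sketch for drop_collection(), assuming a local MongoDB; TempLog is a
# hypothetical document. Calling it on an abstract document raises OperationError.
from mongoengine import Document, StringField, connect

connect('example_db')


class TempLog(Document):
    message = StringField()


TempLog(message='scratch data').save()
TempLog.drop_collection()  # drops the whole collection and resets the cached handle
assert TempLog.objects.count() == 0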
def update(self, **kwargs):
    """Performs an update on the :class:`~mongoengine.Document`
    A convenience wrapper to :meth:`~mongoengine.QuerySet.update`.

    Raises :class:`OperationError` if called on an object that has not yet
    been saved.
    """
    if self.pk is None:
        if kwargs.get("upsert", False):
            query = self.to_mongo()
            if "_cls" in query:
                del query["_cls"]
            return self._qs.filter(**query).update_one(**kwargs)
        else:
            raise OperationError("attempt to update a document not yet saved")

    # Need to add shard key to query, or you get an error
    return self._qs.filter(**self._object_key).update_one(**kwargs)
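# Sketch of the upsert branch above: an unsaved document (pk is None) can still be
# persisted through update() when upsert=True is passed, because the query is built
# from the document's own fields. Profile is a hypothetical document, assuming a
# local MongoDB.
from mongoengine import Document, IntField, StringField, connect

connect('example_db')


class Profile(Document):
    username = StringField(unique=True)
    visits = IntField(default=0)


profile = Profile(username='ada')           # never saved, so pk is None
profile.update(upsert=True, inc__visits=1)  # inserts a matching document if none exists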
def delete(self, **write_concern):
    """Delete the :class:`~mongoengine.Document` from the database. This
    will only take effect if the document has been previously saved.

    :param write_concern: Extra keyword arguments are passed down which
        will be used as options for the resultant ``getLastError`` command.
        For example, ``delete(..., w=2, fsync=True)`` will wait until at
        least two servers have recorded the write and will force an fsync
        on the primary server.
    """
    signals.pre_delete.send(self.__class__, document=self)

    try:
        self._qs.filter(**self._object_key).delete(
            write_concern=write_concern, _from_doc_delete=True)
    except pymongo.errors.OperationFailure as err:
        message = 'Could not delete document (%s)' % err.message
        raise OperationError(message)
    signals.post_delete.send(self.__class__, document=self)
def reload(self, max_depth=1):
    """Reloads all attributes from the database.

    .. versionadded:: 0.1.2
    .. versionchanged:: 0.6
        Now chainable
    """
    id_field = self._meta['id_field']
    obj = self._qs.filter(
        **{id_field: self[id_field]}
    ).limit(1).select_related(max_depth=max_depth)

    if obj:
        obj = obj[0]
    else:
        msg = "Reloaded document has been deleted"
        raise OperationError(msg)
    for field in self._fields:
        setattr(self, field, self._reload(field, obj[field]))
    if self._dynamic:
        for name in list(self._dynamic_fields.keys()):
            setattr(self, name, self._reload(name, obj._data[name]))
    self._changed_fields = obj._changed_fields
    self._created = False
    return obj
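# Usage sketch for reload(): refresh local state after an atomic queryset update,
# assuming a local MongoDB; Article is a hypothetical document.
from mongoengine import Document, IntField, StringField, connect

connect('example_db')


class Article(Document):
    title = StringField()
    views = IntField(default=0)


article = Article(title='Draft').save()
Article.objects(id=article.id).update_one(inc__views=5)
article.reload()            # pulls the incremented value back onto this instance
assert article.views == 5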
def save(self, force_insert=False, validate=True, clean=True,
         write_concern=None, cascade=None, cascade_kwargs=None,
         _refs=None, save_condition=None, signal_kwargs=None, **kwargs):
    """Save the :class:`~mongoengine.Document` to the database. If the
    document already exists, it will be updated, otherwise it will be
    created.

    :param force_insert: only try to create a new document, don't allow
        updates of existing documents.
    :param validate: validates the document; set to ``False`` to skip.
    :param clean: call the document clean method, requires `validate` to be
        True.
    :param write_concern: Extra keyword arguments are passed down to
        :meth:`~pymongo.collection.Collection.save` OR
        :meth:`~pymongo.collection.Collection.insert` which will be used as
        options for the resultant ``getLastError`` command. For example,
        ``save(..., write_concern={'w': 2, 'fsync': True}, ...)`` will wait
        until at least two servers have recorded the write and will force an
        fsync on the primary server.
    :param cascade: Sets the flag for cascading saves. You can set a default
        by setting "cascade" in the document __meta__
    :param cascade_kwargs: (optional) kwargs dictionary to be passed through
        to cascading saves. Implies ``cascade=True``.
    :param _refs: A list of processed references used in cascading saves
    :param save_condition: only perform save if matching record in db
        satisfies condition(s) (e.g. version number). Raises
        :class:`OperationError` if the conditions are not satisfied
    :param signal_kwargs: (optional) kwargs dictionary to be passed to
        the signal calls.

    .. versionchanged:: 0.5
        In existing documents it only saves changed fields using set / unset.
        Saves are cascaded and any :class:`~bson.dbref.DBRef` objects that
        have changes are saved as well.
    .. versionchanged:: 0.6
        Added cascading saves
    .. versionchanged:: 0.8
        Cascade saves are optional and default to False. For fine-grained
        control you can enable them per document with meta['cascade'] = True.
        You can also pass different kwargs to the cascade save using
        cascade_kwargs, which overwrites the existing kwargs with custom
        values.
    .. versionchanged:: 0.8.5
        Optional save_condition that only overwrites existing documents if
        the condition is satisfied in the current db record.
    .. versionchanged:: 0.10
        :class:`OperationError` exception raised if save_condition fails.
    .. versionchanged:: 0.10.1
        save_condition failure now raises a :class:`SaveConditionError`
    .. versionchanged:: 0.10.7
        Add signal_kwargs argument
    """
    if self._meta.get('abstract'):
        raise InvalidDocumentError('Cannot save an abstract document.')

    signal_kwargs = signal_kwargs or {}
    signals.pre_save.send(self.__class__, document=self, **signal_kwargs)

    if validate:
        self.validate(clean=clean)

    if write_concern is None:
        write_concern = {'w': 1}

    doc = self.to_mongo()

    created = ('_id' not in doc or self._created or force_insert)

    signals.pre_save_post_validation.send(self.__class__, document=self,
                                          created=created, **signal_kwargs)
    # doc might be refreshed by the pre_save_post_validation hook,
    # e.g. for etag generation
    doc = self.to_mongo()

    if self._meta.get('auto_create_index', True):
        self.ensure_indexes()

    try:
        # Save a new document or update an existing one
        if created:
            object_id = self._save_create(doc, force_insert, write_concern)
        else:
            object_id, created = self._save_update(doc, save_condition,
                                                   write_concern)

        if cascade is None:
            cascade = (self._meta.get('cascade', False) or
                       cascade_kwargs is not None)

        if cascade:
            kwargs = {
                'force_insert': force_insert,
                'validate': validate,
                'write_concern': write_concern,
                'cascade': cascade
            }
            if cascade_kwargs:  # Allow granular control over cascades
                kwargs.update(cascade_kwargs)
            kwargs['_refs'] = _refs
            self.cascade_save(**kwargs)

    except pymongo.errors.DuplicateKeyError as err:
        message = u'Tried to save duplicate unique keys (%s)'
        raise NotUniqueError(message % six.text_type(err))
    except pymongo.errors.OperationFailure as err:
        message = 'Could not save document (%s)'
        if re.match('^E1100[01] duplicate key', six.text_type(err)):
            # E11000 - duplicate key error index
            # E11001 - duplicate key on update
            message = u'Tried to save duplicate unique keys (%s)'
            raise NotUniqueError(message % six.text_type(err))
        raise OperationError(message % six.text_type(err))

    # Make sure we store the PK on this document now that it's saved
    id_field = self._meta['id_field']
    if created or id_field not in self._meta.get('shard_key', []):
        self[id_field] = self._fields[id_field].to_python(object_id)

    signals.post_save.send(self.__class__, document=self, created=created,
                           **signal_kwargs)

    self._clear_changed_fields()
    self._created = False

    return self
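# Sketch of optimistic concurrency with save_condition, as described above: the save
# only applies if the stored record still matches the condition, otherwise a
# SaveConditionError is raised. VersionedDoc is a hypothetical document, assuming a
# local MongoDB.
from mongoengine import Document, IntField, StringField, connect
from mongoengine.errors import SaveConditionError

connect('example_db')


class VersionedDoc(Document):
    body = StringField()
    version = IntField(default=1)


doc = VersionedDoc(body='first draft').save()
doc.body = 'second draft'
doc.version += 1
try:
    doc.save(save_condition={'version': 1})  # only overwrite if the db still holds version 1
except SaveConditionError:
    pass  # another writer won the race; reload and retry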
def save(self, force_insert=False, validate=True, clean=True,
         write_concern=None, cascade=None, cascade_kwargs=None,
         _refs=None, save_condition=None, signal_kwargs=None, **kwargs):
    """Save the :class:`~mongoengine.Document` to the database. If the
    document already exists, it will be updated, otherwise it will be
    created.

    :param force_insert: only try to create a new document, don't allow
        updates of existing documents
    :param validate: validates the document; set to ``False`` to skip.
    :param clean: call the document clean method, requires `validate` to be
        True.
    :param write_concern: Extra keyword arguments are passed down to
        :meth:`~pymongo.collection.Collection.save` OR
        :meth:`~pymongo.collection.Collection.insert` which will be used as
        options for the resultant ``getLastError`` command. For example,
        ``save(..., write_concern={'w': 2, 'fsync': True}, ...)`` will wait
        until at least two servers have recorded the write and will force an
        fsync on the primary server.
    :param cascade: Sets the flag for cascading saves. You can set a default
        by setting "cascade" in the document __meta__
    :param cascade_kwargs: (optional) kwargs dictionary to be passed through
        to cascading saves. Implies ``cascade=True``.
    :param _refs: A list of processed references used in cascading saves
    :param save_condition: only perform save if matching record in db
        satisfies condition(s) (e.g. version number). Raises
        :class:`OperationError` if the conditions are not satisfied
    :param signal_kwargs: (optional) kwargs dictionary to be passed to
        the signal calls.

    .. versionchanged:: 0.5
        In existing documents it only saves changed fields using set / unset.
        Saves are cascaded and any :class:`~bson.dbref.DBRef` objects that
        have changes are saved as well.
    .. versionchanged:: 0.6
        Added cascading saves
    .. versionchanged:: 0.8
        Cascade saves are optional and default to False. For fine-grained
        control you can enable them per document with meta['cascade'] = True.
        You can also pass different kwargs to the cascade save using
        cascade_kwargs, which overwrites the existing kwargs with custom
        values.
    .. versionchanged:: 0.8.5
        Optional save_condition that only overwrites existing documents if
        the condition is satisfied in the current db record.
    .. versionchanged:: 0.10
        :class:`OperationError` exception raised if save_condition fails.
    .. versionchanged:: 0.10.1
        save_condition failure now raises a :class:`SaveConditionError`
    .. versionchanged:: 0.10.7
        Add signal_kwargs argument
    """
    signal_kwargs = signal_kwargs or {}
    signals.pre_save.send(self.__class__, document=self, **signal_kwargs)

    if validate:
        self.validate(clean=clean)

    if write_concern is None:
        write_concern = {'w': 1}

    doc = self.to_mongo()

    created = ('_id' not in doc or self._created or force_insert)

    signals.pre_save_post_validation.send(self.__class__, document=self,
                                          created=created, **signal_kwargs)

    try:
        collection = self._get_collection()
        if self._meta.get('auto_create_index', True):
            self.ensure_indexes()
        if created:
            if force_insert:
                object_id = collection.insert(doc, **write_concern)
            else:
                object_id = collection.save(doc, **write_concern)
                # In PyMongo 3.0, save() internally calls _update() but does
                # not return the _id that was passed in, so fetch it back here.
                # Behaviour is correct in 2.x and in 3.0.1+.
                if not object_id and pymongo.version_tuple == (3, 0):
                    pk_as_mongo_obj = self._fields.get(
                        self._meta['id_field']).to_mongo(self.pk)
                    object_id = (
                        self._qs.filter(pk=pk_as_mongo_obj).first() and
                        self._qs.filter(pk=pk_as_mongo_obj).first().pk
                    )  # TODO doesn't this make 2 queries?
        else:
            object_id = doc['_id']
            updates, removals = self._delta()

            # Need to add shard key to query, or you get an error
            if save_condition is not None:
                select_dict = transform.query(self.__class__,
                                              **save_condition)
            else:
                select_dict = {}
            select_dict['_id'] = object_id
            shard_key = self._meta.get('shard_key', tuple())
            for k in shard_key:
                path = self._lookup_field(k.split('.'))
                actual_key = [p.db_field for p in path]
                val = doc
                for ak in actual_key:
                    val = val[ak]
                select_dict['.'.join(actual_key)] = val

            def is_new_object(last_error):
                if last_error is not None:
                    updated = last_error.get('updatedExisting')
                    if updated is not None:
                        return not updated
                return created

            update_query = {}

            if updates:
                update_query['$set'] = updates
            if removals:
                update_query['$unset'] = removals
            if updates or removals:
                upsert = save_condition is None
                last_error = collection.update(select_dict, update_query,
                                               upsert=upsert, **write_concern)
                if not upsert and last_error['n'] == 0:
                    raise SaveConditionError('Race condition preventing'
                                             ' document update detected')
                created = is_new_object(last_error)

        if cascade is None:
            cascade = (self._meta.get('cascade', False) or
                       cascade_kwargs is not None)

        if cascade:
            kwargs = {
                'force_insert': force_insert,
                'validate': validate,
                'write_concern': write_concern,
                'cascade': cascade
            }
            if cascade_kwargs:  # Allow granular control over cascades
                kwargs.update(cascade_kwargs)
            kwargs['_refs'] = _refs
            self.cascade_save(**kwargs)
    except pymongo.errors.DuplicateKeyError as err:
        message = 'Tried to save duplicate unique keys (%s)'
        raise NotUniqueError(message % six.text_type(err))
    except pymongo.errors.OperationFailure as err:
        message = 'Could not save document (%s)'
        if re.match('^E1100[01] duplicate key', six.text_type(err)):
            # E11000 - duplicate key error index
            # E11001 - duplicate key on update
            message = 'Tried to save duplicate unique keys (%s)'
            raise NotUniqueError(message % six.text_type(err))
        raise OperationError(message % six.text_type(err))

    id_field = self._meta['id_field']
    if created or id_field not in self._meta.get('shard_key', []):
        self[id_field] = self._fields[id_field].to_python(object_id)

    signals.post_save.send(self.__class__, document=self, created=created,
                           **signal_kwargs)
    self._clear_changed_fields()
    self._created = False
    return self
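# Sketch of cascading saves, which the cascade/cascade_kwargs parameters above
# control; Company and Employee are hypothetical documents, assuming a local MongoDB.
from mongoengine import Document, ReferenceField, StringField, connect

connect('example_db')


class Company(Document):
    name = StringField()


class Employee(Document):
    name = StringField()
    company = ReferenceField(Company)


company = Company(name='Acme').save()
employee = Employee(name='Ada', company=company).save()

company.name = 'Acme Ltd'
# cascade=True asks save() to also persist changed referenced documents.
employee.save(cascade=True)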
def save(self, force_insert=False, validate=True, clean=True,
         write_concern=None, cascade=None, cascade_kwargs=None,
         _refs=None, **kwargs):
    """Save the :class:`~mongoengine.Document` to the database. If the
    document already exists, it will be updated, otherwise it will be
    created.

    :param force_insert: only try to create a new document, don't allow
        updates of existing documents
    :param validate: validates the document; set to ``False`` to skip.
    :param clean: call the document clean method, requires `validate` to be
        True.
    :param write_concern: Extra keyword arguments are passed down to
        :meth:`~pymongo.collection.Collection.save` OR
        :meth:`~pymongo.collection.Collection.insert` which will be used as
        options for the resultant ``getLastError`` command. For example,
        ``save(..., write_concern={'w': 2, 'fsync': True}, ...)`` will wait
        until at least two servers have recorded the write and will force an
        fsync on the primary server.
    :param cascade: Sets the flag for cascading saves. You can set a default
        by setting "cascade" in the document __meta__
    :param cascade_kwargs: (optional) kwargs dictionary to be passed through
        to cascading saves. Implies ``cascade=True``.
    :param _refs: A list of processed references used in cascading saves

    .. versionchanged:: 0.5
        In existing documents it only saves changed fields using set / unset.
        Saves are cascaded and any :class:`~bson.dbref.DBRef` objects that
        have changes are saved as well.
    .. versionchanged:: 0.6
        Added cascading saves
    .. versionchanged:: 0.8
        Cascade saves are optional and default to False. For fine-grained
        control you can enable them per document with meta['cascade'] = True.
        You can also pass different kwargs to the cascade save using
        cascade_kwargs, which overwrites the existing kwargs with custom
        values.
    """
    signals.pre_save.send(self.__class__, document=self)

    if validate:
        self.validate(clean=clean)

    if write_concern is None:
        write_concern = {"w": 1}

    doc = self.to_mongo()

    created = ('_id' not in doc or self._created or force_insert)

    signals.pre_save_post_validation.send(self.__class__, document=self,
                                          created=created)

    try:
        collection = self._get_collection()
        if created:
            if force_insert:
                object_id = collection.insert(doc, **write_concern)
            else:
                object_id = collection.save(doc, **write_concern)
        else:
            object_id = doc['_id']
            updates, removals = self._delta()

            # Need to add shard key to query, or you get an error
            select_dict = {'_id': object_id}
            shard_key = self.__class__._meta.get('shard_key', tuple())
            for k in shard_key:
                actual_key = self._db_field_map.get(k, k)
                select_dict[actual_key] = doc[actual_key]

            def is_new_object(last_error):
                if last_error is not None:
                    updated = last_error.get("updatedExisting")
                    if updated is not None:
                        return not updated
                return created

            update_query = {}

            if updates:
                update_query["$set"] = updates
            if removals:
                update_query["$unset"] = removals
            if updates or removals:
                last_error = collection.update(select_dict, update_query,
                                               upsert=True, **write_concern)
                created = is_new_object(last_error)

        if cascade is None:
            cascade = (self._meta.get('cascade', False) or
                       cascade_kwargs is not None)
        if cascade:
            kwargs = {
                "force_insert": force_insert,
                "validate": validate,
                "write_concern": write_concern,
                "cascade": cascade
            }
            if cascade_kwargs:  # Allow granular control over cascades
                kwargs.update(cascade_kwargs)
            kwargs['_refs'] = _refs
            self.cascade_save(**kwargs)
    except pymongo.errors.DuplicateKeyError as err:
        message = 'Tried to save duplicate unique keys (%s)'
        raise NotUniqueError(message % str(err))
    except pymongo.errors.OperationFailure as err:
        message = 'Could not save document (%s)'
        if re.match('^E1100[01] duplicate key', str(err)):
            # E11000 - duplicate key error index
            # E11001 - duplicate key on update
            message = 'Tried to save duplicate unique keys (%s)'
            raise NotUniqueError(message % str(err))
        raise OperationError(message % str(err))

    id_field = self._meta['id_field']
    if id_field not in self._meta.get('shard_key', []):
        self[id_field] = self._fields[id_field].to_python(object_id)

    self._clear_changed_fields()
    self._created = False
    signals.post_save.send(self.__class__, document=self, created=created)
    return self
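# Sketch of the duplicate-key handling above: violating a unique index makes save()
# raise NotUniqueError. User is a hypothetical document, assuming a local MongoDB.
from mongoengine import Document, StringField, connect
from mongoengine.errors import NotUniqueError

connect('example_db')


class User(Document):
    email = StringField(unique=True)


User(email='ada@example.com').save()
try:
    User(email='ada@example.com').save()  # second insert hits the unique index
except NotUniqueError:
    pass  # duplicate key reported by the driver, translated by save()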