def delete(self):
    """Delete the entities matched by this query.

    When an explicit primary-key list (``included_pks``) is present,
    those keys are used directly, skipping any ``None`` placeholders;
    otherwise the keys come from ``self.fetch()``. No datastore call is
    issued when there is nothing to delete.
    """
    pks = self.included_pks
    if pks is None:
        keys = self.fetch()
    else:
        keys = [pk for pk in pks if pk is not None]
    if keys:
        Delete(keys)
def delete(self):
    """Delete the entities matched by this query.

    ``pk_filters`` (when set) supplies the keys directly, with ``None``
    entries dropped; otherwise the keys come from ``self.fetch()``.
    ``Delete`` is only called when at least one key remains.
    """
    filters = self.pk_filters
    keys = self.fetch() if filters is None else [f for f in filters if f is not None]
    if keys:
        Delete(keys)
def delete(self):
    """Delete the entities matched by this query.

    With an explicit PK list (``included_pks``), use it directly,
    skipping ``None`` placeholders. Otherwise run the query and pull
    each result row's primary-key column out of the returned dicts.
    """
    if self.included_pks is None:
        # The pk-column lookup stays inside the comprehension so it is
        # only evaluated when the query actually returned rows.
        keys = [
            row[self.query.get_meta().pk.column]
            for row in self.fetch()
        ]
    else:
        keys = [pk for pk in self.included_pks if pk is not None]
    if keys:
        Delete(keys)
def delete_all_entities():
    """Delete every non-metadata entity in every namespace.

    Kinds whose names start with ``'__'`` (datastore metadata) are
    skipped. Each kind is drained in keys-only batches of 200 until no
    results remain.
    """
    def purge(kind):
        # Repeatedly fetch and delete one batch until the kind is empty.
        while True:
            keys = Query(kind=kind, keys_only=True).Get(200)
            if not keys:
                return
            Delete(keys)

    for ns in get_namespaces():
        set_namespace(ns)
        for kind in get_kinds():
            if not kind.startswith('__'):
                purge(kind)
def cleanup(cls, datastore_key):
    """Delete this marker's descendants under ``datastore_key``.

    Runs a kindless, keys-only ancestor query (the kinds are unknown
    because any field using ``contains`` may have created children) and
    deletes every descendant whose key name equals ``cls.OPERATOR``.
    """
    query = Query(keys_only=True, namespace=datastore_key.namespace())
    query = query.Ancestor(datastore_key)
    doomed = [key for key in query.Run() if key.name() == cls.OPERATOR]
    Delete(doomed)
def delete_all_entities():
    """Delete every non-metadata entity in every namespace.

    Metadata kinds (names starting with ``'__'``) are skipped; each
    remaining kind is drained in keys-only batches of 200.
    """
    from google.appengine.api.datastore import Delete, Query
    from google.appengine.api.namespace_manager import set_namespace
    from google.appengine.ext.db.metadata import get_kinds, get_namespaces

    for ns in get_namespaces():
        set_namespace(ns)
        for kind in get_kinds():
            if kind.startswith('__'):
                continue
            keys = Query(kind=kind, keys_only=True).Get(200)
            while keys:
                Delete(keys)
                keys = Query(kind=kind, keys_only=True).Get(200)
def _release_identifiers(identifiers, namespace):
    """Batch-delete the UniqueMarker entities for ``identifiers``.

    One datastore key is built per identifier under the UniqueMarker
    kind within ``namespace``, and all are deleted in a single call.
    """
    marker_keys = [
        Key.from_path(UniqueMarker.kind(), identifier, namespace=namespace)
        for identifier in identifiers
    ]
    Delete(marker_keys)
def txn():
    """Transaction body: batch-delete the entities behind ``markers``.

    NOTE(review): ``markers`` is a closure variable from the enclosing
    scope — confirm it is bound before this runs.
    """
    doomed = [marker.key() for marker in markers]
    Delete(doomed)
def delete():
    """Delete the UniqueMarker entities named by the enclosing scope's
    ``identifiers`` (closure variable, default namespace)."""
    Delete([Key.from_path(UniqueMarker.kind(), name) for name in identifiers])
def delete(marker):
    """Delete a single marker entity from the datastore."""
    marker_key = marker.key()
    Delete(marker_key)
def defer(obj, *args, **kwargs):
    """ This is a replacement for google.appengine.ext.deferred.defer which
        doesn't suffer the bug where tasks are deferred non-transactionally
        when they hit a certain limit.

        It also *always* uses an entity group, unless you pass
        _small_task=True in which case it *never* uses an entity group (but
        you are limited by 100K)
    """
    from google.appengine.ext.deferred.deferred import (
        run_from_datastore,
        serialize,
        taskqueue,
        _DeferredTaskEntity,
        _DEFAULT_URL,
        _TASKQUEUE_HEADERS,
        _DEFAULT_QUEUE
    )

    # Task-level options are passed in with a leading underscore so they
    # can't collide with keyword arguments destined for ``obj`` itself.
    KWARGS = {
        "countdown", "eta", "name", "target", "retry_options"
    }

    taskargs = {x: kwargs.pop(("_%s" % x), None) for x in KWARGS}
    taskargs["url"] = kwargs.pop("_url", _DEFAULT_URL)

    transactional = kwargs.pop("_transactional", False)
    small_task = kwargs.pop("_small_task", False)
    wipe_related_caches = kwargs.pop("_wipe_related_caches", True)

    taskargs["headers"] = dict(_TASKQUEUE_HEADERS)
    taskargs["headers"].update(kwargs.pop("_headers", {}))
    queue = kwargs.pop("_queue", _DEFAULT_QUEUE)

    if wipe_related_caches:
        # _wipe_caches mutates args/kwargs in place, so round-trip the
        # args tuple through a list.
        args = list(args)
        _wipe_caches(args, kwargs)
        args = tuple(args)

    pickled = serialize(obj, *args, **kwargs)

    key = None
    try:
        # Always use an entity group unless this has been
        # explicitly marked as a small task: the entity is written
        # speculatively in case the payload is too large for the queue.
        if not small_task:
            key = _DeferredTaskEntity(data=pickled).put()

        # Defer the task
        task = taskqueue.Task(payload=pickled, **taskargs)
        ret = task.add(queue, transactional=transactional)

        # Delete the key as it wasn't needed (the payload fit inline)
        if key:
            Delete(key)

        return ret
    except taskqueue.TaskTooLargeError:
        # Payload too big for the queue. Fall back to running it from
        # the datastore entity written above — unless the caller asked
        # for a small task, in which case there is no entity to fall
        # back to and the error propagates.
        if small_task:
            raise

        pickled = serialize(run_from_datastore, str(key))
        task = taskqueue.Task(payload=pickled, **taskargs)
        # This is the line that fixes a bug in the SDK. The SDK
        # code doesn't pass transactional here.
        return task.add(queue, transactional=transactional)
    except:  # noqa
        # Any other exception? Delete the key so the speculative
        # entity doesn't leak, then re-raise.
        if key:
            Delete(key)
        raise
def _map_entity(self, entity):
    """Delete ``entity`` by key; an already-missing entity is ignored.

    EntityNotFoundError is swallowed — an entity that no longer exists
    needs no further work.
    """
    key = entity.key()
    try:
        Delete(key)
    except datastore_errors.EntityNotFoundError:
        pass