def execute(self):
    """Delete the entities matched by the wrapped select query.

    Runs the select, re-fetches the matched entities through QueryByKeys
    (so the unique caching layer sees them), releases unique-constraint
    markers where enabled, evicts the keys from the cache, and finally
    issues one batched datastore delete.
    """
    self.select.execute()

    # This is a little bit more inefficient than just doing a keys_only query
    # and sending it to delete, but I think this is the sacrifice to make for
    # the unique caching layer.
    def spawn_query(kind, key):
        # TODO: is the namespace necessary if we're passing the key?
        qry = Query(kind, namespace=key.namespace() or None)
        qry["__key__ ="] = key
        return qry

    queries = [spawn_query(x.key().kind(), x.key()) for x in self.select.results]
    if not queries:
        return

    # Loop-invariant: whether constraint checks apply depends only on the
    # model, so evaluate it once instead of once per entity.
    check_constraints = constraints.constraint_checks_enabled(self.model)

    keys = []
    for entity in QueryByKeys(self.model, queries, [], self.namespace).Run():
        keys.append(entity.key())

        # Delete constraints if that's enabled
        if check_constraints:
            constraints.release(self.model, entity)

    caching.remove_entities_from_cache_by_key(keys, self.namespace)
    datastore.Delete(keys)
def execute(self):
    """Run the wrapped select, then delete every matched entity.

    Deliberately re-fetches via QueryByKeys rather than a keys-only query:
    slightly slower, but it keeps the unique caching layer consistent.
    """
    self.select.execute()

    def key_query(kind, key):
        query = Query(kind)
        query["__key__ ="] = key
        return query

    queries = [
        key_query(self.select.db_table, result.key())
        for result in self.select.results
    ]
    if not queries:
        return

    deleted_keys = []
    for entity in QueryByKeys(self.select.model, queries, []).Run():
        deleted_keys.append(entity.key())
        # Release unique-constraint markers when the model uses them
        if constraints.constraint_checks_enabled(self.select.model):
            constraints.release(self.select.model, entity)
        caching.remove_entity_from_cache_by_key(entity.key())

    datastore.Delete(deleted_keys)
def delete_batch(key_slice):
    """Process one batch of keys for a polymodel-aware delete.

    Fetches the entities, strips this table's polymodel class info, then
    deletes entities left with no 'class' and re-puts the rest. Evicts every
    touched key from the cache. Returns the number of entities processed.
    """
    entities = datastore.Get(key_slice)

    # FIXME: We need to make sure the entity still matches the query!
    # entities = (x for x in entities if utils.entity_matches_query(x, self.select.gae_query))

    to_delete, to_update, updated_keys = [], [], []

    for entity in entities:
        if entity is None:
            continue  # key no longer exists, nothing to do

        wipe_polymodel_from_entity(entity, self.table_to_delete)

        if entity.get('class'):
            # Still carries other polymodel class info: keep it, just updated
            to_update.append(entity)
        else:
            to_delete.append(entity)
            constraints.release(self.model, entity)

        updated_keys.append(entity.key())

    datastore.DeleteAsync([x.key() for x in to_delete])
    datastore.PutAsync(to_update)

    caching.remove_entities_from_cache_by_key(updated_keys, self.namespace)
    return len(updated_keys)
def delete_batch(key_slice):
    """Handle one slice of keys for the delete operation.

    Entities whose polymodel 'class' list ends up empty are deleted;
    the rest are re-put with this table's class stripped. Returns how
    many entities were processed in this batch.
    """
    entities = datastore.Get(key_slice)

    # FIXME: We need to make sure the entity still matches the query!
    # entities = (x for x in entities if utils.entity_matches_query(x, self.select.gae_query))

    deletions = []
    updates = []
    touched_keys = []

    # Go through the entities
    for entity in entities:
        if entity is None:
            continue
        wipe_polymodel_from_entity(entity, self.table_to_delete)
        if not entity.get('class'):
            deletions.append(entity)
            constraints.release(self.model, entity)
        else:
            updates.append(entity)
        touched_keys.append(entity.key())

    datastore.DeleteAsync([each.key() for each in deletions])
    datastore.PutAsync(updates)

    caching.remove_entities_from_cache_by_key(touched_keys, self.namespace)
    return len(touched_keys)
def execute(self):
    """Execute the select, then delete everything it matched.

    Re-fetching through QueryByKeys is a little more inefficient than a
    keys-only query piped straight into delete, but that is the price of
    keeping the unique caching layer correct.
    """
    self.select.execute()

    def build_key_query(kind, key):
        # TODO: is the namespace necessary if we're passing the key?
        query = Query(kind, namespace=key.namespace() or None)
        query["__key__ ="] = key
        return query

    queries = [
        build_key_query(result.key().kind(), result.key())
        for result in self.select.results
    ]
    if not queries:
        return

    keys = []
    for entity in QueryByKeys(self.model, queries, [], self.namespace).Run():
        keys.append(entity.key())
        # Delete constraints if that's enabled
        if constraints.constraint_checks_enabled(self.model):
            constraints.release(self.model, entity)

    caching.remove_entities_from_cache_by_key(keys, self.namespace)
    datastore.Delete(keys)
def execute(self):
    """Delete every entity returned by the select query.

    For each result we collect its key, release its unique-constraint
    markers and fire the entity_deleted signal, then issue a single
    batched datastore delete.
    """
    self.select.execute()

    # This is a little bit more inefficient than just doing a keys_only
    # query and sending it to delete, but I think this is the sacrifice
    # to make for the unique caching layer.
    keys_to_delete = []
    for entity in self.select.results:
        keys_to_delete.append(entity.key())
        constraints.release(self.select.model, entity)
        entity_deleted.send(sender=self.select.model, entity=entity)

    datastore.Delete(keys_to_delete)
def execute(self):
    """Run the select, then delete the matched entities in one batch.

    This is a little bit more inefficient than just doing a keys_only query
    and sending it to delete, but I think this is the sacrifice to make for
    the unique caching layer.
    """
    self.select.execute()

    base_query = Query(self.select.model._meta.db_table)
    result_keys = [result.key() for result in self.select.results]

    keys = []
    for entity in QueryByKeys(base_query, result_keys, []).Run():
        key = entity.key()
        keys.append(key)
        constraints.release(self.select.model, entity)
        caching.remove_entity_from_context_cache_by_key(key)

    datastore.Delete(keys)
def delete_batch(key_slice):
    """Process one slice of keys: delete, update, and clean up indexes.

    Entities with no remaining polymodel 'class' entries are deleted
    (with constraint release when enabled); the rest are re-put with this
    table's class info stripped. Special-index entries for deleted keys
    are cleaned up afterwards. Returns the number of entities processed.
    """
    entities = rpc.Get(key_slice)

    # FIXME: We need to make sure the entity still matches the query!
    # entities = (x for x in entities if utils.entity_matches_query(x, self.select.gae_query))

    keys_to_delete = []
    entities_to_update = []
    processed_keys = []

    for entity in entities:
        if entity is None:
            continue

        wipe_polymodel_from_entity(entity, self.table_to_delete)

        if entity.get('class'):
            entities_to_update.append(entity)
        else:
            keys_to_delete.append(entity.key())
            if constraints_enabled:
                constraints.release(self.model, entity)

        processed_keys.append(entity.key())

    rpc.DeleteAsync(keys_to_delete)
    rpc.PutAsync(entities_to_update)

    # Clean up any special index things that need to be cleaned
    for indexer in indexers_for_model(self.model):
        for deleted_key in keys_to_delete:
            indexer.cleanup(deleted_key)

    caching.remove_entities_from_cache_by_key(
        processed_keys, self.namespace
    )
    return len(processed_keys)
def release_constraints(entity):
    # Free the unique-constraint markers held for this entity; does
    # nothing when constraint checking is disabled.
    if not constraints_enabled:
        return
    constraints.release(self.model, entity)