def get(self):
    cacheclient = memcache.Client()
    future = getAlertKeySetAndFetchAlertsAtom(cacheclient)
    oldkeys, xml = future.get_result()

    parser = capparser.Parser()
    parser.alertFactory = lambda: models.Alert()

    def setAlertId(alert, id):
        alert.key = ndb.Key(models.Alert, id)

    parser.setAlertId = setAlertId
    parser.geoPtFactory = models.GeoPt

    counter = [0, 0]
    curkeys = set()

    def onAlertCreated(alert):
        curkeys.add(alert.key)
        if alert.key in oldkeys:
            oldkeys.remove(alert.key)
            counter[0] += 1
        else:
            alert.put_async()
            counter[1] += 1

    parser.onAlertCreated = onAlertCreated
    parser.parse(xml)

    ndb.delete_multi_async(oldkeys)
    self.response.write("Dupes ignored %d, put %d, deleted %d<br>\r\n"
                        % (counter[0], counter[1], len(oldkeys)))
    cacheclient.set('alertkeyset', curkeys)

def post(self):
    user_key = users.user_key(self.user, create=False)
    request = json.loads(self.request.body)
    volume_ids = request['volumes']
    logging.info('Removing subscriptions: %r', volume_ids)
    results = defaultdict(list)
    keys = [
        subscriptions.subscription_key(volume_id, user=user_key, create=False)
        for volume_id in volume_ids
    ]
    # prefetch for efficiency; the key.get() calls below hit the context cache
    ndb.get_multi(keys)
    candidates = []
    for key in keys:
        subscription = key.get()
        if subscription:
            candidates.append(key)
        else:
            results['skipped'].append(key.id())
    logging.info('%d candidates, %d volumes', len(candidates), len(volume_ids))
    ndb.delete_multi_async(candidates)
    response = {
        'status': 200,
        'message': 'removed %d subscriptions' % len(candidates),
        'results': [key.id() for key in candidates],
    }
    self.response.write(json.dumps(response))

def dispatch(self):
    self.__scope = None
    auth_token = self.request.cookies.get('auth_token')
    if users.is_current_user_admin():
        self.__locked = False
        if self.request.get('logg_ut') == 'true':
            self.redirect(users.create_logout_url('/prioriter'))
        else:
            self.__scope = TravelGrantsApplication.query(
                TravelGrantsApplication.application_year == myapp.APPLICATION_YEAR)
    elif auth_token:
        self.__locked = myapp.time_locked(11, 29, 5)
        auth_token = SHA256.new(auth_token).hexdigest()
        if self.request.get('logg_ut') == 'true':
            ndb.delete_multi_async(
                Otp.query(
                    ndb.OR(Otp.token == auth_token,
                           Otp.valid_until < datetime.datetime.now())
                ).fetch(options=ndb.QueryOptions(keys_only=True)))
            self.response.delete_cookie('auth_token')
            self.redirect('/prioriter')
        else:
            otp = get_otp_by_token(auth_token)
            if otp:
                self.__scope = TravelGrantsApplication.query(ndb.AND(
                    TravelGrantsApplication.learning_association == otp.learning_association,
                    TravelGrantsApplication.application_year == myapp.APPLICATION_YEAR))
                otp.put()  # Refresh expiration
    super(PrioritizeHandler, self).dispatch()

def delete_for_feed(cls, feed):
    more = True
    cursor = None
    while more:
        entries, cursor, more = yield cls.latest_for_feed(feed).fetch_page_async(
            25, start_cursor=cursor)
        entries_keys = [x.key for x in entries]
        ndb.delete_multi_async(entries_keys)

def post(self, oid):
    '''Delete an obituary.'''
    oid = int(oid)
    assert oid, 'oid required'
    results = []
    # create key for the obituary
    # obit_key = ndb.Key(models.Obituary, oid)
    obit = models.Obituary.get_by_id(oid)
    # find all references to the obituary and delete them too
    bookmark_keys = models.Bookmark.query(
        models.Bookmark.obit_key == obit.key).iter(keys_only=True)
    results.extend(ndb.delete_multi_async(bookmark_keys))
    results.extend(ndb.delete_multi_async(obit.narrative_keys))
    results.extend(ndb.delete_multi_async(obit.message_keys))
    # grab the attached photo references and delete them along with the
    # referenced blob keys
    photos = obit.photos
    logging.info(type(photos))
    blob_keys = []
    photo_keys = []
    for photo in photos:
        logging.info(photo)
        blob_keys.append(photo.img_key)
        photo_keys.append(photo.key)
    # delete the photo entities
    results.extend(ndb.delete_multi_async(photo_keys))
    # delete the blobs
    blobstore.delete(blob_keys)
    # delete the obit
    obit.key.delete()
    # get all the results
    r = [f.get_result() for f in results]

def clean_unborn():
    """Asynchronously removes tickets that were technically created but
    never modified for the first time."""
    # Makes me sick, but ndb only understands == True or == False
    unborn_ticket_keys = Ticket.query(Ticket.born == False).fetch(
        keys_only=True)
    ndb.delete_multi_async(unborn_ticket_keys)

def delete_game(self, game):
    tokens = Token.query(Token.game == game.key)
    keys = [token.key for token in tokens]
    votes = Vote.query(Vote.game == game.key)
    keys.extend([vote.key for vote in votes])
    votes = SelfVote.query(SelfVote.game == game.key)
    keys.extend([vote.key for vote in votes])
    ndb.delete_multi_async(keys)
    game.key.delete()

def delete(self):
    delete = []
    q = Asset.query(ancestor=self.key)
    for asset in q.fetch():
        asset.delete_asset()
        delete.append(asset.key)
    ndb.delete_multi_async(delete)
    return super(AssetedModel, self).delete()

def delete_all(cls, user_key):
    """Delete all of the notes that are associated with ``user_key``.

    :param user_key: Key of the :class:`google.appengine.api.users.User`
        for which to delete the notes.
    :type user_key: :class:`google.appengine.ext.db.Key`
    """
    keys = cls._get(user_key).iter(keys_only=True)
    ndb.delete_multi_async(keys)

def PutPlayers(self, player_list, old_no_pairs):
    '''Creates or updates PlayerPair entities corresponding to each player
    pair in this tournament (1 ... no_pairs).

    This method saves any useful information from player_list and puts it
    into Datastore as a child of this Tournament. If the number of pairs has
    changed, generates a unique (for this tournament) id associated with each
    new pair, but keeps existing pair codes the same. PlayerPairs are put
    asynchronously, so a caller of this method should be decorated with
    @ndb.toplevel.

    Args:
        player_list: list of dicts with keys pair_no (req), name (opt), and
            email (opt). Must have same length as current number of pairs.
        old_no_pairs: the total number of pairs this tournament used to have.
    '''
    pair_dict = {}
    if player_list:
        for player in player_list:
            pair_no = player['pair_no']
            del player['pair_no']
            if pair_dict.get(pair_no):
                pair_dict[pair_no].append(player)
            else:
                pair_dict[pair_no] = [player]

    existing_player_futures = self.GetAllPlayerPairsAsync(old_no_pairs)

    # If the number of players doesn't change, we just override some fields
    # in existing pairs. Otherwise, we delete existing pairs and create new
    # ones.
    if self.no_pairs > old_no_pairs:
        random_ids = self._RandomId(self.no_pairs - old_no_pairs)
    elif self.no_pairs < old_no_pairs:
        ndb.delete_multi_async([
            existing_player_futures[i].get_result().key
            for i in xrange(self.no_pairs, old_no_pairs)
        ])

    # Then create a PlayerPair and put it into Datastore for each possible
    # number. Use reversed to start with new players and give futures more
    # time to return.
    for i in reversed(range(1, self.no_pairs + 1)):
        pair_members = pair_dict.get(i)
        str_pair_members = json.dumps(pair_members) if pair_members else ''
        if i <= old_no_pairs:
            player_pair = existing_player_futures[i - 1].get_result()
            player_pair.players = str_pair_members
        else:
            player_pair = PlayerPair(players=str_pair_members,
                                     pair_no=i,
                                     id=random_ids[i - 1 - old_no_pairs],
                                     parent=self.key)
        player_pair.key = PlayerPair.CreateKey(self, i)
        player_pair.put_async()

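Because PutPlayers issues put_async() calls it never waits on, the @ndb.toplevel requirement in the docstring is load-bearing. A minimal sketch of a hypothetical caller follows; the handler name, route, and payload shape are assumptions, not from the source:

# Illustrative caller only. The @ndb.toplevel decorator makes the request
# wait for PutPlayers' fire-and-forget put_async() calls before responding.
class SetPlayersHandler(webapp2.RequestHandler):

    @ndb.toplevel
    def post(self, tournament_id):
        tournament = Tournament.get_by_id(int(tournament_id))
        old_no_pairs = tournament.no_pairs      # remember the previous count
        request = json.loads(self.request.body)
        tournament.no_pairs = request['no_pairs']
        tournament.PutPlayers(request['players'], old_no_pairs)
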
def cleanup_and_get_new_connection_token(cls, user_id):
    # note: ensure user is logged in and has display name before calling
    # this function
    if user_id:
        # delete any existing connect token for the user
        ndb.delete_multi_async(cls.fetch_by_user(user_id))
        # create a new token and return it
        token = enki.libutil.generate_connect_code()
        entity = cls(token=token, user_id=int(user_id))
        entity.put()
        return token
    return None

def cleanup_and_get_new_connection_token( user_id ):
    # note: ensure user is logged in and has display name before calling
    # this function
    if user_id:
        # delete any existing connect token for the user
        ndb.delete_multi_async( fetch_EnkiModelRestAPIConnectToken_by_user( user_id ))
        # create a new token and return it
        token = generate_connect_code()
        entity = EnkiModelRestAPIConnectToken( token = token, user_id = int( user_id ))
        entity.put()
        return token
    return None

def delete(self):
    delete = []
    key = self.key
    # Collect items for deletion
    delete += Subscription.query(ancestor=key).fetch(keys_only=True)
    delete += Notification.query(ancestor=key).fetch(keys_only=True)
    delete += Saved.query(ancestor=key).fetch(keys_only=True)
    ndb.delete_multi_async(delete)
    super(User, self).delete()

def _DeleteTestData(test_key):
    logging.info('DELETING TEST DATA FOR %s', utils.TestPath(test_key))
    futures = []
    num_tests_processed = 0
    finished = True
    descendants = list_tests.GetTestDescendants(test_key)
    for descendant in descendants:
        rows = graph_data.GetLatestRowsForTest(
            descendant, _ROWS_TO_DELETE_AT_ONCE, keys_only=True)
        if rows:
            futures.extend(ndb.delete_multi_async(rows))
            finished = False
            num_tests_processed += 1
            if num_tests_processed > _MAX_DELETIONS_PER_TASK:
                break
    # Only delete TestMetadata entities after all Row entities have been
    # deleted.
    if finished:
        descendants = ndb.get_multi(descendants)
        for descendant in descendants:
            _SendNotificationEmail(descendant)
            futures.append(descendant.key.delete_async())
    ndb.Future.wait_all(futures)
    return finished

def Reset(self):
    """Resets all policy (i.e. votes, rules, score) for the target blockable.

    Raises:
        BlockableNotFound: The target blockable ID is not a known Blockable.
    """
    logging.info('Resetting blockable: %s', self.blockable_id)

    self.blockable = base.Blockable.get_by_id(self.blockable_id)
    votes = self.blockable.GetVotes()

    # Delete existing votes.
    delete_futures = ndb.delete_multi_async(vote.key for vote in votes)

    # Store old vote entities with a different key indicating that they are
    # deactivated so they won't be counted towards the blockable's score.
    archived_votes = votes
    for vote in archived_votes:
        vote.key = base.Vote.GetKey(
            vote.blockable_key, vote.user_key, in_effect=False)
    ndb.put_multi_async(archived_votes)

    # Disable all existing rules.
    existing_rules = self.blockable.GetRules()
    for rule in existing_rules:
        rule.MarkDisabled()
    ndb.put_multi_async(existing_rules)

    # Create REMOVE-type rules from the existing blockable rules.
    self._GenerateRemoveRules(existing_rules)

    # Ensure past votes are deleted and then reset the blockable score.
    ndb.Future.wait_all(delete_futures)
    self.blockable.ResetState()

def _batch_write(self):
    """Writes updates and deletes entities in a batch."""
    logging.debug("Batch writing")
    if self.to_put and len(self.to_put) > 0:
        put_rpcs = ndb.put_multi_async(self.to_put)
    else:
        put_rpcs = None

    if self.to_delete and len(self.to_delete) > 0:
        delete_rpcs = ndb.delete_multi_async(self.to_delete)
    else:
        delete_rpcs = None

    if self.blobstore_to_delete and len(self.blobstore_to_delete) > 0:
        blobstore_delete_rpc = blobstore.delete_async(self.blobstore_to_delete)
    else:
        blobstore_delete_rpc = None

    if put_rpcs:
        for put_rpc in put_rpcs:
            put_rpc.wait()
        self.to_put = []

    if delete_rpcs:
        for delete_rpc in delete_rpcs:
            delete_rpc.wait()
        self.to_delete = []

    if blobstore_delete_rpc:
        blobstore_delete_rpc.wait()
        self.blobstore_to_delete = []

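The method above assumes something else fills self.to_put, self.to_delete, and self.blobstore_to_delete between calls. A hedged sketch of such a driver, living on the same class; SomeModel, is_stale(), and the batch size are illustrative names, not from the original code:

# Hypothetical driver for _batch_write(); flushes accumulated work in chunks.
def _process_all(self):
    BATCH_SIZE = 500
    for entity in SomeModel.query():
        if is_stale(entity):
            self.to_delete.append(entity.key)
        else:
            entity.processed = True
            self.to_put.append(entity)
        if len(self.to_put) + len(self.to_delete) >= BATCH_SIZE:
            self._batch_write()
    self._batch_write()  # flush the remainder
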
def _DeleteTestData(test_key, notify):
    futures = []
    num_tests_processed = 0
    more = False
    descendants = list_tests.GetTestDescendants(test_key)
    for descendant in descendants:
        rows = graph_data.GetLatestRowsForTest(
            descendant, _ROWS_TO_DELETE_AT_ONCE, keys_only=True)
        if rows:
            futures.extend(ndb.delete_multi_async(rows))
            more = True
            num_tests_processed += 1
            if num_tests_processed > _MAX_DELETIONS_PER_TASK:
                break
        if not more:
            more = _DeleteTestHistogramData(descendant)
    # Only delete TestMetadata entities after all Row entities have been
    # deleted.
    if not more:
        descendants = ndb.get_multi(descendants)
        for descendant in descendants:
            _SendNotificationEmail(descendant, notify)
            futures.append(descendant.key.delete_async())
    ndb.Future.wait_all(futures)
    return not more

def _DeleteHistogramClassData(cls):
    query = cls.query(cls.test == test_key)
    keys = yield query.fetch_async(
        limit=_MAX_DELETIONS_PER_TASK,
        use_cache=False,
        use_memcache=False,
        keys_only=True)
    yield ndb.delete_multi_async(keys)
    raise ndb.Return(bool(keys))

def _assign_task_num(time_fn=datetime.datetime.utcnow):
    expired_keys = []
    unassigned = []
    used_task_nums = []
    time_now = time_fn()
    expired_time = time_now - datetime.timedelta(
        seconds=shared.INSTANCE_EXPIRE_SEC)
    for entity in shared.Instance.query():
        # Don't reassign expired task_num right away to avoid races.
        if entity.task_num >= 0:
            used_task_nums.append(entity.task_num)
        # At the same time, don't assign task_num to expired entities.
        if entity.last_updated < expired_time:
            expired_keys.append(entity.key)
            shared.expired_counter.increment()
            logging.debug(
                'Expiring %s task_num %d, inactive for %s',
                entity.key.id(), entity.task_num,
                time_now - entity.last_updated)
        elif entity.task_num < 0:
            shared.started_counter.increment()
            unassigned.append(entity)

    logging.debug('Found %d expired and %d unassigned instances',
                  len(expired_keys), len(unassigned))

    used_task_nums = sorted(used_task_nums)
    for entity, task_num in zip(unassigned, find_gaps(used_task_nums)):
        entity.task_num = task_num
        logging.debug('Assigned %s task_num %d', entity.key.id(), task_num)
    futures_unassigned = ndb.put_multi_async(unassigned)
    futures_expired = ndb.delete_multi_async(expired_keys)
    ndb.Future.wait_all(futures_unassigned + futures_expired)
    logging.debug('Committed all changes')

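find_gaps() is not shown in this snippet. One plausible implementation consistent with how it is used above (an infinite generator of unused non-negative task numbers, truncated by zip()) might look like this; it is an assumption, not the project's actual helper:

# Plausible sketch of find_gaps(): yield the non-negative integers not
# present in used_task_nums, smallest first, forever. zip(unassigned, ...)
# above stops once the finite unassigned list is exhausted.
def find_gaps(used_task_nums):
    used = set(used_task_nums)
    candidate = 0
    while True:
        while candidate in used:
            candidate += 1
        yield candidate
        candidate += 1
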
def update_auth_db():
    # AuthDB changed since 'new_auth_db_snapshot' transaction? Back off.
    state = model.get_replication_state()
    if state.auth_db_rev != current_state.auth_db_rev:
        return False, state

    # Update auth_db_rev in AuthReplicationState.
    state.auth_db_rev = auth_db_rev
    state.modified_ts = modified_ts

    # Apply changes.
    futures = []
    futures.extend(ndb.put_multi_async([state] + entites_to_put))
    futures.extend(ndb.delete_multi_async(keys_to_delete))

    # Wait for all pending futures to complete. Aborting the transaction with
    # outstanding futures is a bad idea (ndb complains in log about that).
    ndb.Future.wait_all(futures)

    # Raise an exception, if any.
    for future in futures:
        future.check_success()

    # Success.
    return True, state

def delete_all_data():
    """Deletes all timezones from datastore."""
    # Get a list of all the timezones in the datastore.
    tz_all = get_all_timezones()
    futures = ndb.delete_multi_async(
        [ndb.Key(TimeZoneData, x) for x in tz_all],
        use_memcache=False, use_cache=False)
    TimeZoneData.remove_from_cache(tz_all)
    # Delete the SpytzData timezone information.
    spytz_data = SpytzData.get_spytz_data()
    if spytz_data:
        spytz_data.all_tz = []
        spytz_data.put(use_memcache=False, use_cache=False)
    # And delete the SpytzData memcache object.
    memcache.delete(MC_ALLTZS, namespace=MC_NAMESPACE)
    ndb.Future.wait_all(futures)
    # Return the list of timezones deleted.
    return tz_all

def deleteFromDatastore(self, q_futures, woeid, timestamp):
    logging.info("Deleting from datastore")
    q_futures.extend(
        ndb.delete_multi_async(
            TrendWindow.query(
                TrendWindow.timestamp < timestamp,
                TrendWindow.woeid == woeid).fetch(keys_only=True)))

def txn():
    first_batch_call = not all([
        hasattr(self, "_nodes_to_put"),
        hasattr(self, "_indices_to_put"),
        hasattr(self, "_identifier_cache"),
        hasattr(self, "_keys_to_delete")
    ])
    if first_batch_call:
        self._nodes_to_put = dict()
        self._indices_to_put = dict()
        self._identifier_cache = dict()
        self._keys_to_delete = set()
    try:
        results = func()
        if first_batch_call and any([self._nodes_to_put,
                                     self._indices_to_put,
                                     self._keys_to_delete]):
            futures = ndb.delete_multi_async(self._keys_to_delete)
            ndb.put_multi(chain(self._nodes_to_put.itervalues(),
                                self._indices_to_put.itervalues()))
            [future.get_result() for future in futures]
    finally:
        if first_batch_call:
            del self._nodes_to_put
            del self._indices_to_put
            del self._identifier_cache
            del self._keys_to_delete
    return results

def DeleteAsync(key):
    internal_key = _NamespaceKey(key, namespace=datastore_hooks.INTERNAL)
    external_key = _NamespaceKey(key, namespace=datastore_hooks.EXTERNAL)
    yield (ndb.delete_multi_async([
        ndb.Key('CachedPickledString', internal_key),
        ndb.Key('CachedPickledString', external_key)
    ]), stored_object.DeleteAsync(key))

def _DeleteTestData(test_key):
    futures = []
    num_tests_processed = 0
    finished = True
    descendants = graph_data.Test.query(ancestor=test_key).fetch(keys_only=True)
    descendants.append(test_key)
    for descendant in descendants:
        rows = graph_data.Row.query(
            graph_data.Row.parent_test == descendant).fetch(
                keys_only=True, limit=_ROWS_TO_DELETE_AT_ONCE)
        if rows:
            futures.extend(ndb.delete_multi_async(rows))
            finished = False
            num_tests_processed += 1
            if num_tests_processed > _MAX_DELETIONS_PER_TASK:
                break
    # Only delete Test entities after all Row entities have been deleted.
    if finished:
        descendants = ndb.get_multi(descendants)
        for descendant in descendants:
            _SendNotificationEmail(descendant)
            futures.append(descendant.key.delete_async())
    ndb.Future.wait_all(futures)
    return finished

def delete_multi(q, bulk_size=100):
    while True:
        events = q.fetch(bulk_size, keys_only=True)
        if not events:
            break
        yield ndb.delete_multi_async(events)

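Since delete_multi() is a plain generator, each iteration deletes one batch and hands back that batch's futures. A minimal consumer might drain it like this; Event and cutoff are illustrative names, not from the source:

# Illustrative consumer. Waiting on each batch before advancing keeps the
# repeated q.fetch() above from re-reading keys whose deletes are in flight.
q = Event.query(Event.created < cutoff)
for batch_futures in delete_multi(q, bulk_size=100):
    ndb.Future.wait_all(batch_futures)
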
def delete(self):
    delete = []
    # Collect for deletion
    delete += Subscription.query(Subscription.feed == self.key).fetch(
        keys_only=True)
    delete += Saved.query(ancestor=self.key).fetch(keys_only=True)
    for article in Article.query(ancestor=self.key).fetch():
        asset = article.Asset()
        if asset:
            asset.delete_asset()
            delete.append(asset.key)
    ndb.delete_multi_async(delete)
    super(Feed, self).delete()

def _run(cls, key_id, model_name):
    try:
        Model = model.__dict__[model_name]
    except KeyError as e:
        raise deferred.PermanentTaskFailure(e)
    key_start = ndb.Key(Model, key_id)
    key_end = ndb.Key(Model, "{0}/\xff".format(key_id))
    query = Model.query(ndb.AND(Model.key >= key_start, Model.key <= key_end))
    cursor = None
    while True:
        results, cursor, more = yield query.fetch_page_async(
            page_size=100, keys_only=True, start_cursor=cursor)
        ndb.delete_multi_async(results)
        if not more:
            return

def DeleteAsync(key):
    unnamespaced_future = stored_object.DeleteAsync(key)
    # See the comment in stored_object.DeleteAsync() about this get().
    entities = yield ndb.get_multi_async([
        CachedPickledString.NamespacedKey(key, datastore_hooks.INTERNAL),
        CachedPickledString.NamespacedKey(key, datastore_hooks.EXTERNAL),
    ])
    keys = [entity.key for entity in entities if entity]
    yield (unnamespaced_future, ndb.delete_multi_async(keys))

def get(self):
    '''Clear the spoofed data.'''
    if not os.environ['SERVER_SOFTWARE'].startswith('Development'):
        return self.say("You can't be here.")
    event_key = ndb.Key(models.Event, 'G9b')
    e_fut = event_key.delete_async()
    c_keys = models.Contestant.query(ancestor=event_key).fetch(
        None, keys_only=True)
    c_futs = ndb.delete_multi_async(c_keys)
    ticket_sales = []
    for key in c_keys:
        ticket_sales.extend(
            models.TicketSale.query(ancestor=key).iter(keys_only=True))
    t_futs = ndb.delete_multi_async(ticket_sales)
    self.say(e_fut.get_result())
    self.say([f.get_result() for f in c_futs])
    self.say([f.get_result() for f in t_futs])
    return self.redirect('/e/spoof')

def testPinocchioEmojisAgainstDictionary(deleteGlossarioMissingInPinocchio=False):
    import gloss
    from collections import defaultdict

    PC = getPinocchioChapters()
    dictionaryGlosses = set(gloss.getAllGlossEmojis())
    pinocchioGlossesDict = defaultdict(list)
    for ch_num, chapter in enumerate(PC, 1):
        for line_num, row in enumerate(chapter, 1):
            charList = row[1]
            emojiWords = getEmojiWords(charList)
            for ew in emojiWords:
                emojiGloss = ''.join(ew)
                if emojiGloss not in dictionaryGlosses:
                    ew = removeFunctionalTagsAndMakeSingular(ew)
                    emojiGloss = ''.join(ew)
                    pinocchioGlossesDict[emojiGloss].append(
                        '{}.{}'.format(ch_num, line_num))
    pinocchioGlosses = set(pinocchioGlossesDict.keys())
    missingInDictionary = set(pinocchioGlosses).difference(dictionaryGlosses)
    missingInDictionaryIndexes = [
        '{}: {}'.format(e, ', '.join(pinocchioGlossesDict[e]))
        for e in missingInDictionary
    ]
    missingInPinocchio = set(dictionaryGlosses).difference(pinocchioGlosses)
    if deleteGlossarioMissingInPinocchio:
        from google.appengine.ext import ndb
        to_delete = []
        for e in missingInPinocchio:
            g = gloss.getGlossWithEmoji(e)
            assert g
            to_delete.append(g.key)
        print 'deleting {} glosses from dictionary not found in pinocchio'.format(
            len(to_delete))
        create_futures = ndb.delete_multi_async(to_delete)
        ndb.Future.wait_all(create_futures)
    pinocchioGlossesIndexes = [
        '{}: {}'.format(e, ', '.join(iList))
        for e, iList in pinocchioGlossesDict.iteritems()
    ]
    missingInDictionaryFile = LATEX_DIR_CHECK + '/MissingInDictionary.txt'
    missingInPinocchioFile = LATEX_DIR_CHECK + '/MissingInPinocchio.txt'
    pinocchioFile = LATEX_DIR_CHECK + '/PinocchioGlosses.txt'
    dictionaryFile = LATEX_DIR_CHECK + '/DictionaryGlosses.txt'
    for file, emojiList in zip(
        [missingInDictionaryFile, missingInPinocchioFile,
         pinocchioFile, dictionaryFile],
        [missingInDictionaryIndexes, missingInPinocchio,
         pinocchioGlossesIndexes, dictionaryGlosses]):
        with open(file, 'w') as f:
            for e in emojiList:
                f.write('{}\n'.format(e))

def deleteMense():
    more, cursor = True, None
    while more:
        to_delete = []
        keys, cursor, more = Mensa.query().fetch_page(
            1000, start_cursor=cursor, keys_only=True)
        for k in keys:
            to_delete.append(k)
        if to_delete:
            create_futures = ndb.delete_multi_async(to_delete)
            ndb.Future.wait_all(create_futures)

def deletePeople():
    more, cursor = True, None
    to_delete = []
    while more:
        keys, cursor, more = Person.query().fetch_page(
            1000, start_cursor=cursor, keys_only=True)
        for k in keys:
            if not k.id().startswith('T'):
                to_delete.append(k)
    if to_delete:
        print('Deleting {} entities'.format(len(to_delete)))
        create_futures = ndb.delete_multi_async(to_delete)
        ndb.Future.wait_all(create_futures)

def cleanup_item( self ):
    likelihood = 10  # deletion occurs with a probability of 1%
    number = random.randint( 1, 1000 )
    if number < likelihood:
        ndb.delete_multi_async( self.fetch_old_backoff_timers( 3 ))
        ndb.delete_multi_async( self.fetch_old_auth_tokens( 3 ))
        ndb.delete_multi_async( self.fetch_old_sessions( 30 ))

def resetAllEntities(test=False):
    for x in [RideOffer, Route]:  # Person, Feramta
        more, cursor = True, None
        total = 0
        while more:
            records, cursor, more = x.query().fetch_page(
                1000, keys_only=True, start_cursor=cursor)
            total += len(records)
            if records:
                create_futures = ndb.delete_multi_async(records)
                ndb.Future.wait_all(create_futures)
        print("Cleaned {} from {}".format(total, x.__name__))

def apply_import(revision, entities_to_put, keys_to_delete):
    """Transactionally puts and deletes a bunch of entities."""
    # DB changed between transactions, retry.
    if auth.get_auth_db_revision() != revision:
        return False
    # Apply mutations, bump revision number.
    futures = []
    futures.extend(ndb.put_multi_async(entities_to_put))
    futures.extend(ndb.delete_multi_async(keys_to_delete))
    ndb.Future.wait_all(futures)
    if any(f.get_exception() for f in futures):
        raise ndb.Rollback()
    auth.replicate_auth_db()
    return True

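apply_import() only makes sense inside a transaction: it re-checks the revision and aborts via ndb.Rollback(). A caller might wrap it roughly as below; prepare_import() is a hypothetical helper that re-reads the snapshot and rebuilds the mutation lists:

# Sketch of a retrying caller; prepare_import() is an assumption. xg=True is
# used because the puts and deletes span multiple entity groups.
revision, entities_to_put, keys_to_delete = prepare_import()
for _ in range(5):
    committed = ndb.transaction(
        lambda: apply_import(revision, entities_to_put, keys_to_delete),
        xg=True)
    if committed:
        break
    # Revision moved under us; rebuild the mutation lists and retry.
    revision, entities_to_put, keys_to_delete = prepare_import()
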
def remove(self, request):
    account = models.Account.get_by_id(request.id)
    if not account:
        raise endpoints.NotFoundException
    transactions_futures = ndb.delete_multi_async(
        [transaction.key for transaction in account.transactions])
    account_future = account.key.delete_async()
    for future in transactions_futures:
        future.get_result()
    account_future.get_result()
    return messages.message_types.VoidMessage()

def do_deleteall(query):
    cursor = None
    more = True
    count = 0
    futures = list()
    while more:
        events, cursor, more = query.fetch_page(
            100, start_cursor=cursor, keys_only=True, batch_size=100)
        count += len(events)
        ndb.Future.wait_all(futures)
        futures = ndb.delete_multi_async(
            events, use_cache=False, use_memcache=False)
    ndb.Future.wait_all(futures)
    return count

def dispatch(self):
    self.__scope = None
    auth_token = self.request.cookies.get('auth_token')
    if users.is_current_user_admin():
        self.__locked = False
        if self.request.get('logg_ut') == 'true':
            self.redirect(users.create_logout_url('/prioriter'))
        else:
            self.__scope = TravelGrantsApplication.query(
                TravelGrantsApplication.application_year == myapp.APPLICATION_YEAR)
    elif auth_token:
        self.__locked = myapp.time_locked(12, 2, 5)
        auth_token = SHA256.new(auth_token).hexdigest()
        if self.request.get('logg_ut') == 'true':
            ndb.delete_multi_async(
                Otp.query(
                    ndb.OR(Otp.token == auth_token,
                           Otp.valid_until < datetime.datetime.now())
                ).fetch(options=ndb.QueryOptions(keys_only=True)))
            self.response.delete_cookie('auth_token')
            self.redirect('/prioriter')
        else:
            otp = get_otp_by_token(auth_token)
            if otp:
                self.__scope = TravelGrantsApplication.query(ndb.AND(
                    TravelGrantsApplication.learning_association == otp.learning_association,
                    TravelGrantsApplication.application_year == myapp.APPLICATION_YEAR))
                otp.put()  # Refresh expiration
    super(PrioritizeHandler, self).dispatch()

def _MigrateTestRows(old_parent_key, new_parent_key):
    """Copies Row entities from one parent to another, deleting old ones.

    Args:
        old_parent_key: TestMetadata entity key of the test to move from.
        new_parent_key: TestMetadata entity key of the test to move to.

    Returns:
        Whether or not there's more work to do on the row data (via
        ndb.Return).
    """
    # In this function we'll build up lists of entities to put and delete
    # before returning Future objects for the entities being put and deleted.
    rows_to_put = []
    rows_to_delete = []

    # Add some Row entities to the lists of entities to put and delete.
    rows = graph_data.GetLatestRowsForTest(
        old_parent_key, _MAX_DATASTORE_PUTS_PER_PUT_MULTI_CALL)
    rows_to_put = yield [
        _GetOrCreate(graph_data.Row, r, r.key.id(), new_parent_key,
                     _ROW_EXCLUDE) for r in rows
    ]
    rows_to_delete = [r.key for r in rows]

    # Clear the cached revision range selector data for both the old and new
    # tests because it will no longer be valid after migration. The cache
    # should be updated with accurate data the next time it's set, which will
    # happen when someone views the graph.
    futures = ndb.put_multi_async(
        rows_to_put, use_cache=False, use_memcache=False)
    if rows_to_put:
        futures.append(rows_to_put[0].UpdateParentAsync())
    futures.extend(ndb.delete_multi_async(rows_to_delete))
    futures.append(
        graph_revisions.DeleteCacheAsync(utils.TestPath(old_parent_key)))
    futures.append(
        graph_revisions.DeleteCacheAsync(utils.TestPath(new_parent_key)))
    yield futures

    raise ndb.Return(bool(rows_to_put))

def _MoveRowsForTest(test_key):
    """Moves rows for the given test."""
    row_query = graph_data.Row.query(
        graph_data.Row.parent_test == test_key,
        graph_data.Row.revision > _MIN_TIMESTAMP)
    rows = row_query.fetch(limit=_NUM_ROWS)
    test_path = utils.TestPath(test_key)
    logging.info('Moving %d rows for test "%s".', len(rows), test_path)
    to_put = []
    to_delete = []
    for row in rows:
        new_row = _CopyRow(row, _ConvertTimestamp(row.revision))
        to_put.append(new_row)
        to_delete.append(row.key)
    put_futures = ndb.put_multi_async(to_put)
    delete_futures = ndb.delete_multi_async(to_delete)
    return put_futures + delete_futures

def clean_up_bots():
    """Cleans up expired leases."""
    # Maximum number of in-flight ndb.Futures.
    MAX_IN_FLIGHT = 50
    bot_ids = []
    deleted = {}
    for machine_type in MachineType.query(MachineType.num_pending_deletion > 0):
        logging.info('Deleting bots: %s',
                     ', '.join(sorted(machine_type.pending_deletion)))
        bot_ids.extend(machine_type.pending_deletion)
        deleted[machine_type.key] = machine_type.pending_deletion

    # Generate a few asynchronous requests at a time in order to
    # prevent having too many in-flight ndb.Futures at a time.
    futures = []
    while bot_ids:
        num_futures = len(futures)
        if num_futures < MAX_IN_FLIGHT:
            keys = [bot_management.get_info_key(bot_id)
                    for bot_id in bot_ids[:MAX_IN_FLIGHT - num_futures]]
            bot_ids = bot_ids[MAX_IN_FLIGHT - num_futures:]
            futures.extend(ndb.delete_multi_async(keys))
        ndb.Future.wait_any(futures)
        futures = [future for future in futures if not future.done()]
    if futures:
        ndb.Future.wait_all(futures)

    # There should be relatively few MachineType entities, so
    # just process them sequentially.
    # TODO(smut): Parallelize this.
    for machine_key, hostnames in deleted.iteritems():
        successfully_deleted = []
        for hostname in hostnames:
            if bot_management.get_info_key(hostname).get():
                logging.error('Failed to delete BotInfo: %s', hostname)
            else:
                successfully_deleted.append(hostname)
        logging.info('Deleted bots: %s',
                     ', '.join(sorted(successfully_deleted)))
        _clear_bots_pending_deletion(machine_key, hostnames)

def cleanup_after_bot(bot_root_key):
    """Removes all BotDimensions and BotTaskDimensions for this bot.

    Arguments:
        bot_root_key: ndb.Key to bot_management.BotRoot

    Do not clean up TaskDimensions. There could be pending tasks and there's
    a possibility that a bot with the same ID could come up afterward (low
    chance in practice but it's a possibility). In this case, if
    TaskDimensions is deleted, the pending task would not be correctly run
    even when a bot comes back online, as assert_bot_async() would fail to
    create the corresponding BotTaskDimensions.
    """
    q = BotTaskDimensions.query(ancestor=bot_root_key).iter(keys_only=True)
    futures = ndb.delete_multi_async(q)
    futures.append(ndb.Key(BotDimensions, 1, parent=bot_root_key).delete_async())
    _flush_futures(futures)

def delete_async(name, num_shards):
    """Asynchronously delete the named counter and all of its shards.

    Args:
        name: The name of the counter.
        num_shards: The number of shards in the counter.

    Returns:
        A list of Futures, one for each entity making up the counter.
    """
    all_keys = _all_keys(name, num_shards)
    delete_futs = ndb.delete_multi_async(all_keys)
    memcache.delete(name)
    return delete_futs

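Callers get the shard-delete futures back and decide when (or whether) to block. A minimal usage sketch; the counter name and shard count are illustrative:

# Delete a sharded counter and block until every shard entity is gone.
futures = delete_async('page-hits', num_shards=20)
ndb.Future.wait_all(futures)
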