Example #1
def cron_delete_old_bot_events():
    """Deletes very old BotEvent entites."""
    count = 0
    start = utils.utcnow()
    try:
        # Run for 4.5 minutes and schedule the cron job every 5 minutes. Running for
        # 9.5 minutes (out of 10 allowed for a cron job) results in 'Exceeded soft
        # private memory limit of 512 MB with 512 MB' even if this loop should be
        # fairly light on memory usage.
        time_to_stop = start + datetime.timedelta(seconds=int(4.5 * 60))

        # Order is by key, so it is naturally ordered by bot, which means the
        # operations will mainly operate on one root entity at a time.
        q = BotEvent.query(default_options=ndb.QueryOptions(
            keys_only=True)).filter(
                BotEvent.ts <= start - _OLD_BOT_EVENTS_CUT_OFF)
        more = True
        cursor = None
        while more:
            keys, cursor, more = q.fetch_page(10, start_cursor=cursor)
            ndb.delete_multi(keys)
            count += len(keys)
            if utils.utcnow() >= time_to_stop:
                break
        return count
    except runtime.DeadlineExceededError:
        pass
    finally:
        logging.info('Deleted %d entities', count)
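
The pattern above recurs throughout these examples: a keys-only query, page-wise deletion with cursors, and a wall-clock budget so the cron job exits cleanly. A minimal standalone sketch of the same pattern, assuming a hypothetical LogEntry model with a ts DateTimeProperty:

def delete_entries_older_than(cutoff, budget_secs=270):
    # Keys-only pages return ndb.Key objects, so no entity payload is loaded.
    q = LogEntry.query(
        LogEntry.ts <= cutoff,
        default_options=ndb.QueryOptions(keys_only=True))
    stop_at = utils.utcnow() + datetime.timedelta(seconds=budget_secs)
    deleted, cursor, more = 0, None, True
    while more and utils.utcnow() < stop_at:
        keys, cursor, more = q.fetch_page(100, start_cursor=cursor)
        ndb.delete_multi(keys)
        deleted += len(keys)
    return deleted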
Example #2
    def get_high_scores(self, request):
        """Return ordered high scores based on 1.more won 2.less guesses number"""
        qo = ndb.QueryOptions(limit=request.limitation)
        scoreQuery = Score.query().order(-Score.won, Score.guesses)
        scoreList = scoreQuery.fetch(10, options=qo)

        return ScoreForms(items=[score.to_form() for score in scoreList])
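
One subtlety worth flagging: in ndb, a limit passed directly to fetch() takes precedence over a limit set inside a QueryOptions object, which is why the fetch above passes only options. A short sketch of the two spellings:

qo = ndb.QueryOptions(limit=25)
scores = Score.query().fetch(options=qo)  # capped at 25 via QueryOptions
scores = Score.query().fetch(10)          # an explicit limit overrides options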
Example #3
def ajax_reports():
    offset = request.args.get('start', type=int)
    limit = request.args.get('length', type=int)
    search_term = request.args.get('search[value]')
    order_index = request.args.get('order[0][column]', type=int)
    order_dir = request.args.get('order[0][dir]')

    fields = [
        EmailReport.date_reported, EmailReport.reported_by,
        EmailReport.report_type, EmailReport.subject, EmailReport.status
    ]

    query = EmailReport.domain_query(g.domain)
    total_count = query.count()

    # Set up the correct ordering
    order_field = EmailReport.key
    if order_index > 0:
        order_field = fields[order_index - 1]
    if order_dir == 'desc':
        order_field = -order_field

    query_options = ndb.QueryOptions(limit=limit, offset=offset)

    results = query.order(order_field).fetch(options=query_options,
                                             projection=fields)
    results_json = [result.to_dict() for result in results]
    return jsonify({
        'draw': int(request.args.get('draw')),
        'recordsTotal': total_count,
        'recordsFiltered': total_count,
        'data': results_json
    })
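
QueryOptions(limit=..., offset=...) maps directly onto DataTables-style paging, but the datastore still walks past the skipped rows server-side, so deep offsets get progressively slower. A hedged sketch of the cursor-based alternative, assuming the caller round-trips an opaque token:

def page_reports(query, page_size, urlsafe_cursor=None):
    # Cursors resume exactly where the previous page ended instead of
    # re-skipping 'offset' rows on every request.
    start = ndb.Cursor(urlsafe=urlsafe_cursor) if urlsafe_cursor else None
    results, next_cursor, more = query.fetch_page(page_size, start_cursor=start)
    token = next_cursor.urlsafe() if next_cursor else None
    return results, token, more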
Example #4
def removePlayers():
    # A keys-only query iterates as ndb.Key objects, so the query itself can
    # be passed straight to ndb.delete_multi().
    playerKeys = Player.query(ancestor=ndb.Key('GameStatus', 'Bball'),
                              default_options=ndb.QueryOptions(keys_only=True))
    if playerKeys.count():
        ndb.delete_multi(playerKeys)
    setGameStatus(False)
    return True
Example #5
    def list(self, request):
        """Provides list of known bots.

    Deleted bots will not be listed.
    """
        logging.debug('%s', request)
        now = utils.utcnow()
        # Disable the in-process local cache. This is important, as there can be up
        # to a thousand entities loaded in memory, and this is a pure memory leak,
        # as there's no chance this specific instance will need these again,
        # therefore this leads to 'Exceeded soft memory limit' AppEngine errors.
        q = bot_management.BotInfo.query(default_options=ndb.QueryOptions(
            use_cache=False))
        try:
            q = bot_management.filter_dimensions(q, request.dimensions)
            q = bot_management.filter_availability(
                q, swarming_rpcs.to_bool(request.quarantined),
                swarming_rpcs.to_bool(request.in_maintenance),
                swarming_rpcs.to_bool(request.is_dead),
                swarming_rpcs.to_bool(request.is_busy))
        except ValueError as e:
            raise endpoints.BadRequestException(str(e))

        bots, cursor = datastore_utils.fetch_page(q, request.limit,
                                                  request.cursor)
        return swarming_rpcs.BotList(
            cursor=cursor,
            death_timeout=config.settings().bot_death_timeout_secs,
            items=[message_conversion.bot_info_to_rpc(bot) for bot in bots],
            now=now)
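
The use_cache=False option only disables ndb's in-process (context) cache for this one query; when a whole handler does large scans, the same policy can be set context-wide. A minimal sketch of both spellings:

# Per-query, as above:
q = bot_management.BotInfo.query(
    default_options=ndb.QueryOptions(use_cache=False))
# Context-wide, for the duration of the current request:
ndb.get_context().set_cache_policy(False)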
Example #6
    def make_directed_ndb_query(self, kind_class, keys_only=False):
        """Construct an NDB query for this key range, including the scan direction.

    Args:
      kind_class: An ndb.Model subclass.
      keys_only: bool, default False, use keys_only on Query?

    Returns:
      An ndb.Query instance.

    Raises:
      KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
    """
        assert issubclass(kind_class, ndb.Model)
        if keys_only:
            default_options = ndb.QueryOptions(keys_only=True)
        else:
            default_options = None
        query = kind_class.query(app=self._app,
                                 namespace=self.namespace,
                                 default_options=default_options)
        query = self.filter_ndb_query(query)
        if self.__get_direction(True, False):
            query = query.order(kind_class._key)
        else:
            query = query.order(-kind_class._key)
        return query
Example #7
def cron_delete_old_bot():
    """Deletes stale BotRoot entity groups."""
    start = utils.utcnow()
    # Run for 4.5 minutes and schedule the cron job every 5 minutes. Running for
    # 9.5 minutes (out of 10 allowed for a cron job) results in 'Exceeded soft
    # private memory limit of 512 MB with 512 MB' even if this loop should be
    # fairly light on memory usage.
    time_to_stop = start + datetime.timedelta(seconds=int(4.5 * 60))
    total = 0
    deleted = []
    try:
        q = BotRoot.query(default_options=ndb.QueryOptions(keys_only=True))
        for bot_root_key in q:
            # Check if it has any BotEvent left. If not, it means that the entity is
            # older than _OLD_BOT_EVENTS_CUT_OFF, so the whole thing can be deleted
            # now.
            # In particular, ignore the fact that BotInfo may still exist, since if
            # there's no BotEvent left, it's probably a broken entity or a forgotten
            # dead bot.
            if BotEvent.query(ancestor=bot_root_key).count(limit=1):
                continue
            deleted.append(bot_root_key.string_id())
            # Delete the whole group. An ancestor query will retrieve the entity
            # itself too, so no need to explicitly delete it.
            keys = ndb.Query(ancestor=bot_root_key).fetch(keys_only=True)
            ndb.delete_multi(keys)
            total += len(keys)
            if utils.utcnow() >= time_to_stop:
                break
        return total
    except runtime.DeadlineExceededError:
        pass
    finally:
        logging.info('Deleted %d entities from the following bots:\n%s', total,
                     ', '.join(sorted(deleted)))
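
The count(limit=1) call above is the cheap idiom for "does any child entity exist": it stops after the first match instead of counting the whole ancestor group. An equivalent spelling returns the key itself rather than a number:

# Both existence checks stop after a single index row.
has_events = BotEvent.query(ancestor=bot_root_key).count(limit=1) > 0
has_events = BotEvent.query(ancestor=bot_root_key).get(keys_only=True) is not None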
Example #8
def get_game_props(weekday=None):
    '''Return a GameProperties object for the current day of the week'''
    q = GameProperties.query(default_options=ndb.QueryOptions(keys_only=True))

    if weekday is None:
        weekday = today().weekday()

    dayKeys = q.filter(GameProperties.weekDay == weekday)
    if dayKeys.count():
        for dayProp in dayKeys:
            try:
                day = dayProp.get()
            except BadValueError as e:
                dayProp.delete()
                continue

            return day

    logging.info('no properties for today, initializing to default')

    day = GameProperties(
        weekDay=weekday,
        minNumPlayers=8,
        maxNumPlayers=12,
        provisionalNumPlayers=12,
        # openRosterHoursPrior = DEFAULT_ROSTER_OPEN_HOURS,
        # rosterStartTime = DEFAULT_ROSTER_START_TIME,
        # rosterOpenTime = DEFAULT_ROSTER_OPEN_TIME,
        # isGameDay = weekday in DEFAULT_GAME_DAYS
    )

    day.put()
    return day
Example #9
def yield_expired_task_to_run():
    """Yields the expired TaskToRun still marked as available."""
    # The query fetches tasks that reached expiration time recently
    # to avoid fetching all past tasks. It uses a large batch size
    # since the entities are very small and to reduce RPC overhead.
    opts = ndb.QueryOptions(batch_size=256)
    now = utils.utcnow()
    # The backsearch here is just to ensure that we find entities that were
    # missed earlier because the cron job couldn't keep up. In practice,
    # expiration_ts should not be more than 1 minute old (as the cron job runs
    # every minute), but keep the window large in case of an outage.
    cut_off = now - datetime.timedelta(hours=24)
    q = TaskToRun.query(TaskToRun.expiration_ts < now,
                        TaskToRun.expiration_ts > cut_off,
                        default_options=opts)
    total = 0
    skipped = 0
    try:
        for task in q:
            if not task.queue_number:
                skipped += 1
                logging.info(
                    '%s/%s: queue_number is None, but expiration_ts is %s.',
                    task.task_id, task.task_slice_index, task.expiration_ts)
                # Flush it, otherwise we'll keep on looping on it.
                task.expiration_ts = None
                task.put()
            else:
                yield task
                total += 1
    finally:
        logging.debug('Yielded %d tasks; skipped %d', total, skipped)
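
Note that batch_size only tunes how many results each underlying datastore RPC carries while iterating; unlike limit, it never changes which entities are returned. A compact sketch of the distinction:

opts = ndb.QueryOptions(batch_size=256)      # 256 entities per RPC, all results
for task in TaskToRun.query(default_options=opts):
    pass  # iteration is unbounded; only the RPC granularity changed
capped = TaskToRun.query().fetch(limit=256)  # at most 256 results, full stop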
Example #10
def yield_expired_task_to_run():
    """Yields the expired TaskToRun still marked as available."""

    # The query fetches tasks that reached expiration time recently
    # to avoid fetching all past tasks. It uses a large batch size
    # since the entities are very small and to reduce RPC overhead.
    def expire(q):
        total = 0
        try:
            for task in q:
                yield task
                total += 1
        finally:
            logging.debug('Yielded %d tasks', total)

    opts = ndb.QueryOptions(batch_size=256)
    now = utils.utcnow()
    # The backsearch here is just to ensure that we find entities that were
    # missed earlier because the cron job couldn't keep up. In practice,
    # expiration_ts should not be more than 1 minute old (as the cron job runs
    # every minute), but keep the window large in case of an outage.
    cut_off = now - datetime.timedelta(hours=24)
    q = TaskToRun.query(TaskToRun.expiration_ts < now,
                        TaskToRun.expiration_ts > cut_off,
                        default_options=opts)
    for task in expire(q):
        yield task
Example #11
    def post(self):
        # Get the client ID from the request environ.
        user_id = request.environ['USER_ID']
        client_id = user_id.get().detail_id

        print request.json['id']
        screen = Screen_Layout.get_by_id(request.json['id'])
        screen.name = request.json['name']
        prev_client_id = screen.client_id
        print client_id
        if prev_client_id != client_id:  # Later this should be replaced with a token check.
            return jsonify({"code": 400, "message": "Not authorized."})
        screen.location = request.json['location']

        prev_rows = screen.max_rows
        prev_cols = screen.max_columns

        if prev_rows != int(request.json['max_rows']) or prev_cols != int(
                request.json['max_columns']):
            screen.max_rows = int(request.json['max_rows'])
            screen.max_columns = int(request.json['max_columns'])
            # Deleting the categories of a seat after changing the screen structure.
            options = ndb.QueryOptions(keys_only=True)
            prev_categories = Category.query().filter(
                Category.screen_id == ndb.Key('Screen_Layout',
                                              request.json['id'])).fetch(
                                                  options=options)
            ndb.delete_multi(prev_categories)
            # We should add the new seat list for new seat grid and new categories for the updated Layout..
            seats = []
            categories = request.json['categories']
            print categories
            try:
                for each in categories:
                    category = Category()
                    category.screen_id = ndb.Key('Screen',
                                                 int(request.json['id']))
                    category.name = each['name']
                    category.seats = each['seats']
                    seats.extend(each['seats'])
                    # Create a Category entity for this screen's seats.
                    category.put()
                # Adding seats for the screen fetched from categories
                screen.seats = seats
                res = screen.put()
                return jsonify({
                    "code": 200,
                    "id": res.id(),
                    "message": "Successfully changed layout and other information."
                })
            except:
                return jsonify({"code": 500, "message": "server error"})
        return jsonify({
            "code": 200,
            "message": "Successfully changed some minor information."
        })
Example #12
	def nuke(self, Entity):
		# Delete every entity of the given kind, 1000 keys at a time.
		Entity_qry = Entity.query()
		ent_count = 1
		while ent_count > 0:
			qo = ndb.QueryOptions(keys_only=True)
			ent_count = Entity_qry.count(limit=1000, options=qo)
			if (ent_count):
				ndb.delete_multi(Entity_qry.fetch(ent_count, options=qo))
Example #13
    def get(self):
        old_cutoff = utils.utcnow() - on_error.ERROR_TIME_TO_LIVE
        items = models.Error.query(
            models.Error.created_ts < old_cutoff,
            default_options=ndb.QueryOptions(keys_only=True))
        out = len(ndb.delete_multi(items))
        self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
        self.response.write(str(out))
Example #14
def cron_delete_old_bot_events():
    """Deletes very old BotEvent entities."""
    start = utils.utcnow()
    # Run for 4.5 minutes and schedule the cron job every 5 minutes. Running for
    # 9.5 minutes (out of 10 allowed for a cron job) results in 'Exceeded soft
    # private memory limit of 512 MB with 512 MB' even if this loop should be
    # fairly light on memory usage.
    time_to_stop = start + datetime.timedelta(seconds=int(4.5 * 60))
    end_ts = start - _OLD_BOT_EVENTS_CUT_OFF
    more = True
    cursor = None
    count = 0
    first_ts = None
    try:
        # Order is by key, so it is naturally ordered by bot, which means the
        # operations will mainly operate on one root entity at a time.
        q = BotEvent.query(default_options=ndb.QueryOptions(
            keys_only=True)).filter(BotEvent.ts <= end_ts)
        while more:
            keys, cursor, more = q.fetch_page(10, start_cursor=cursor)
            if not keys:
                break
            if not first_ts:
                # Fetch the very first entity to get an idea of the range being
                # processed.
                while keys:
                    # It's possible that the query returns ndb.Key for entities that do
                    # not exist anymore due to an inconsistent index. Handle this
                    # explicitly.
                    e = keys[0].get()
                    if not e:
                        keys = keys[1:]
                        continue
                    first_ts = e.ts
                    break
            ndb.delete_multi(keys)
            count += len(keys)
            if utils.utcnow() >= time_to_stop:
                break
        return count
    except runtime.DeadlineExceededError:
        pass
    finally:

        def _format_ts(t):
            # datetime.datetime
            return t.strftime(u'%Y-%m-%d %H:%M') if t else 'N/A'

        def _format_delta(e, s):
            # datetime.timedelta
            return str(e - s).rsplit('.', 1)[0] if e and s else 'N/A'

        logging.info(
            'Deleted %d BotEvent entities; from %s\n'
            'Cut off was %s; trailing by %s', count, _format_ts(first_ts),
            _format_ts(end_ts), _format_delta(end_ts, first_ts))
Example #15
def get_relays_for_recipient(user_id, offset, archived=False):
    qo = ndb.QueryOptions(limit=10, offset=offset)
    archive_clause = SentRelay.archived if archived else SentRelay.not_archived
    sent_relays_iter = SentRelay.query().filter(
        archive_clause == user_id, ).order(-SentRelay.timestamp).iter(
            options=qo)
    sent_relays = list(sent_relays_iter)
    logging.info('get_relays_for_recipient(%s) -> %s' %
                 (user_id, str(sent_relays)))
    return sent_relays
Example #16
    def paginate(cls, *args, **kwargs):
        query = cls.build_query(*args, **kwargs)
        op = {}
        if 'cursor' in kwargs:
            op['start_cursor'] = ndb.Cursor(urlsafe=kwargs['cursor'])
        query_option = ndb.QueryOptions(**op)
        page_size = kwargs['per_page'] if 'per_page' in kwargs else 10
        results, cursor, more = query.fetch_page(page_size,
                                                 options=query_option)
        return PaginateModel(results, cursor, query_option.start_cursor, more)
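
The key idea here is the urlsafe cursor round-trip: hand the serialized cursor to the client, then rebuild it with ndb.Cursor(urlsafe=...) on the next request. A usage sketch, assuming a hypothetical Post model mixes in paginate() and that PaginateModel exposes the returned cursor as .cursor:

page1 = Post.paginate(per_page=20)
token = page1.cursor.urlsafe() if page1.cursor else None  # send to the client
page2 = Post.paginate(per_page=20, cursor=token)          # resume where page1 ended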
Example #17
def get_events_query(bot_id, order):
    """Returns an ndb.Query for most recent events in reverse chronological order.
  """
    # Disable the in-process local cache. This is important, as there can be up to
    # a thousand entities loaded in memory, and this is a pure memory leak, as
    # there's no chance this specific instance will need these again, therefore
    # this leads to 'Exceeded soft memory limit' AppEngine errors.
    q = BotEvent.query(default_options=ndb.QueryOptions(use_cache=False),
                       ancestor=get_root_key(bot_id))
    if order:
        q = q.order(BotEvent.key)
    return q
Example #18
def _get_task_to_run_query(dimensions_hash):
    """Returns a ndb.Query of TaskToRun within this dimensions_hash queue."""
    # dimensions_hash should be 32 bits, but on AppEngine, which runs 32-bit
    # Python, it is silently upgraded to long.
    assert isinstance(dimensions_hash, (int, long)), repr(dimensions_hash)
    opts = ndb.QueryOptions(deadline=15)
    # See _gen_queue_number() for why << 31. This query cannot use the key
    # because TaskToRun is not a root entity.
    return TaskToRun.query(default_options=opts).order(
        TaskToRun.queue_number).filter(
            TaskToRun.queue_number >= (dimensions_hash << 31),
            TaskToRun.queue_number < ((dimensions_hash + 1) << 31))
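
QueryOptions(deadline=15) bounds the datastore RPC itself: if the backend does not answer within 15 seconds, the call fails with a timeout instead of hanging for the request's full lifetime. A minimal sketch:

# The deadline applies to each datastore RPC issued by the query, not to the
# whole iteration; a slow backend surfaces as a timeout error.
opts = ndb.QueryOptions(deadline=15)
q = TaskToRun.query(default_options=opts)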
Example #19
def _getNameForEmail(email):
    q = PlayerStatus.query(ancestor=ndb.Key('GameStatus', 'Bball'),
                           default_options=ndb.QueryOptions(keys_only=True))
    playerKeys = q.filter(PlayerStatus.email == email.lower())
    for player in playerKeys:
        try:
            player = player.get()
        except:
            continue
        else:
            if player and player.name:
                return player.name
    return None
Example #20
def yield_expired_task_to_run():
    """Yields all the expired TaskToRun still marked as available."""
    # The reason this is done as an iteration over all the pending entities,
    # instead of using a composite index on 'queue_number' and 'expiration_ts',
    # is that TaskToRun entities are very hot and it is important not to
    # require composite indexes on them. The number of pending tasks is
    # expected to be relatively low, on the order of 100,000 entities.
    #
    # Use a large batch size since the entities are very small and to reduce RPC
    # overhead.
    opts = ndb.QueryOptions(batch_size=256)
    now = utils.utcnow()
    for task in TaskToRun.query(TaskToRun.queue_number > 0,
                                default_options=opts):
        if task.expiration_ts < now:
            yield task
Example #21
def removePlayer(person):
    q = Player.query(ancestor=ndb.Key('GameStatus', 'Bball'),
                     default_options=ndb.QueryOptions(keys_only=True))
    # Sanitize
    name, email = emailParser(person)

    if email is None:
        return False, None

    if not name:
        name = email

    playerKeys = q.filter(Player.email == email)
    if playerKeys.count():
        ndb.delete_multi(playerKeys)
        return True, name
    return False, name
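
Because the query is keys-only, the filtered query object iterates directly over ndb.Key values, which is what lets it be handed to ndb.delete_multi() as-is. The equivalent explicit spelling:

keys = q.filter(Player.email == email).fetch(keys_only=True)
if keys:
    ndb.delete_multi(keys)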
Example #22
def metrics_api():
    keys_only = ndb.QueryOptions(keys_only=True)

    meeting_subscriptions = MeetingSubscription.query().fetch()
    metrics = []
    for subscription in meeting_subscriptions:
        data = {
            'title': subscription.title,
            'subscribed': UserSubscriptionPreferences.query(
                UserSubscriptionPreferences.subscription == subscription.key
            ).count(options=keys_only),
            'meetings': Meeting.query().count(options=keys_only),
        }
        metrics.append(data)
    return json.dumps(metrics)
Example #23
def removeSubscriber(email):
    try:
        q = Subscriber.query(ancestor=ndb.Key('Subscribers', 'Bball'),
                             default_options=ndb.QueryOptions(keys_only=True))
    except:
        return False

    # Sanitize
    name, email = emailParser(email)

    if email is None:
        return False

    playerKeys = q.filter(Subscriber.email == email)
    if playerKeys.count():
        ndb.delete_multi(playerKeys)
        return True

    return False
Example #24
def _loadPlayerList(isAlist=None, onlySendEmail=False):
    '''isAlist=None means return all players'''
    q = PlayerStatus.query(ancestor=ndb.Key('GameStatus', 'Bball'),
                           default_options=ndb.QueryOptions(keys_only=True))
    if isAlist is None:
        playerKeys = q
    else:
        playerKeys = q.filter(PlayerStatus.isAlist == isAlist)
    emails = []
    for player in playerKeys:
        try:
            player = player.get()
        except:
            continue
        else:
            if player is not None and (not onlySendEmail or player.sendEmail):
                emails.append(
                    usernameEmailType(player.name, player.email.lower()))
    return emails
Example #25
def getPriorityScore(email):

    if email is None:
        return 0

    q = PlayerStatus.query(ancestor=ndb.Key('GameStatus', 'Bball'),
                           default_options=ndb.QueryOptions(keys_only=True))

    playerKeys = q.filter(PlayerStatus.email == email)

    if not playerKeys.count():
        return 0

    for player in playerKeys:
        try:
            player = player.get()
        except:
            continue
        return player.priorityScore
    # Every lookup failed; fall back to the default score.
    return 0
Example #26
def delete_old(entity):
    key_to_delete = None
    if entity.key.parent():
        # It is a TaskRequestShard, it is very old.
        key_to_delete = entity.key.parent()
    elif not task_pack.request_key_to_result_summary_key(entity.key).get(
            use_cache=False, use_memcache=False):
        # There's a TaskRequest without TaskResultSummary, delete it.
        key_to_delete = entity.key

    if key_to_delete:
        logging.info('Deleting %s: %s', entity.task_id, key_to_delete)
        total = 1
        qo = ndb.QueryOptions(keys_only=True)
        for k in ndb.Query(default_options=qo, ancestor=key_to_delete):
            yield operation.db.Delete(k)
            total += 1
        yield operation.db.Delete(key_to_delete)
        logging.info('Deleted %d entities', total)
Example #27
    def make_ascending_ndb_query(self, kind_class, keys_only=False):
        """Construct an NDB query for this key range, without the scan direction.

    Args:
      kind_class: An ndb.Model subclass.
      keys_only: bool, default False, query only for keys.

    Returns:
      An ndb.Query instance.
    """
        assert issubclass(kind_class, ndb.Model)
        if keys_only:
            default_options = ndb.QueryOptions(keys_only=True)
        else:
            default_options = None
        query = kind_class.query(app=self._app,
                                 namespace=self.namespace,
                                 default_options=default_options)
        query = self.filter_ndb_query(query)
        query = query.order(kind_class._key)
        return query
Example #28
    def get(self):
        qo = ndb.QueryOptions(keys_only=True)
        min_max_params = {'price': 'price', 'bed': 'bedrooms', 'bath': 'bathrooms'}
        query_sets = []
        #Should have just used redis :p
        for p in min_max_params:
            q = Listing.query()
            add_to_sets = False
            try:  # Will only fail if self.request.get("max_" + p) is not an int
                max_param = int(self.request.get("max_" + p))
                q = q.filter(getattr(Listing, min_max_params[p]) <= max_param)
                add_to_sets = True
            except:
                pass
            try:  # Will only fail if self.request.get("min_" + p) is not an int
                min_param = int(self.request.get("min_" + p))
                q = q.filter(getattr(Listing, min_max_params[p]) >= min_param)
                add_to_sets = True
            except:
                pass
            if add_to_sets:
                query_sets = query_sets + [set(q.fetch(options=qo))]

        if len(query_sets):
            valid_buildings = ndb.get_multi(list(set.intersection(*query_sets)))
        else:
            valid_buildings = Listing.query().fetch()
        
        all_features = []
        for b in valid_buildings:
            all_features.append(Feature(geometry=Point((b.lat, b.lng)),
                                        properties={"id": b.listing_id,
                                                    "price": b.price,
                                                    "street": b.street,
                                                    "bedrooms": b.bedrooms,
                                                    "bathrooms": b.bathrooms,
                                                    "sq_ft": b.sq_ft}))
        self.write(FeatureCollection(all_features))
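
The client-side set intersection above exists because the datastore permits inequality filters on only one property per query, so each min/max range has to run as its own keys-only query and the results are merged in memory. A sketch of the restriction:

# Allowed: both inequality filters target the same property.
Listing.query(Listing.price >= 100, Listing.price <= 500)
# Rejected at execution time: inequality filters on two different properties.
# Listing.query(Listing.price >= 100, Listing.bedrooms >= 2)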
Example #29
def getOrCreatePlayerStatus(name, email):
    '''Get a player with email matching "email". If none is found,
        create a new entry.'''
    q = PlayerStatus.query(ancestor=ndb.Key('GameStatus', 'Bball'),
                           default_options=ndb.QueryOptions(keys_only=True))

    playerKeys = q.filter(PlayerStatus.email == email)

    if not playerKeys.count():
        player = PlayerStatus(parent=ndb.Key('GameStatus', 'Bball'),
                              name=name,
                              email=email,
                              numSignups=0,
                              numEarlySignups=0,
                              lastSignup=today(),
                              gamesPlayed=0,
                              gamesPlayedM=0,
                              gamesPlayedW=0,
                              gamesPlayedF=0,
                              lastGame=None,
                              gamesCut=0,
                              lastCut=None,
                              averageSignupTime=0.0,
                              priorityScore=0,
                              isAlist=False,
                              sendEmail=True)
        player.put()
        return player
    else:
        for playerKey in playerKeys:
            try:
                player = playerKey.get()
            except BadValueError:
                continue

            return player
Example #30
def yield_next_available_task_to_dispatch(bot_dimensions, deadline):
    """Yields next available (TaskRequest, TaskToRun) in decreasing order of
  priority.

  Once the caller determines the task is suitable to execute, it must use
  reap_task_to_run(task.key) to mark that it is not to be scheduled anymore.

  Performance is the top priority here.

  Arguments:
  - bot_dimensions: dimensions (as a dict) defined by the bot that can be
      matched.
  - deadline: UTC timestamp (as an int) that the bot must be able to
      complete the task by. None if there is no such deadline.
  """
    # List of all the valid dimensions hashed.
    accepted_dimensions_hash = frozenset(
        _hash_dimensions(utils.encode_to_json(i))
        for i in _powerset(bot_dimensions))
    now = utils.utcnow()
    broken = 0
    cache_lookup = 0
    expired = 0
    hash_mismatch = 0
    ignored = 0
    no_queue = 0
    real_mismatch = 0
    too_long = 0
    total = 0
    # Be very aggressive in fetching as many items as possible. Note that we use
    # the default ndb.EVENTUAL_CONSISTENCY, so stale items may be returned; this
    # is handled explicitly below.
    # - 100/200 gives 2s~40s of query time for 1275 items.
    # - 250/500 gives 2s~50s of query time for 1275 items.
    # - 50/500 gives 3s~20s of query time for 1275 items. (Slower but less
    #   variance). Spikes in 20s~40s are rarer.
    # The problems here are:
    # - Outliers, some shards are simply slower at executing the query.
    # - Median time, which we should optimize.
    # - Abusing batching will slow down this query.
    #
    # TODO(maruel): Measure query performance with stats_framework!!
    # TODO(maruel): Use fetch_page_async() + ndb.get_multi_async() +
    # memcache.get_multi_async() to do pipelined processing. Should greatly reduce
    # the effect of latency on the total duration of this function. I also suspect
    # using ndb.get_multi() will return fresher objects than what is returned by
    # the query.
    opts = ndb.QueryOptions(batch_size=50, prefetch_size=500, keys_only=True)
    try:
        # Interestingly, the filter on .queue_number > 0 is required; otherwise
        # all the None items are returned first.
        q = TaskToRun.query(default_options=opts).order(
            TaskToRun.queue_number).filter(TaskToRun.queue_number > 0)
        for task_key in q:
            duration = (utils.utcnow() - now).total_seconds()
            if duration > 40.:
                # Stop searching after too long: the odds of the request blowing
                # up right after successfully reaping a task are not worth the
                # dangling task request that would stay in limbo until the cron
                # job reaps and retries it. The current handlers are given 60s to
                # complete. Using 40s leaves 20s to finish the reaping and
                # complete the HTTP request.
                return

            total += 1
            # Verify TaskToRun is what is expected. Play defensive here.
            try:
                validate_to_run_key(task_key)
            except ValueError as e:
                logging.error(str(e))
                broken += 1
                continue

            # integer_id() == dimensions_hash.
            if task_key.integer_id() not in accepted_dimensions_hash:
                hash_mismatch += 1
                continue

            # Do this after the basic weeding out but before fetching TaskRequest.
            if _lookup_cache_is_taken(task_key):
                cache_lookup += 1
                continue

            # Ok, it's now worth taking a real look at the entity.
            task = task_key.get(use_cache=False)

            # DB operations are slow, double check memcache again.
            if _lookup_cache_is_taken(task_key):
                cache_lookup += 1
                continue

            # It is possible for the index to be inconsistent since it is not executed
            # in a transaction, no problem.
            if not task.queue_number:
                no_queue += 1
                continue

            # It expired. A cron job will cancel it eventually. Since 'now' is saved
            # before the query, a task may still be reaped even if technically
            # expired, if the query is very slow. This is on purpose so slow
            # queries do not cause exaggerated expirations.
            if task.expiration_ts < now:
                expired += 1
                continue

            # The hash may have conflicts. Ensure the dimensions actually match by
            # verifying the TaskRequest. The probability of a conflict is 2**-31,
            # which is low enough for our purpose. use_cache=False is passed because
            # caching these entities would otherwise bloat memory.
            request = task.request_key.get(use_cache=False)
            if not match_dimensions(request.properties.dimensions,
                                    bot_dimensions):
                real_mismatch += 1
                continue

            # If the bot has a deadline, don't allow it to reap the task unless it can
            # be completed before the deadline. We have to assume the task takes the
            # theoretical maximum amount of time possible, which is governed by
            # execution_timeout_secs. An isolated task's download phase is not subject
            # to this limit, so we need to add io_timeout_secs. When a task is
            # signalled that it's about to be killed, it receives a grace period as
            # well. grace_period_secs is given by run_isolated to the task execution
            # process, by task_runner to run_isolated, and by bot_main to the
            # task_runner. Lastly, add a few seconds to account for any overhead.
            if deadline is not None:
                if not request.properties.execution_timeout_secs:
                    # Task never times out, so it cannot be accepted.
                    too_long += 1
                    continue
                max_task_time = (utils.time_time() +
                                 request.properties.execution_timeout_secs +
                                 (request.properties.io_timeout_secs or 600) +
                                 3 *
                                 (request.properties.grace_period_secs or 30) +
                                 10)
                if deadline <= max_task_time:
                    too_long += 1
                    continue

            # It's a valid task! Note that in the meantime, another bot may have
            # reaped it.
            yield request, task
            ignored += 1
    finally:
        duration = (utils.utcnow() - now).total_seconds()
        logging.info(
            '%d/%s in %5.2fs: %d total, %d exp, %d no_queue, %d hash mismatch, '
            '%d cache negative, %d dimensions mismatch, %d ignored, %d broken, '
            '%d not executable by deadline (UTC %s)', opts.batch_size,
            opts.prefetch_size, duration, total, expired, no_queue,
            hash_mismatch, cache_lookup, real_mismatch, ignored, broken,
            too_long, deadline)