Example #1
0
 def post(self):
     """Rebuilds the task cache from the task payload in the request body.

     Replies 500 when rebuild_task_cache() reports failure so the task
     queue retries the task; replies with a plain-text success message
     otherwise.
     """
     self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
     rebuilt = task_queues.rebuild_task_cache(self.request.body)
     if rebuilt:
         self.response.out.write('Success.')
     else:
         # A 500 tells the task queue service to retry the task.
         self.response.set_status(500)
Example #2
0
 def post(self):
     """Rebuilds the task cache from the task payload in the request body.

     On failure replies 429 instead of 500: the failure is likely DB
     transaction contention, and a 400-level status both triggers a retry
     and keeps the 500 rate low enough to avoid unactionable alerts.
     """
     if task_queues.rebuild_task_cache(self.request.body):
         return
     # Likely DB transaction contention; 429 ("too many requests") makes
     # the task queue retry without inflating the 500 error rate.
     self.response.set_status(429, 'Need to retry')
 def _enqueue_task(self, url, queue_name, **kwargs):
   """Test double for task enqueuing.

   Runs 'rebuild-task-cache' tasks synchronously, silently accepts the
   queues irrelevant to the test, and fails the test on anything else.
   Returns True to signal the enqueue "succeeded".
   """
   if queue_name in ('cancel-children-tasks', 'pubsub'):
     # Irrelevant to this test; pretend the enqueue succeeded.
     return True
   if queue_name == 'rebuild-task-cache':
     # Execute the handler inline instead of deferring to a task queue.
     self.assertEqual(True, task_queues.rebuild_task_cache(kwargs['payload']))
     return True
   self.fail(url)
   return False
  def test_rebuild_task_cache(self):
    """End-to-end check of task_queues.rebuild_task_cache().

    Walks the dimensions cache through creation, expiration past
    _EXTEND_VALIDITY, and final cleanup past _KEEP_DEAD, verifying entity
    counts and get_queues() results at each step.
    """
    # Assert that expiration works.
    now = datetime.datetime(2010, 1, 2, 3, 4, 5)
    self.mock_now(now)

    # We want _yield_BotTaskDimensions_keys() to return multiple
    # BotTaskDimensions ndb.Key to confirm that the inner loops work. This
    # requires a few bots.
    _assert_bot(bot_id=u'bot1')
    _assert_bot(bot_id=u'bot2')
    _assert_bot(bot_id=u'bot3')
    bot_root_key = bot_management.get_root_key(u'bot1')
    # Nothing is cached yet.
    self.assertEqual(0, task_queues.BotTaskDimensions.query().count())
    self.assertEqual(0, task_queues.TaskDimensions.query().count())

    # Intentionally force the code to throttle the number of concurrent RPCs,
    # otherwise the inner loops wouldn't be reached with less than 50 bots, and
    # testing with 50 bots, would make the unit test slow.
    self.mock(task_queues, '_CAP_FUTURES_LIMIT', 1)

    # Capture enqueued payloads so rebuild_task_cache() can be invoked
    # directly below instead of going through a real task queue.
    payloads = []
    def _enqueue_task(url, name, payload):
      self.assertEqual(
          '/internal/taskqueue/important/task_queues/rebuild-cache', url)
      self.assertEqual('rebuild-task-cache', name)
      payloads.append(payload)
      return True
    self.mock(utils, 'enqueue_task', _enqueue_task)

    # The equivalent of self._assert_task(tasks=1) except that we snapshot the
    # payload.
    # Trigger multiple task queues to go deeper in the code.
    request_1 = _gen_request(
        properties=_gen_properties(
            dimensions={
              u'cpu': [u'x86-64'],
              u'pool': [u'default'],
            }))
    task_queues.assert_task(request_1)
    self.assertEqual(1, len(payloads))
    self.assertEqual(True, task_queues.rebuild_task_cache(payloads[-1]))
    # One BotTaskDimensions per bot (3 bots), one TaskDimensions per
    # distinct dimensions set.
    self.assertEqual(3, task_queues.BotTaskDimensions.query().count())
    self.assertEqual(1, task_queues.TaskDimensions.query().count())
    self.assertEqual(60, request_1.expiration_secs)
    # The entity stays valid _EXTEND_VALIDITY past the task's expiration.
    expected = now + task_queues._EXTEND_VALIDITY + datetime.timedelta(
        seconds=request_1.expiration_secs)
    self.assertEqual(
        expected, task_queues.TaskDimensions.query().get().valid_until_ts)

    request_2 = _gen_request(
        properties=_gen_properties(
            dimensions={
              u'os': [u'Ubuntu-16.04'],
              u'pool': [u'default'],
            }))
    task_queues.assert_task(request_2)
    self.assertEqual(2, len(payloads))
    self.assertEqual(True, task_queues.rebuild_task_cache(payloads[-1]))
    self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
    self.assertEqual(2, task_queues.TaskDimensions.query().count())
    self.assertEqual(
        [227177418, 1843498234], task_queues.get_queues(bot_root_key))
    # Same answer must come back from the datastore once memcache is cleared.
    memcache.flush_all()
    self.assertEqual(
        [227177418, 1843498234], task_queues.get_queues(bot_root_key))

    # Now expire the two TaskDimensions, one at a time, and rebuild the task
    # queue.
    offset = (task_queues._EXTEND_VALIDITY + datetime.timedelta(
      seconds=request_1.expiration_secs)).total_seconds() + 1
    self.mock_now(now, offset)
    self.assertEqual(True, task_queues.rebuild_task_cache(payloads[0]))
    self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
    self.assertEqual(2, task_queues.TaskDimensions.query().count())
    self.assertEqual(
        [227177418, 1843498234], task_queues.get_queues(bot_root_key))
    # Observe the effect of memcache. See comment in get_queues().
    memcache.flush_all()
    self.assertEqual([], task_queues.get_queues(bot_root_key))

    # Re-running still do not delete TaskDimensions because they are kept until
    # _KEEP_DEAD.
    self.assertEqual(True, task_queues.rebuild_task_cache(payloads[1]))
    self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
    self.assertEqual(2, task_queues.TaskDimensions.query().count())
    self.assertEqual([], task_queues.get_queues(bot_root_key))

    # Get past _KEEP_DEAD.
    offset = (
        task_queues._EXTEND_VALIDITY +
        task_queues._KEEP_DEAD + datetime.timedelta(
            seconds=request_1.expiration_secs)
      ).total_seconds() + 1
    self.mock_now(now, offset)
    self.assertEqual([], task_queues.get_queues(bot_root_key))
    # Rebuilding past _KEEP_DEAD finally drops one expired TaskDimensions.
    self.assertEqual(True, task_queues.rebuild_task_cache(payloads[0]))
    self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
    self.assertEqual(1, task_queues.TaskDimensions.query().count())
    self.assertEqual([], task_queues.get_queues(bot_root_key))