Example #1
    def test_or_dimensions_new_tasks(self):
        # Bots are already registered, then new tasks show up
        self.mock_now(datetime.datetime(2020, 1, 2, 3, 4, 5))
        self.assertEqual(
            0,
            _assert_bot(bot_id=u'bot1',
                        dimensions={
                            u'os': [u'v1', u'v2'],
                            u'gpu': [u'nv'],
                        }))
        self.assertEqual(
            0,
            _assert_bot(bot_id=u'bot2',
                        dimensions={
                            u'os': [u'v2'],
                            u'gpu': [u'amd'],
                        }))

        payloads = self._mock_enqueue_task_async_for_rebuild_task_cache()

        request1 = _gen_request(properties=_gen_properties(
            dimensions={
                u'pool': [u'default'],
                u'os': [u'v1|v2'],
                u'gpu': [u'nv|amd'],
            }))
        task_queues.assert_task_async(request1).get_result()
        self.assertEqual(1, len(payloads))
        f = task_queues.rebuild_task_cache_async(payloads[-1])
        self.assertEqual(True, f.get_result())
        payloads.pop()

        # Both bots should be able to handle |request1|
        self.assert_count(2, task_queues.BotDimensions)
        self.assert_count(2, task_queues.BotTaskDimensions)
        self.assert_count(1, task_queues.TaskDimensions)
        self.assertEqual(4, len(task_queues.TaskDimensions.query().get().sets))
        bot1_root_key = bot_management.get_root_key(u'bot1')
        bot2_root_key = bot_management.get_root_key(u'bot2')
        self.assertEqual(1, len(task_queues.get_queues(bot1_root_key)))
        self.assertEqual(1, len(task_queues.get_queues(bot2_root_key)))

        request2 = _gen_request(properties=_gen_properties(
            dimensions={
                u'pool': [u'default'],
                u'os': [u'v1'],
                u'gpu': [u'nv|amd'],
            }))
        task_queues.assert_task_async(request2).get_result()
        self.assertEqual(1, len(payloads))
        f = task_queues.rebuild_task_cache_async(payloads[-1])
        self.assertEqual(True, f.get_result())
        payloads.pop()

        # Only bot1 can handle |request2|
        self.assert_count(3, task_queues.BotTaskDimensions)
        self.assert_count(2, task_queues.TaskDimensions)
        self.assertEqual(2, len(task_queues.get_queues(bot1_root_key)))
        self.assertEqual(1, len(task_queues.get_queues(bot2_root_key)))
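The helper _mock_enqueue_task_async_for_rebuild_task_cache() used above is not shown in these examples. A minimal sketch of what it could look like, assuming the test base class exposes self.mock() (as used in Example #5), that enqueuing goes through a utils.enqueue_task_async(url, queue_name, payload) function, and that ndb is already imported; those names are assumptions, not taken from the source:

    def _mock_enqueue_task_async_for_rebuild_task_cache(self):
        # Sketch only: collect every payload aimed at the 'rebuild-task-cache'
        # queue so the test can replay it through rebuild_task_cache_async().
        payloads = []

        def mocked_enqueue_task_async(url, queue_name, payload):
            self.assertEqual('rebuild-task-cache', queue_name)
            payloads.append(payload)
            # Return an already-resolved future to mimic a successful enqueue.
            f = ndb.Future()
            f.set_result(True)
            return f

        self.mock(utils, 'enqueue_task_async', mocked_enqueue_task_async)
        return payloads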
Example #2
 def post(self):
     f = task_queues.rebuild_task_cache_async(self.request.body)
     if not f.get_result():
         # The task likely failed due to DB transaction contention,
         # so we can reply that the service has had too many requests (429).
         # Using a 400-level response also prevents failures here from causing
         # unactionable alerts due to a high rate of 500s.
         self.response.set_status(429, 'Need to retry')
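Example #2 is the body of a task-queue worker handler. A minimal sketch of how such a handler might be wired into a webapp2 application; the route path, import path, and class name are assumptions for illustration, not taken from the source:

import webapp2

from server import task_queues  # assumed import path


class RebuildTaskCacheHandler(webapp2.RequestHandler):
    def post(self):
        f = task_queues.rebuild_task_cache_async(self.request.body)
        if not f.get_result():
            # A 400-level status makes the task queue retry without tripping
            # 500-based alerting.
            self.response.set_status(429, 'Need to retry')


# Hypothetical route; the URL actually used by the service may differ.
app = webapp2.WSGIApplication([
    webapp2.Route(r'/internal/taskqueue/rebuild-task-cache',
                  RebuildTaskCacheHandler),
])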
Example #3
    def test_or_dimensions_same_hash(self):
        self.mock_now(datetime.datetime(2020, 1, 2, 3, 4, 5))
        self.assertEqual(
            0, _assert_bot(bot_id=u'bot1', dimensions={u'os': [u'v1']}))
        self.assertEqual(
            0, _assert_bot(bot_id=u'bot2', dimensions={u'os': [u'v2']}))
        self.assertEqual(
            0, _assert_bot(bot_id=u'bot3', dimensions={u'os': [u'v3']}))

        payloads = self._mock_enqueue_task_async_for_rebuild_task_cache()
        # Both requests should have the same dimension_hash
        request1 = _gen_request(properties=_gen_properties(dimensions={
            u'pool': [u'default'],
            u'os': [u'v1|v2|v3'],
        }))
        request2 = _gen_request(properties=_gen_properties(dimensions={
            u'pool': [u'default'],
            u'os': [u'v3|v2|v1'],
        }))
        task_queues.assert_task_async(request1).get_result()
        task_queues.assert_task_async(request2).get_result()
        self.assertEqual(2, len(payloads))
        while payloads:
            f = task_queues.rebuild_task_cache_async(payloads[-1])
            self.assertEqual(True, f.get_result())
            payloads.pop()

        self.assert_count(3, task_queues.BotDimensions)
        self.assert_count(3, task_queues.BotTaskDimensions)
        self.assert_count(1, task_queues.TaskDimensions)
        self.assertEqual(3, len(task_queues.TaskDimensions.query().get().sets))
        bot1_root_key = bot_management.get_root_key(u'bot1')
        bot2_root_key = bot_management.get_root_key(u'bot2')
        bot3_root_key = bot_management.get_root_key(u'bot3')
        self.assertEqual(1, len(task_queues.get_queues(bot1_root_key)))
        self.assertEqual(1, len(task_queues.get_queues(bot2_root_key)))
        self.assertEqual(1, len(task_queues.get_queues(bot3_root_key)))
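The test hinges on u'v1|v2|v3' and u'v3|v2|v1' collapsing into a single TaskDimensions entity, which implies the OR alternatives are normalized before hashing. A small self-contained sketch of that idea; the normalization and the hash below are illustrative assumptions, not the actual task_queues implementation:

import hashlib
import json


def _normalized_dimensions(dimensions):
    # Assumed normalization: sort the '|'-separated alternatives of each value
    # so logically equal OR expressions compare (and hash) the same.
    return {
        key: sorted('|'.join(sorted(v.split('|'))) for v in values)
        for key, values in dimensions.items()
    }


def _dimensions_hash(dimensions):
    # Assumed hashing scheme, for illustration only.
    blob = json.dumps(_normalized_dimensions(dimensions), sort_keys=True)
    return int(hashlib.md5(blob.encode('utf-8')).hexdigest()[:8], 16)


assert (_dimensions_hash({u'pool': [u'default'], u'os': [u'v1|v2|v3']}) ==
        _dimensions_hash({u'pool': [u'default'], u'os': [u'v3|v2|v1']}))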
Example #4
 def _enqueue_task_async(self, url, queue_name, payload):
   if queue_name == 'rebuild-task-cache':
     return task_queues.rebuild_task_cache_async(payload)
   self.fail(url)
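Example #4 takes a different approach from the payload-capturing tests above: the mocked enqueue runs rebuild_task_cache_async() inline. A sketch of how such a mock might be installed in the test's setUp(); the class name and the utils.enqueue_task_async target are assumptions for illustration:

 def setUp(self):
   super(TaskQueuesApiTest, self).setUp()
   # Assumed wiring: anything the code under test enqueues is routed straight
   # into _enqueue_task_async() above, so 'rebuild-task-cache' work runs inline.
   self.mock(utils, 'enqueue_task_async', self._enqueue_task_async)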
Example #5
    def test_rebuild_task_cache_async(self):
        # Assert that expiration works.
        now = datetime.datetime(2010, 1, 2, 3, 4, 5)
        self.mock_now(now)

        # We want _yield_BotTaskDimensions_keys() to return multiple
        # BotTaskDimensions ndb.Keys to confirm that the inner loops work. This
        # requires a few bots.
        _assert_bot(bot_id=u'bot1')
        _assert_bot(bot_id=u'bot2')
        _assert_bot(bot_id=u'bot3')
        bot_root_key = bot_management.get_root_key(u'bot1')
        self.assertEqual(0, task_queues.BotTaskDimensions.query().count())
        self.assertEqual(0, task_queues.TaskDimensions.query().count())

        # Intentionally force the code to throttle the number of concurrent
        # RPCs; otherwise the inner loops wouldn't be reached with fewer than 50
        # bots, and testing with 50 bots would make the unit test slow.
        self.mock(task_queues, '_CAP_FUTURES_LIMIT', 1)

        payloads = self._mock_enqueue_task_async_for_rebuild_task_cache()

        # The equivalent of self._assert_task(tasks=1), except that we snapshot
        # the payload.
        # Trigger multiple task queues to exercise the deeper code paths.
        request_1 = _gen_request(properties=_gen_properties(dimensions={
            u'cpu': [u'x86-64'],
            u'pool': [u'default'],
        }))
        task_queues.assert_task_async(request_1).get_result()
        self.assertEqual(1, len(payloads))
        f = task_queues.rebuild_task_cache_async(payloads[-1])
        self.assertEqual(True, f.get_result())
        self.assertEqual(3, task_queues.BotTaskDimensions.query().count())
        self.assertEqual(1, task_queues.TaskDimensions.query().count())
        self.assertEqual(60, request_1.expiration_secs)
        expected = now + task_queues._EXTEND_VALIDITY + datetime.timedelta(
            seconds=request_1.expiration_secs)
        self.assertEqual(
            expected,
            task_queues.TaskDimensions.query().get().valid_until_ts)

        request_2 = _gen_request(properties=_gen_properties(
            dimensions={
                u'os': [u'Ubuntu-16.04'],
                u'pool': [u'default'],
            }))
        task_queues.assert_task_async(request_2).get_result()
        self.assertEqual(2, len(payloads))
        f = task_queues.rebuild_task_cache_async(payloads[-1])
        self.assertEqual(True, f.get_result())
        self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
        self.assertEqual(2, task_queues.TaskDimensions.query().count())
        self.assertEqual([227177418, 1843498234],
                         task_queues.get_queues(bot_root_key))
        memcache.flush_all()
        self.assertEqual([227177418, 1843498234],
                         task_queues.get_queues(bot_root_key))

        # Now expire the two TaskDimensions, one at a time, and rebuild the task
        # queue.
        offset = (task_queues._EXTEND_VALIDITY + datetime.timedelta(
            seconds=request_1.expiration_secs)).total_seconds() + 1
        self.mock_now(now, offset)
        f = task_queues.rebuild_task_cache_async(payloads[0])
        self.assertEqual(True, f.get_result())
        self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
        self.assertEqual(2, task_queues.TaskDimensions.query().count())
        self.assertEqual([227177418, 1843498234],
                         task_queues.get_queues(bot_root_key))
        # Observe the effect of memcache. See comment in get_queues().
        memcache.flush_all()
        self.assertEqual([], task_queues.get_queues(bot_root_key))

        # Re-running still does not delete the TaskDimensions because they are
        # kept until _KEEP_DEAD has elapsed.
        f = task_queues.rebuild_task_cache_async(payloads[1])
        self.assertEqual(True, f.get_result())
        self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
        self.assertEqual(2, task_queues.TaskDimensions.query().count())
        self.assertEqual([], task_queues.get_queues(bot_root_key))

        # Get past _KEEP_DEAD.
        offset = (task_queues._EXTEND_VALIDITY + task_queues._KEEP_DEAD +
                  datetime.timedelta(seconds=request_1.expiration_secs)
                  ).total_seconds() + 1
        self.mock_now(now, offset)
        self.assertEqual([], task_queues.get_queues(bot_root_key))
        f = task_queues.rebuild_task_cache_async(payloads[0])
        self.assertEqual(True, f.get_result())
        self.assertEqual(6, task_queues.BotTaskDimensions.query().count())
        self.assertEqual(1, task_queues.TaskDimensions.query().count())
        self.assertEqual([], task_queues.get_queues(bot_root_key))
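The expiry arithmetic this test asserts can be written out directly: a TaskDimensions entry stays valid for the request's expiration_secs plus the _EXTEND_VALIDITY grace window, and is only deleted once a further _KEEP_DEAD window has also elapsed. A worked sketch using the test's values; the timedelta values for the two constants are assumptions for illustration, not the real constants in task_queues:

import datetime

# Assumed values for illustration; the real constants live in task_queues.
_EXTEND_VALIDITY = datetime.timedelta(hours=4)
_KEEP_DEAD = datetime.timedelta(days=3)

now = datetime.datetime(2010, 1, 2, 3, 4, 5)
expiration_secs = 60

# What the test asserts for TaskDimensions.valid_until_ts after request_1.
valid_until_ts = now + _EXTEND_VALIDITY + datetime.timedelta(
    seconds=expiration_secs)

# The entity is stale after valid_until_ts, but rebuild_task_cache_async()
# only deletes it once _KEEP_DEAD has also passed, which is what the last two
# blocks of the test exercise.
deleted_after = valid_until_ts + _KEEP_DEAD
print(valid_until_ts, deleted_after)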