def test_bot_update_pubsub_error(self):
  """A failed PubSub publish fails bot_update_task; the retry succeeds."""
  data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      pubsub_topic='projects/abc/topics/def')
  request = task_request.make_request(data, True)
  task_scheduler.schedule_request(request)
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  _, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual('localhost', run_result.bot_id)

  # Attempt to terminate the task with success, but make PubSub call fail.
  self.mock_pub_sub(publish_successful=False)
  self.assertEqual(
      (False, False),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False, 0.1,
          None))

  # Bot retries bot_update, now PubSub works and notification is sent.
  pub_sub_calls = self.mock_pub_sub(publish_successful=True)
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False, 0.1,
          None))
  self.assertEqual(1, len(pub_sub_calls))  # notification is sent
def test_bot_kill_task(self):
  """Killing a reaped task marks both result entities as BOT_DIED."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  dimensions = {u'OS': u'Windows-3.1.1'}
  request = task_request.make_request(
      _gen_request(properties={'dimensions': dimensions}), True)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  self.assertEqual(
      None, task_scheduler.bot_kill_task(run_result.key, 'localhost'))
  expected = {
    'abandoned_ts': self.now,
    'bot_dimensions': dimensions,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [0.],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': True,
    'modified_ts': self.now,
    'name': u'Request name',
    'outputs_ref': None,
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': State.BOT_DIED,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': 1,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
  expected = {
    'abandoned_ts': self.now,
    'bot_dimensions': dimensions,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': None,
    'cost_usd': 0.,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008811',
    'internal_failure': True,
    'modified_ts': self.now,
    'outputs_ref': None,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': State.BOT_DIED,
    'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
def terminate(self, request):
  """Asks a bot to terminate itself gracefully.

  The bot will stay in the DB, use 'delete' to remove it from the DB
  afterward. This request returns a pseudo-taskid that can be waited for to
  wait for the bot to turn down.

  This command is particularly useful when a privileged user needs to safely
  debug a machine specific issue. The user can trigger a terminate for one of
  the bot exhibiting the issue, wait for the pseudo-task to run then access
  the machine with the guarantee that the bot is not running anymore.
  """
  # TODO(maruel): Disallow a terminate task when there's one currently
  # pending or if the bot is considered 'dead', e.g. no contact since 10
  # minutes.
  logging.debug('%s', request)
  bot_id = unicode(request.bot_id)
  bot_key = bot_management.get_info_key(bot_id)
  get_or_raise(bot_key)  # raises 404 if there is no such bot
  try:
    # Craft a special priority 0 task to tell the bot to shutdown.
    request = task_request.create_termination_task(
        bot_id, wait_for_capacity=True)
  except (datastore_errors.BadValueError, TypeError, ValueError) as e:
    raise endpoints.BadRequestException(e.message)
  result_summary = task_scheduler.schedule_request(request, secret_bytes=None)
  return swarming_rpcs.TerminateResponse(
      task_id=task_pack.pack_result_summary_key(result_summary.key))
def test_cron_abort_expired_task_to_run(self):
  """A pending task past its expiration is moved to EXPIRED by the cron."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  abandoned_ts = self.mock_now(
      self.now, data['scheduling_expiration_secs'] + 1)
  self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
  self.assertEqual([], task_result.TaskRunResult.query().fetch())
  expected = {
    'abandoned_ts': abandoned_ts,
    'bot_id': None,
    'bot_version': None,
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': False,
    'modified_ts': abandoned_ts,
    'name': u'Request name',
    'properties_hash': None,
    'server_versions': [],
    'started_ts': None,
    'state': task_result.State.EXPIRED,
    'try_number': None,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
def test_cron_abort_expired_task_to_run(self):
  """A pending task past its expiration is moved to EXPIRED by the cron."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  abandoned_ts = self.mock_now(
      self.now, data['scheduling_expiration_secs'] + 1)
  self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
  self.assertEqual([], task_result.TaskRunResult.query().fetch())
  expected = {
    'abandoned_ts': abandoned_ts,
    'bot_id': None,
    'bot_version': None,
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': False,
    'modified_ts': abandoned_ts,
    'name': u'Request name',
    'properties_hash': None,
    'server_versions': [],
    'started_ts': None,
    'state': task_result.State.EXPIRED,
    'try_number': None,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
def new(self, request):
  """Creates a new task.

  The task will be enqueued in the tasks list and will be executed at the
  earliest opportunity by a bot that has at least the dimensions as described
  in the task request.
  """
  logging.info('%s', request)
  try:
    request = message_conversion.new_task_request_from_rpc(
        request, utils.utcnow())
    posted_request = task_request.make_request(request, acl.is_bot_or_admin())
  except (datastore_errors.BadValueError, TypeError, ValueError) as e:
    raise endpoints.BadRequestException(e.message)

  result_summary = task_scheduler.schedule_request(posted_request)

  # Report the previous result when the task was deduplicated.
  previous_result = None
  if result_summary.deduped_from:
    previous_result = message_conversion.task_result_to_rpc(result_summary)

  return swarming_rpcs.TaskRequestMetadata(
      request=message_conversion.task_request_to_rpc(posted_request),
      task_id=task_pack.pack_result_summary_key(result_summary.key),
      task_result=previous_result)
def test_notify_request_with_tq_batch_mode_false(self):
  """notify_requests with use_tq=False sends to each scheduler immediately."""
  request = _gen_request()
  result_summary = task_scheduler.schedule_request(request, None)
  self.cfg.enable_batch_es_notifications = True
  self.assertEqual(1, self.execute_tasks())
  self._setup_client()

  # Since use_tq is false, the requests below should be sent out immediately.
  external_scheduler.notify_requests(
      self.cfg_foo, [(request, result_summary)], False, False,
      batch_mode=True)
  external_scheduler.notify_requests(
      self.cfg_hoe, [(request, result_summary)], False, False,
      batch_mode=True)

  called_with = self._client.called_with_requests
  self.assertEqual(len(called_with), 2)
  called_with.sort(key=lambda x: x.scheduler_id)
  # Should have 1 notification and its id is foo.
  self.assertEqual(len(called_with[0].notifications), 1)
  self.assertEqual(called_with[0].scheduler_id, u'foo')
  # Should have 1 notification and its id is hoe.
  self.assertEqual(len(called_with[1].notifications), 1)
  self.assertEqual(called_with[1].scheduler_id, u'hoe')
def test_search_by_name_broken_tasks(self):
  """search_by_name still pages correctly over half-written task entities."""
  # Create tasks where task_scheduler.schedule_request() fails in the middle.
  # This is done by mocking the functions to fail every SKIP call and running
  # it in a loop.
  class RandomFailure(Exception):
    pass

  # First call fails ndb.put_multi(), second call fails search.Index.put(),
  # third call work.
  index = [0]
  SKIP = 3

  def put_multi(*args, **kwargs):
    callers = [i[3] for i in inspect.stack()]
    self.assertTrue(
        'make_request' in callers or 'schedule_request' in callers, callers)
    if (index[0] % SKIP) == 1:
      raise RandomFailure()
    return old_put_multi(*args, **kwargs)

  def put_async(*args, **kwargs):
    callers = [i[3] for i in inspect.stack()]
    self.assertIn('schedule_request', callers)
    out = ndb.Future()
    if (index[0] % SKIP) == 2:
      out.set_exception(search.Error())
    else:
      out.set_result(old_put_async(*args, **kwargs).get_result())
    return out

  old_put_multi = self.mock(ndb, 'put_multi', put_multi)
  old_put_async = self.mock(search.Index, 'put_async', put_async)

  saved = []
  for i in xrange(100):
    index[0] = i
    data = _gen_request_data(
        name='Request %d' % i,
        properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
    try:
      request = task_request.make_request(data)
      result_summary = task_scheduler.schedule_request(request)
      saved.append(result_summary)
    except RandomFailure:
      pass

  self.assertEqual(67, len(saved))
  self.assertEqual(67, task_request.TaskRequest.query().count())
  self.assertEqual(67, task_result.TaskResultSummary.query().count())

  # Now the DB is full of half-corrupted entities.
  cursor = None
  actual, cursor = task_result.search_by_name('Request', cursor, 31)
  self.assertEqual(31, len(actual))
  actual, cursor = task_result.search_by_name('Request', cursor, 31)
  self.assertEqual(3, len(actual))
  actual, cursor = task_result.search_by_name('Request', cursor, 31)
  self.assertEqual(0, len(actual))
def new(self, request):
  """Creates a new task.

  The task will be enqueued in the tasks list and will be executed at the
  earliest opportunity by a bot that has at least the dimensions as described
  in the task request.
  """
  logging.info('%s', request)
  try:
    request = message_conversion.new_task_request_from_rpc(
        request, utils.utcnow())
    posted_request = task_request.make_request(request, acl.is_bot_or_admin())
  except (datastore_errors.BadValueError, TypeError, ValueError) as e:
    raise endpoints.BadRequestException(e.message)

  result_summary = task_scheduler.schedule_request(posted_request)

  # Report the previous result when the task was deduplicated.
  previous_result = None
  if result_summary.deduped_from:
    previous_result = message_conversion.task_result_to_rpc(result_summary)

  return swarming_rpcs.TaskRequestMetadata(
      request=message_conversion.task_request_to_rpc(posted_request),
      task_id=task_pack.pack_result_summary_key(result_summary.key),
      task_result=previous_result)
def test_task_parent_isolated(self):
  """A child task is recorded in its isolated parent's children_task_ids."""
  request = task_request.make_request(
      _gen_request(
          properties={
            'commands': None,
            'dimensions': {u'OS': u'Windows-3.1.1'},
            'inputs_ref': {
              'isolated': '1' * 40,
              'isolatedserver': 'http://localhost:1',
              'namespace': 'default-gzip',
            },
          }),
      True)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  actual_request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(request, actual_request)
  self.assertEqual('localhost', run_result.bot_id)
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)

  # It's important to terminate the task with success.
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'Foo1', 0, 0, 0.1, False, False, 0.1,
          None))

  parent_id = run_result.task_id
  request = task_request.make_request(
      _gen_request(
          parent_task_id=parent_id,
          properties={'dimensions':{u'OS': u'Windows-3.1.1'}}),
      True)
  result_summary = task_scheduler.schedule_request(request)
  self.assertEqual([], result_summary.children_task_ids)
  self.assertEqual(parent_id, request.parent_task_id)

  parent_run_result_key = task_pack.unpack_run_result_key(parent_id)
  parent_res_summary_key = task_pack.run_result_key_to_result_summary_key(
      parent_run_result_key)
  expected = [result_summary.task_id]
  self.assertEqual(expected, parent_run_result_key.get().children_task_ids)
  self.assertEqual(expected, parent_res_summary_key.get().children_task_ids)
def post(self, task_id):
  """Retries a task by cloning its original request with fresh parameters."""
  original_request, _ = self.get_request_and_result(task_id)
  # Retrying a task is essentially reusing the same task request as the
  # original one, but with new parameters.
  new_request = task_request.new_request_clone(
      original_request, allow_high_priority=acl.is_admin())
  result_summary = task_scheduler.schedule_request(new_request)
  self.redirect('/user/task/%s' % result_summary.task_id)
def test_cron_handle_bot_died_second(self):
  """Two consecutive internal failures lead to a BOT_DIED result."""
  # Test two tries internal_failure's leading to a BOT_DIED status.
  self.mock(random, 'getrandbits', lambda _: 0x88)
  now = utils.utcnow()
  data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      created_ts=now,
      expiration_ts=now+datetime.timedelta(seconds=600))
  request = task_request.make_request(data, True)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(1, run_result.try_number)
  self.assertEqual(task_result.State.RUNNING, run_result.state)
  self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  self.assertEqual(
      ([], 1, 0), task_scheduler.cron_handle_bot_died('f.local'))

  # A second bot picks up the retry.
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
  # It must be a different bot.
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost-second', 'abc')
  now_2 = self.mock_now(self.now + 2 * task_result.BOT_PING_TOLERANCE, 3)
  self.assertEqual(
      (['1d69b9f088008812'], 0, 0),
      task_scheduler.cron_handle_bot_died('f.local'))
  self.assertEqual(
      ([], 0, 0), task_scheduler.cron_handle_bot_died('f.local'))
  expected = {
    'abandoned_ts': now_2,
    'bot_dimensions': bot_dimensions,
    'bot_id': u'localhost-second',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [0., 0.],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': True,
    'modified_ts': now_2,
    'name': u'Request name',
    'outputs_ref': None,
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': now_1,
    'state': task_result.State.BOT_DIED,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': 2,
    'user': u'Jesus',
  }
  self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
def test_search_by_name_broken_tasks(self):
  """_search_by_name still pages correctly over half-written task entities."""
  # Create tasks where task_scheduler.schedule_request() fails in the middle.
  # This is done by mocking the functions to fail every SKIP call and running
  # it in a loop.
  class RandomFailure(Exception):
    pass

  # First call fails ndb.put_multi(), second call fails search.Index.put(),
  # third call work.
  index = [0]
  SKIP = 3

  def put_multi(*args, **kwargs):
    callers = [i[3] for i in inspect.stack()]
    self.assertTrue(
        'make_request' in callers or 'schedule_request' in callers, callers)
    if (index[0] % SKIP) == 1:
      raise RandomFailure()
    return old_put_multi(*args, **kwargs)

  def put_async(*args, **kwargs):
    callers = [i[3] for i in inspect.stack()]
    self.assertIn('schedule_request', callers)
    out = ndb.Future()
    if (index[0] % SKIP) == 2:
      out.set_exception(search.Error())
    else:
      out.set_result(old_put_async(*args, **kwargs).get_result())
    return out

  old_put_multi = self.mock(ndb, 'put_multi', put_multi)
  old_put_async = self.mock(search.Index, 'put_async', put_async)

  saved = []
  for i in xrange(100):
    index[0] = i
    data = _gen_request(
        name='Request %d' % i,
        properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
    try:
      request = task_request.make_request(data, True)
      result_summary = task_scheduler.schedule_request(request)
      saved.append(result_summary)
    except RandomFailure:
      pass

  self.assertEqual(67, len(saved))
  self.assertEqual(67, task_request.TaskRequest.query().count())
  self.assertEqual(67, task_result.TaskResultSummary.query().count())

  # Now the DB is full of half-corrupted entities.
  cursor = None
  actual, cursor = task_result._search_by_name('Request', cursor, 31)
  self.assertEqual(31, len(actual))
  actual, cursor = task_result._search_by_name('Request', cursor, 31)
  self.assertEqual(3, len(actual))
  actual, cursor = task_result._search_by_name('Request', cursor, 31)
  self.assertEqual(0, len(actual))
def _bot_update_timeouts(self, hard, io):
  """Runs a task to completion with the given timeout flags and checks that
  both result entities end up in TIMED_OUT with failure set."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hi', 0, 0, 0.1, hard, io, 0.1))
  expected = {
    'abandoned_ts': None,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': self.now,
    'costs_usd': [0.1],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [0.1],
    'exit_codes': [0],
    'failure': True,
    'id': '1d69b9f088008810',
    'internal_failure': False,
    'modified_ts': self.now,
    'name': u'Request name',
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': State.TIMED_OUT,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': 1,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
  expected = {
    'abandoned_ts': None,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': self.now,
    'cost_usd': 0.1,
    'durations': [0.1],
    'exit_codes': [0],
    'failure': True,
    'id': '1d69b9f088008811',
    'internal_failure': False,
    'modified_ts': self.now,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': State.TIMED_OUT,
    'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
def _quick_reap():
  """Reaps a task."""
  # Schedule a task, then immediately reap it as bot 'localhost'.
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  return run_result
def _quick_reap():
  """Reaps a task."""
  # Schedule a task, then immediately reap it as bot 'localhost'.
  data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data, True)
  _result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  return run_result
def _bot_update_timeouts(self, hard, io):
  """Runs a task to completion with the given timeout flags and checks that
  both result entities end up in TIMED_OUT with failure set."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  self.assertEqual(
      (True, True),
      task_scheduler.bot_update_task(
          run_result.key, 'localhost', 'hi', 0, 0, 0.1, hard, io, 0.1))
  expected = {
    'abandoned_ts': None,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': self.now,
    'costs_usd': [0.1],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [0.1],
    'exit_codes': [0],
    'failure': True,
    'id': '1d69b9f088008810',
    'internal_failure': False,
    'modified_ts': self.now,
    'name': u'Request name',
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': State.TIMED_OUT,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': 1,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
  expected = {
    'abandoned_ts': None,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': self.now,
    'cost_usd': 0.1,
    'durations': [0.1],
    'exit_codes': [0],
    'failure': True,
    'id': '1d69b9f088008811',
    'internal_failure': False,
    'modified_ts': self.now,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': State.TIMED_OUT,
    'try_number': 1,
  }
  self.assertEqual(expected, run_result.key.get().to_dict())
def test_cron_abort_expired_task_to_run_retry(self):
  """After a BOT_DIED first try, expiration keeps BOT_DIED, not EXPIRED."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  now = utils.utcnow()
  data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      created_ts=now,
      expiration_ts=now+datetime.timedelta(seconds=600))
  request = task_request.make_request(data, True)
  result_summary = task_scheduler.schedule_request(request)

  # Fake first try bot died.
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())
  self.assertEqual(task_result.State.BOT_DIED, run_result.key.get().state)
  self.assertEqual(
      task_result.State.PENDING, run_result.result_summary_key.get().state)

  # BOT_DIED is kept instead of EXPIRED.
  abandoned_ts = self.mock_now(self.now, request.expiration_secs+1)
  self.assertEqual(1, task_scheduler.cron_abort_expired_task_to_run())
  self.assertEqual(1, len(task_result.TaskRunResult.query().fetch()))
  expected = {
    'abandoned_ts': abandoned_ts,
    'bot_dimensions': bot_dimensions,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [0.],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': True,
    'modified_ts': abandoned_ts,
    'name': u'Request name',
    'outputs_ref': None,
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': self.now,
    'state': task_result.State.BOT_DIED,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': 1,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
def test_cron_handle_bot_died_second(self):
  """Two consecutive internal failures lead to a BOT_DIED result."""
  # Test two tries internal_failure's leading to a BOT_DIED status.
  self.mock(random, 'getrandbits', lambda _: 0x88)
  now = utils.utcnow()
  data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      created_ts=now,
      expiration_ts=now+datetime.timedelta(seconds=600))
  request = task_request.make_request(data, True)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(1, run_result.try_number)
  self.assertEqual(task_result.State.RUNNING, run_result.state)
  self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 1)
  self.assertEqual((0, 1, 0), task_scheduler.cron_handle_bot_died())

  # A second bot picks up the retry.
  now_1 = self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 2)
  # It must be a different bot.
  _request, run_result = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost-second', 'abc')
  now_2 = self.mock_now(self.now + 2 * task_result.BOT_PING_TOLERANCE, 3)
  self.assertEqual((1, 0, 0), task_scheduler.cron_handle_bot_died())
  self.assertEqual((0, 0, 0), task_scheduler.cron_handle_bot_died())
  expected = {
    'abandoned_ts': now_2,
    'bot_dimensions': bot_dimensions,
    'bot_id': u'localhost-second',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [0., 0.],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': True,
    'modified_ts': now_2,
    'name': u'Request name',
    'outputs_ref': None,
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': now_1,
    'state': task_result.State.BOT_DIED,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': 2,
    'user': u'Jesus',
  }
  self.assertEqual(expected, run_result.result_summary_key.get().to_dict())
def test_cancel_task(self):
  """Cancelling a still-pending task moves it to CANCELED."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  ok, was_running = task_scheduler.cancel_task(result_summary.key)
  self.assertEqual(True, ok)
  self.assertEqual(False, was_running)
  result_summary = result_summary.key.get()
  self.assertEqual(task_result.State.CANCELED, result_summary.state)
def test_cancel_task(self):
  """Cancelling a still-pending task moves it to CANCELED."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  ok, was_running = task_scheduler.cancel_task(result_summary.key)
  self.assertEqual(True, ok)
  self.assertEqual(False, was_running)
  result_summary = result_summary.key.get()
  self.assertEqual(task_result.State.CANCELED, result_summary.state)
def test_search_by_name(self):
  """search_by_name matches whole words, case-insensitively."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)

  # Assert that search is not case-sensitive by using unexpected casing.
  actual, _cursor = task_result.search_by_name('requEST', None, 10)
  self.assertEqual([result_summary], actual)
  actual, _cursor = task_result.search_by_name('name', None, 10)
  self.assertEqual([result_summary], actual)
def test_search_by_name_failures(self):
  """search_by_name returns nothing for unknown words or partial matches."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)

  actual, _cursor = task_result.search_by_name('foo', None, 10)
  self.assertEqual([], actual)
  # Partial match doesn't work.
  actual, _cursor = task_result.search_by_name('nam', None, 10)
  self.assertEqual([], actual)
def test_search_by_name_failures(self):
  """_search_by_name returns nothing for unknown words or partial matches."""
  data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data, True)
  result_summary = task_scheduler.schedule_request(request)

  actual, _cursor = task_result._search_by_name('foo', None, 10)
  self.assertEqual([], actual)
  # Partial match doesn't work.
  actual, _cursor = task_result._search_by_name('nam', None, 10)
  self.assertEqual([], actual)
def test_search_by_name(self):
  """search_by_name matches whole words, case-insensitively."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)

  # Assert that search is not case-sensitive by using unexpected casing.
  actual, _cursor = task_result.search_by_name('requEST', None, 10)
  self.assertEqual([result_summary], actual)
  actual, _cursor = task_result.search_by_name('name', None, 10)
  self.assertEqual([result_summary], actual)
def post(self):
  """Handles a task creation request from an old-style client.

  Validates the posted keys, builds a TaskRequest and schedules it, then
  replies with the request dict and the packed task id.
  """
  logging.error('Unexpected old client')
  data = self.parse_body()
  msg = log_unexpected_subset_keys(
      self._EXPECTED_DATA_KEYS, self._REQUIRED_DATA_KEYS, data, self.request,
      'client', 'request keys')
  if msg:
    self.abort_with_error(400, error=msg)
  data_properties = data['properties']
  msg = log_unexpected_subset_keys(
      self._EXPECTED_PROPERTIES_KEYS, self._REQUIRED_PROPERTIES_KEYS,
      data_properties, self.request, 'client', 'request properties keys')
  if msg:
    self.abort_with_error(400, error=msg)

  # Class TaskProperties takes care of making everything deterministic.
  properties = task_request.TaskProperties(
      commands=data_properties['commands'],
      data=data_properties['data'],
      dimensions=data_properties['dimensions'],
      env=data_properties['env'],
      execution_timeout_secs=data_properties['execution_timeout_secs'],
      grace_period_secs=data_properties.get('grace_period_secs', 30),
      idempotent=data_properties.get('idempotent', False),
      io_timeout_secs=data_properties['io_timeout_secs'])

  now = utils.utcnow()
  expiration_ts = now + datetime.timedelta(
      seconds=data['scheduling_expiration_secs'])
  request = task_request.TaskRequest(
      created_ts=now,
      expiration_ts=expiration_ts,
      name=data['name'],
      parent_task_id=data.get('parent_task_id'),
      priority=data['priority'],
      properties=properties,
      tags=data['tags'],
      user=data['user'] or '')

  try:
    request = task_request.make_request(request, acl.is_bot_or_admin())
  except (
      AttributeError, datastore_errors.BadValueError, TypeError,
      ValueError) as e:
    self.abort_with_error(400, error=str(e))

  result_summary = task_scheduler.schedule_request(request)
  data = {
    'request': request.to_dict(),
    'task_id': task_pack.pack_result_summary_key(result_summary.key),
  }
  self.send_response(utils.to_json_encodable(data))
def test_cancel_task_running(self):
  """A task already reaped by a bot cannot be cancelled."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  ok, was_running = task_scheduler.cancel_task(result_summary.key)
  self.assertEqual(False, ok)
  self.assertEqual(True, was_running)
  result_summary = result_summary.key.get()
  self.assertEqual(task_result.State.RUNNING, result_summary.state)
def test_cancel_task_running(self):
  """A task already reaped by a bot cannot be cancelled."""
  data = _gen_request_data(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  ok, was_running = task_scheduler.cancel_task(result_summary.key)
  self.assertEqual(False, ok)
  self.assertEqual(True, was_running)
  result_summary = result_summary.key.get()
  self.assertEqual(task_result.State.RUNNING, result_summary.state)
def post(self):
  """Handles a task creation request from an old-style client.

  Validates the posted keys, builds a TaskRequest and schedules it, then
  replies with the request dict and the packed task id.
  """
  logging.error('Unexpected old client')
  data = self.parse_body()
  msg = log_unexpected_subset_keys(
      self._EXPECTED_DATA_KEYS, self._REQUIRED_DATA_KEYS, data, self.request,
      'client', 'request keys')
  if msg:
    self.abort_with_error(400, error=msg)
  data_properties = data['properties']
  msg = log_unexpected_subset_keys(
      self._EXPECTED_PROPERTIES_KEYS, self._REQUIRED_PROPERTIES_KEYS,
      data_properties, self.request, 'client', 'request properties keys')
  if msg:
    self.abort_with_error(400, error=msg)

  # Class TaskProperties takes care of making everything deterministic.
  properties = task_request.TaskProperties(
      commands=data_properties['commands'],
      data=data_properties['data'],
      dimensions=data_properties['dimensions'],
      env=data_properties['env'],
      execution_timeout_secs=data_properties['execution_timeout_secs'],
      grace_period_secs=data_properties.get('grace_period_secs', 30),
      idempotent=data_properties.get('idempotent', False),
      io_timeout_secs=data_properties['io_timeout_secs'])

  now = utils.utcnow()
  expiration_ts = now + datetime.timedelta(
      seconds=data['scheduling_expiration_secs'])
  request = task_request.TaskRequest(
      created_ts=now,
      expiration_ts=expiration_ts,
      name=data['name'],
      parent_task_id=data.get('parent_task_id'),
      priority=data['priority'],
      properties=properties,
      tags=data['tags'],
      user=data['user'] or '')

  try:
    request = task_request.make_request(request, acl.is_bot_or_admin())
  except (
      AttributeError, datastore_errors.BadValueError, TypeError,
      ValueError) as e:
    self.abort_with_error(400, error=str(e))

  result_summary = task_scheduler.schedule_request(request)
  data = {
    'request': request.to_dict(),
    'task_id': task_pack.pack_result_summary_key(result_summary.key),
  }
  self.send_response(utils.to_json_encodable(data))
def _task_deduped(self, new_ts, deduped_from, task_id='1d8dc670a0008810',
                  now=None):
  """Schedules an idempotent task and asserts it was deduplicated against a
  previous identical run instead of being handed to a bot."""
  # NOTE(review): the request is created with user='******' but the expected
  # summary below asserts u'Raoul' — looks like a redacted fixture value;
  # confirm against _gen_request_data defaults.
  data = _gen_request_data(
      name='yay',
      user='******',
      properties=dict(
          dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
  request = task_request.make_request(data)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
  # A deduped task is never reaped by a bot.
  actual_request_2, run_result_2 = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(None, actual_request_2)
  result_summary_duped, run_results_duped = get_results(request.key)
  expected = {
    'abandoned_ts': None,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': now or self.now,
    'costs_usd': [],
    'cost_saved_usd': 0.1,
    'created_ts': new_ts,
    'deduped_from': deduped_from,
    'durations': [0.1],
    'exit_codes': [0],
    'failure': False,
    'id': task_id,
    'internal_failure': False,
    # Only this value is updated to 'now', the rest uses the previous run
    # timestamps.
    'modified_ts': new_ts,
    'name': u'yay',
    # A deduped task cannot be deduped against.
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': now or self.now,
    'state': State.COMPLETED,
    'try_number': 0,
    'user': u'Raoul',
  }
  self.assertEqual(expected, result_summary_duped.to_dict())
  self.assertEqual([], run_results_duped)
def test_bot_kill_task_wrong_bot(self):
  """A kill sent by a bot that does not own the task must be rejected."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  req_data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}))
  task = task_request.make_request(req_data, True)
  summary = task_scheduler.schedule_request(task)
  # 'localhost' reaps the task; 'bot1' then tries to kill it.
  _reaped, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  error = task_scheduler.bot_kill_task(run_result.key, 'bot1')
  self.assertEqual(
      'Bot bot1 sent task kill for task 1d69b9f088008811 owned by bot '
      'localhost',
      error)
def test_bot_kill_task_wrong_bot(self):
  """bot_kill_task() refuses a kill from a bot that does not own the task."""
  # NOTE(review): another test with this exact name exists earlier in this
  # file; if both end up in the same class the later definition silently
  # shadows the earlier one — confirm whether both are meant to be kept.
  self.mock(random, 'getrandbits', lambda _: 0x88)
  data = _gen_request_data(properties=dict(
      dimensions={u'OS': u'Windows-3.1.1'}))
  request = task_request.make_request(data)
  result_summary = task_scheduler.schedule_request(request)
  reaped_request, run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  # The task was reaped by 'localhost', so a kill from 'bot1' is rejected
  # with an explanatory message.
  expected = (
      'Bot bot1 sent task kill for task 1d69b9f088008811 owned by bot '
      'localhost')
  self.assertEqual(expected, task_scheduler.bot_kill_task(run_result.key, 'bot1'))
def test_cancel_task(self):
  """A still-pending task can be canceled; a PubSub notification is sent."""
  req_data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      pubsub_topic='projects/abc/topics/def')
  notifications = self.mock_pub_sub()
  task = task_request.make_request(req_data, True)
  summary = task_scheduler.schedule_request(task)
  canceled, was_running = task_scheduler.cancel_task(summary.key)
  self.assertEqual(True, canceled)
  self.assertEqual(False, was_running)
  # Reload from the datastore and verify the final state.
  summary = summary.key.get()
  self.assertEqual(task_result.State.CANCELED, summary.state)
  # The completion notification was published.
  self.assertEqual(1, len(notifications))
def test_search_by_name(self):
  """_search_by_name() finds a scheduled task, case-insensitively."""
  # Awkward, but _search_by_name() depends on data saved by task_scheduler,
  # so a task must be scheduled first (there's a layering issue).
  req = task_request.make_request(
      _gen_request(properties=dict(dimensions={u'OS': u'Windows-3.1.1'})),
      True)
  summary = task_scheduler.schedule_request(req)
  # 'requEST' uses unexpected casing on purpose: search must not be
  # case-sensitive.
  for term in ('requEST', 'name'):
    found, _cursor = task_result._search_by_name(term, None, 10)
    self.assertEqual([summary], found)
def _task_deduped(
    self, new_ts, deduped_from, task_id='1d8dc670a0008810', now=None):
  """Schedules an idempotent task and asserts it was deduped, not run.

  Args:
    new_ts: creation timestamp expected on the new (deduped) request.
    deduped_from: run result id the new task is expected to be deduped from.
    task_id: expected task id of the new request's result summary.
    now: expected started/completed timestamp copied from the previous run;
        defaults to self.now.
  """
  data = _gen_request(
      name='yay',
      user='******',
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True))
  request = task_request.make_request(data, True)
  _result_summary = task_scheduler.schedule_request(request)
  bot_dimensions = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  # Nothing is left to reap: the TaskToRun's queue_number was reset.
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
  actual_request_2, run_result_2 = task_scheduler.bot_reap_task(
      bot_dimensions, 'localhost', 'abc')
  self.assertEqual(None, actual_request_2)
  result_summary_duped, run_results_duped = get_results(request.key)
  expected = {
    'abandoned_ts': None,
    'bot_dimensions': bot_dimensions,
    'bot_id': u'localhost',
    'bot_version': u'abc',
    'children_task_ids': [],
    'completed_ts': now or self.now,
    'costs_usd': [],
    'cost_saved_usd': 0.1,
    'created_ts': new_ts,
    'deduped_from': deduped_from,
    'durations': [0.1],
    'exit_codes': [0],
    'failure': False,
    'id': task_id,
    'internal_failure': False,
    # Only this value is updated to 'now', the rest uses the previous run
    # timestamps.
    'modified_ts': new_ts,
    'name': u'yay',
    'outputs_ref': None,
    # A deduped task cannot be deduped against.
    'properties_hash': None,
    'server_versions': [u'v1a'],
    'started_ts': now or self.now,
    'state': State.COMPLETED,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Raoul'],
    'try_number': 0,
    'user': u'Raoul',
  }
  self.assertEqual(expected, result_summary_duped.to_dict())
  # A deduped task never actually ran, so it has no run results.
  self.assertEqual([], run_results_duped)
def test_cancel_task_running(self):
  """Canceling an already-running task fails and sends no notification."""
  req_data = _gen_request(
      properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
      pubsub_topic='projects/abc/topics/def')
  notifications = self.mock_pub_sub()
  task = task_request.make_request(req_data, True)
  summary = task_scheduler.schedule_request(task)
  # A bot reaps the task before the cancellation attempt.
  _reaped, _run_result = task_scheduler.bot_reap_task(
      {'OS': 'Windows-3.1.1'}, 'localhost', 'abc')
  canceled, was_running = task_scheduler.cancel_task(summary.key)
  self.assertEqual(False, canceled)
  self.assertEqual(True, was_running)
  # The task keeps running and nothing was published.
  summary = summary.key.get()
  self.assertEqual(task_result.State.RUNNING, summary.state)
  self.assertEqual(0, len(notifications))
def test_bot_reap_task(self):
  """A bot with matching dimensions reaps the pending task."""
  req = task_request.make_request(
      _gen_request(properties=dict(dimensions={u'OS': u'Windows-3.1.1'})),
      True)
  task_scheduler.schedule_request(req)
  dims = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  reaped, run_result = task_scheduler.bot_reap_task(dims, 'localhost', 'abc')
  self.assertEqual(req, reaped)
  self.assertEqual('localhost', run_result.bot_id)
  # The TaskToRun was consumed: its queue_number is reset to None.
  self.assertEqual(None, task_to_run.TaskToRun.query().get().queue_number)
def test_task_idempotent_old(self):
  """An idempotent task older than the reuse window is not deduped against."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  # Run a first idempotent task to completion.
  self._task_ran_successfully()
  # Advance the clock to the reuse limit: the first task is now too old.
  self.mock_now(self.now, config.settings().reusable_task_age_secs)
  req = task_request.make_request(
      _gen_request(
          name='yay',
          user='******',
          properties=dict(
              dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True)),
      True)
  task_scheduler.schedule_request(req)
  # The task was enqueued for execution instead of being deduped.
  self.assertNotEqual(
      None, task_to_run.TaskToRun.query().get().queue_number)
def handle_early_release(machine_lease):
  """Handles the early release of a leased machine.

  When the lease is within early_release_secs of expiring, schedules a
  termination task for the machine (bypassing ACL checks) and associates the
  resulting task with the lease.

  Args:
    machine_lease: MachineLease instance.
  """
  release_cutoff = utils.utcnow() + datetime.timedelta(
      seconds=machine_lease.early_release_secs)
  if machine_lease.lease_expiration_ts <= release_cutoff:
    logging.info('MachineLease ready to be released: %s', machine_lease.key)
    summary = task_scheduler.schedule_request(
        task_request.create_termination_task(machine_lease.hostname, True),
        check_acls=False,
    )
    associate_termination_task(
        machine_lease.key, machine_lease.hostname, summary.task_id)
def test_notify_request_with_tq_batch_mode(self):
  """In batch mode, notifications are buffered in a pull queue and flushed
  as one batched call per external scheduler id."""
  request = _gen_request()
  result_summary = task_scheduler.schedule_request(request, None)
  self.assertEqual(1, self.execute_tasks())
  # Create requests with different scheduler IDs.
  external_scheduler.notify_requests(
      self.cfg_foo, [(request, result_summary)], True, False, batch_mode=True)
  external_scheduler.notify_requests(
      self.cfg_foo, [(request, result_summary)], True, False, batch_mode=True)
  external_scheduler.notify_requests(
      self.cfg_hoe, [(request, result_summary)], True, False, batch_mode=True)
  self._setup_client()
  # There should have been no call in _get_client yet.
  self.assertEqual(len(self._client.called_with_requests), 0)
  # Execute the kicker to call the pull queue worker.
  # 3 tasks(kickers) were added to es-notify-kickers.
  # 2 tasks(batched request) will be added to es-notify-tasks
  # once the kicker is done.
  self.assertEqual(5, self.execute_tasks())
  called_with = self._client.called_with_requests
  # There should have been 2 calls to the external scheduler.
  self.assertEqual(len(called_with), 2)
  # Sort for a deterministic order before per-scheduler assertions.
  called_with.sort(key=lambda x: x.scheduler_id)
  # Request foo should have 2 notifications.
  self.assertEqual(len(called_with[0].notifications), 2)
  self.assertEqual(called_with[0].scheduler_id, u'foo')
  # Request hoe should have 1 notification.
  self.assertEqual(len(called_with[1].notifications), 1)
  self.assertEqual(called_with[1].scheduler_id, u'hoe')
  # There should be no tasks remaining in the pull queue.
  stats = taskqueue.QueueStatistics.fetch('es-notify-tasks-batch')
  self.assertEqual(0, stats.tasks)
def test_task_parent_children(self):
  """A child task is recorded in its parent's children_task_ids."""
  # Run a parent task to completion, then create a child referencing it.
  parent_id = self._task_ran_successfully()
  child_request = task_request.make_request(
      _gen_request(
          parent_task_id=parent_id,
          properties=dict(dimensions={u'OS': u'Windows-3.1.1'})),
      True)
  child_summary = task_scheduler.schedule_request(child_request)
  self.assertEqual([], child_summary.children_task_ids)
  self.assertEqual(parent_id, child_request.parent_task_id)
  # Both the parent's run result and its result summary now list the child.
  parent_run_key = task_pack.unpack_run_result_key(parent_id)
  parent_summary_key = task_pack.run_result_key_to_result_summary_key(
      parent_run_key)
  want = [child_summary.task_id]
  self.assertEqual(want, parent_run_key.get().children_task_ids)
  self.assertEqual(want, parent_summary_key.get().children_task_ids)
def post(self): request_data = self.parse_body() # If the priority is below 100, make the the user has right to do so. if request_data.get('priority', 255) < 100 and not acl.is_bot_or_admin(): # Silently drop the priority of normal users. request_data['priority'] = 100 try: request = task_request.make_request(request_data) except (datastore_errors.BadValueError, TypeError, ValueError) as e: self.abort_with_error(400, error=str(e)) result_summary = task_scheduler.schedule_request(request) data = { 'request': request.to_dict(), 'task_id': task_pack.pack_result_summary_key(result_summary.key), } self.send_response(utils.to_json_encodable(data))
def test_cron_handle_bot_died_ignored_expired(self):
  """cron_handle_bot_died() reports a silent bot even past task expiration."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  req = task_request.make_request(
      _gen_request_data(
          properties=dict(dimensions={u'OS': u'Windows-3.1.1'}),
          scheduling_expiration_secs=600))
  task_scheduler.schedule_request(req)
  dims = {
    u'OS': [u'Windows', u'Windows-3.1.1'],
    u'hostname': u'localhost',
    u'foo': u'bar',
  }
  _reaped, run_result = task_scheduler.bot_reap_task(
      dims, 'localhost', 'abc')
  self.assertEqual(1, run_result.try_number)
  self.assertEqual(task_result.State.RUNNING, run_result.state)
  # Jump past both the bot ping tolerance and the 600s scheduling expiration;
  # the cron job counts the task as one dead bot.
  self.mock_now(self.now + task_result.BOT_PING_TOLERANCE, 601)
  self.assertEqual((1, 0, 0), task_scheduler.cron_handle_bot_died())
def test_cron_abort_expired_task_to_run(self):
  """cron_abort_expired_task_to_run() expires a task no bot ever reaped."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  request = task_request.make_request(
      _gen_request(
          properties={'dimensions': {u'OS': u'Windows-3.1.1'}},
          pubsub_topic='projects/abc/topics/def'),
      True)
  pub_sub_calls = self.mock_pub_sub()
  result_summary = task_scheduler.schedule_request(request)
  # Move the clock just past the request's expiration.
  abandoned_ts = self.mock_now(self.now, request.expiration_secs+1)
  self.assertEqual(
      ['1d69b9f088008810'],
      task_scheduler.cron_abort_expired_task_to_run('f.local'))
  # No bot ever reaped the task, so no TaskRunResult exists.
  self.assertEqual([], task_result.TaskRunResult.query().fetch())
  expected = {
    'abandoned_ts': abandoned_ts,
    'bot_dimensions': None,
    'bot_id': None,
    'bot_version': None,
    'children_task_ids': [],
    'completed_ts': None,
    'costs_usd': [],
    'cost_saved_usd': None,
    'created_ts': self.now,
    'deduped_from': None,
    'durations': [],
    'exit_codes': [],
    'failure': False,
    'id': '1d69b9f088008810',
    'internal_failure': False,
    'modified_ts': abandoned_ts,
    'name': u'Request name',
    'outputs_ref': None,
    'properties_hash': None,
    'server_versions': [],
    'started_ts': None,
    'state': task_result.State.EXPIRED,
    'tags': [u'OS:Windows-3.1.1', u'priority:50', u'tag:1', u'user:Jesus'],
    'try_number': None,
    'user': u'Jesus',
  }
  self.assertEqual(expected, result_summary.key.get().to_dict())
  self.assertEqual(1, len(pub_sub_calls))  # pubsub completion notification
def terminate(self, request): """Asks a bot to terminate itself gracefully. The bot will stay in the DB, use 'delete' to remove it from the DB afterward. This request returns a pseudo-taskid that can be waited for to wait for the bot to turn down. This command is particularly useful when a privileged user needs to safely debug a machine specific issue. The user can trigger a terminate for one of the bot exhibiting the issue, wait for the pseudo-task to run then access the machine with the guarantee that the bot is not running anymore. """ # TODO(maruel): Disallow a terminate task when there's one currently # pending or if the bot is considered 'dead', e.g. no contact since 10 # minutes. logging.info('%s', request) bot_key = bot_management.get_info_key(request.bot_id) get_or_raise(bot_key) # raises 404 if there is no such bot try: # Craft a special priority 0 task to tell the bot to shutdown. properties = task_request.TaskProperties( dimensions={u'id': request.bot_id}, execution_timeout_secs=0, grace_period_secs=0, io_timeout_secs=0) now = utils.utcnow() request = task_request.TaskRequest( created_ts=now, expiration_ts=now + datetime.timedelta(days=1), name='Terminate %s' % request.bot_id, priority=0, properties=properties, tags=['terminate:1'], user=auth.get_current_identity().to_bytes()) assert request.properties.is_terminate posted_request = task_request.make_request(request, acl.is_bot_or_admin()) except (datastore_errors.BadValueError, TypeError, ValueError) as e: raise endpoints.BadRequestException(e.message) result_summary = task_scheduler.schedule_request(posted_request) return swarming_rpcs.TerminateResponse( task_id=task_pack.pack_result_summary_key(result_summary.key))
def test_notify_requests(self):
  """notify_requests() sends one notification mirroring the request."""
  req = _gen_request()
  summary = task_scheduler.schedule_request(req, None)
  external_scheduler.notify_requests(
      self.es_cfg, [(req, summary)], False, False)
  # Exactly one call reached the external scheduler client.
  self.assertEqual(1, len(self._client.called_with_requests))
  batch = self._client.called_with_requests[0]
  self.assertEqual(1, len(batch.notifications))
  notification = batch.notifications[0]
  # The notification mirrors the request's metadata.
  self.assertEqual(
      req.created_ts, notification.task.enqueued_time.ToDatetime())
  self.assertEqual(req.task_id, notification.task.id)
  self.assertEqual(req.num_task_slices, len(notification.task.slices))
  self.execute_tasks()
def new(self, request):
  """Provides a TaskRequest and receives its metadata.

  Converts the protorpc message to a dict, clamps the priority for
  unprivileged callers, registers and schedules the request, and returns the
  resulting metadata including the packed task id.
  """
  request_dict = json.loads(remote.protojson.encode_message(request))
  _transform_request(request_dict)
  # If the priority is below 100, make sure the user has the right to use it.
  if request_dict.get('priority', 255) < 100 and not acl.is_bot_or_admin():
    # Silently drop the priority of normal users.
    request_dict['priority'] = 100
  try:
    posted_request = task_request.make_request(request_dict)
  except (datastore_errors.BadValueError, TypeError, ValueError) as e:
    raise endpoints.BadRequestException(e.message)
  result_summary = task_scheduler.schedule_request(posted_request)
  posted_dict = utils.to_json_encodable(posted_request)
  return swarming_rpcs.TaskRequestMetadata(
      request=message_conversion.task_request_from_dict(posted_dict),
      task_id=task_pack.pack_result_summary_key(result_summary.key))
def post(self, task_id):
  """Retries an existing task by cloning its request and scheduling the clone.

  Accepts either a result summary id or a run result id. Aborts with a 404
  on a malformed id or an unknown request, then redirects to the new task's
  page.
  """
  try:
    key = task_pack.unpack_result_summary_key(task_id)
    request_key = task_pack.result_summary_key_to_request_key(key)
  except ValueError:
    # Not a result summary id; try interpreting it as a run result id.
    try:
      key = task_pack.unpack_run_result_key(task_id)
      request_key = task_pack.result_summary_key_to_request_key(
          task_pack.run_result_key_to_result_summary_key(key))
    except (NotImplementedError, ValueError):
      self.abort(404, 'Invalid key format.')
  # Retrying a task is essentially reusing the same task request as the
  # original one, but with new parameters.
  original_request = request_key.get()
  if not original_request:
    self.abort(404, 'Invalid request key.')
  new_request = task_request.make_request_clone(original_request)
  result_summary = task_scheduler.schedule_request(new_request)
  self.redirect('/user/task/%s' % result_summary.task_id)
def test_task_idempotent_three(self):
  """Dedup chain: second task dedupes, third must run again."""
  self.mock(random, 'getrandbits', lambda _: 0x88)
  # First task is idempotent and runs successfully.
  first_id = self._task_ran_successfully()
  # Just inside the reuse window: the second task is deduped against the
  # first.
  age = config.settings().reusable_task_age_secs
  self._task_deduped(self.mock_now(self.now, age - 1), first_id)
  # Exactly at the reuse window: the first task is too old, and a deduped
  # task cannot itself be deduped against, so the third task must run.
  self.mock_now(self.now, age)
  req = task_request.make_request(
      _gen_request_data(
          name='yay',
          user='******',
          properties=dict(
              dimensions={u'OS': u'Windows-3.1.1'}, idempotent=True)))
  task_scheduler.schedule_request(req)
  # The task was enqueued for execution.
  self.assertNotEqual(
      None, task_to_run.TaskToRun.query().get().queue_number)
def new(self, request):
  """Provides a TaskRequest and receives its metadata.

  NOTE(review): a near-identical `new` endpoint appears earlier in this
  file; confirm whether both variants are meant to be kept.

  Converts the protorpc message to a dict, clamps the priority for
  unprivileged callers, registers and schedules the request, and returns the
  resulting metadata including the packed task id.
  """
  request_dict = json.loads(remote.protojson.encode_message(request))
  _transform_request(request_dict)
  # If the priority is below 100, make sure the user has the right to use it.
  if request_dict.get('priority', 255) < 100 and not acl.is_bot_or_admin():
    # Silently drop the priority of normal users.
    request_dict['priority'] = 100
  try:
    posted_request = task_request.make_request(request_dict)
  except (datastore_errors.BadValueError, TypeError, ValueError) as e:
    raise endpoints.BadRequestException(e.message)
  result_summary = task_scheduler.schedule_request(posted_request)
  posted_dict = utils.to_json_encodable(posted_request)
  return swarming_rpcs.TaskRequestMetadata(
      request=message_conversion.task_request_from_dict(posted_dict),
      task_id=task_pack.pack_result_summary_key(result_summary.key))