def __init__(self, job):
  super(Evaluator, self).__init__(
      evaluators=(
          evaluators.TaskPayloadLiftingEvaluator(),
          evaluators.FilteringEvaluator(
              predicate=evaluators.All(
                  evaluators.TaskTypeEq('run_test'),
                  evaluators.TaskIsEventTarget(),
              ),
              delegate=evaluators.DispatchByEventTypeEvaluator({
                  'initiate':
                      evaluators.FilteringEvaluator(
                          predicate=evaluators.Not(
                              evaluators.TaskStatusIn(
                                  {'ongoing', 'failed', 'completed'})),
                          delegate=InitiateEvaluator(job)),
                  'update':
                      evaluators.FilteringEvaluator(
                          predicate=evaluators.TaskStatusIn({'ongoing'}),
                          delegate=UpdateEvaluator(job)),
              })),
      ))

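# Hedged usage sketch (not part of the original module): the composite
# evaluator above is driven by posting events through task_module.Evaluate,
# mirroring how the tests later in this section exercise it. The helper name
# _DriveRunTest, the illustrative task id, and the assumption that the
# task_module/event_module aliases are importable here are all mine.
def _DriveRunTest(job):
  # Kick off any pending 'run_test' tasks; tasks already 'ongoing', 'failed',
  # or 'completed' are filtered out of the 'initiate' path by the predicate
  # in the evaluator above.
  task_module.Evaluate(
      job,
      event_module.Event(type='initiate', target_task=None, payload={}),
      Evaluator(job))
  # Poll one task by targeting it with an 'update' event, the same shape the
  # Swarming pubsub handler would deliver.
  task_module.Evaluate(
      job,
      event_module.Event(
          type='update',
          target_task='run_test_aaaaaaa_0',  # illustrative task id
          payload={'kind': 'pubsub_message', 'action': 'poll'}),
      Evaluator(job))
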
def IsDone(job_id):
  """Transactionally check whether a job is done executing.

  Returns True IFF the job is executing under the execution engine and its
  internal state is consistent with the job being "done".

  Raises any errors encountered in execution engine evaluation.
  """
  job = JobFromId(job_id)
  if not job.use_execution_engine:
    return False

  # This comes from an eventually consistent read, but we can treat that as a
  # relaxed load -- if this is ever true, a transaction before this call has
  # already marked the job "done".
  if job.done:
    return True

  try:
    context = task_module.Evaluate(
        job, event_module.SelectEvent(),
        evaluators.DispatchByTaskType({
            'find_culprit':
                evaluators.TaskPayloadLiftingEvaluator(
                    exclude_keys=['commits'])
        }))
    if not context:
      return False
    for payload in context.values():
      status = payload.get('status')
      if status in {'pending', 'ongoing'}:
        return False
    return True
  except task_module.Error as error:
    logging.error('Evaluation error: %s', error)
    raise

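# Hedged caller sketch (assumption, not part of the original file): IsDone
# re-raises evaluation errors, so a polling caller has to decide what to do
# with them. _MarkJobDone is a hypothetical helper standing in for whatever
# transactional update the real caller performs.
def _PollJob(job_id):
  try:
    if IsDone(job_id):
      _MarkJobDone(job_id)  # hypothetical follow-up; not defined here
  except task_module.Error:
    # Leave the job untouched; a later poll can retry the evaluation.
    logging.exception('Deferring job %s after evaluation error.', job_id)
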
def testEvaluateFailedDependency(self, *_):
  self.PopulateTaskGraph(
      benchmark='some_benchmark',
      chart='chart',
      trace='must_not_be_found',
      mode='graph_json')
  self.assertNotEqual(
      {},
      task_module.Evaluate(
          self.job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluators.SequenceEvaluator(evaluators=(
              evaluators.FilteringEvaluator(
                  predicate=evaluators.TaskTypeEq('find_isolate'),
                  delegate=evaluators.SequenceEvaluator(evaluators=(
                      bisection_test_util.FakeFoundIsolate(self.job),
                      evaluators.TaskPayloadLiftingEvaluator()))),
              evaluators.FilteringEvaluator(
                  predicate=evaluators.TaskTypeEq('run_test'),
                  delegate=evaluators.SequenceEvaluator(evaluators=(
                      bisection_test_util.FakeFailedRunTest(self.job),
                      evaluators.TaskPayloadLiftingEvaluator()))),
              read_value.Evaluator(self.job),
          ))))
  self.assertEqual(
      {
          'read_value_chromium@aaaaaaa_%s' % (attempt, ): {
              'benchmark': 'some_benchmark',
              'change': mock.ANY,
              'mode': 'graph_json',
              'results_filename': 'some_benchmark/perf_results.json',
              'histogram_options': {
                  'grouping_label': None,
                  'story': None,
                  'statistic': None,
              },
              'graph_json_options': {
                  'chart': 'chart',
                  'trace': 'must_not_be_found',
              },
              'errors': [{
                  'reason': 'DependencyFailed',
                  'message': mock.ANY,
              }],
              'status': 'failed',
              'tries': 1,
              'index': attempt,
          } for attempt in range(10)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          evaluators.Selector(task_type='read_value')))

def testMissingDependencyInputs(self):
  job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='build_aaaaaaa',
                  vertex_type='find_isolate',
                  payload={
                      'builder': 'Some Builder',
                      'target': 'telemetry_perf_tests',
                      'bucket': 'luci.bucket',
                      'change': {
                          'commits': [{
                              'repository': 'chromium',
                              'git_hash': 'aaaaaaa',
                          }]
                      }
                  }),
              task_module.TaskVertex(
                  id='run_test_aaaaaaa_0',
                  vertex_type='run_test',
                  payload={
                      'swarming_server': 'some_server',
                      'dimensions': DIMENSIONS,
                      'extra_args': [],
                  }),
          ],
          edges=[
              task_module.Dependency(from_='run_test_aaaaaaa_0',
                                     to='build_aaaaaaa')
          ],
      ))

  # This time we're fine; there should be no errors.
  self.assertEqual(
      {},
      task_module.Evaluate(
          job,
          event_module.Event(type='validate', target_task=None, payload={}),
          run_test.Validator()))

  # Send an initiate message, then catch that we have not provided the
  # required payload in the task when it's ongoing.
  self.assertEqual(
      {
          'build_aaaaaaa': mock.ANY,
          'run_test_aaaaaaa_0': {
              'errors': [{
                  'cause': 'MissingDependencyInputs',
                  'message': mock.ANY
              }]
          }
      },
      task_module.Evaluate(
          job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluators.FilteringEvaluator(
              predicate=evaluators.TaskTypeEq('find_isolate'),
              delegate=evaluators.SequenceEvaluator(evaluators=(
                  functools.partial(FakeNotFoundIsolate, job),
                  evaluators.TaskPayloadLiftingEvaluator(),
              )),
              alternative=run_test.Validator()),
      ))

def testEvaluateHandleFailures_Expired(self, swarming_task_result,
                                       swarming_tasks_new):
  swarming_tasks_new.return_value = {'task_id': 'task id'}
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(functools.partial(FakeFoundIsolate, self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))
  self.assertNotEqual(
      {},
      task_module.Evaluate(
          self.job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluator))

  swarming_task_result.return_value = {
      'state': 'EXPIRED',
  }
  for attempt in range(11):
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='update',
                target_task='run_test_aaaaaaa_%s' % (attempt, ),
                payload={
                    'kind': 'pubsub_message',
                    'action': 'poll'
                }),
            evaluator), 'Attempt #%s' % (attempt, ))

  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt, ): {
              'status': 'failed',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'errors': [
                  {
                      'reason': 'SwarmingExpired',
                      'message': mock.ANY
                  },
              ],
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_result': {
                  'state': 'EXPIRED',
              },
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))

def testEvaluateHandleFailures_Hard(self, swarming_task_stdout,
                                    swarming_task_result, swarming_tasks_new):
  swarming_tasks_new.return_value = {'task_id': 'task id'}
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(functools.partial(FakeFoundIsolate, self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))
  self.assertNotEqual(
      {},
      task_module.Evaluate(
          self.job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluator))

  # Set things up so that polling the swarming task returns an error status;
  # we expect hard failures to be detected and reflected in the task payload.
  swarming_task_stdout.return_value = {
      'output':
          """Traceback (most recent call last):
  File "../../testing/scripts/run_performance_tests.py", line 282, in <module>
    sys.exit(main())
  File "../../testing/scripts/run_performance_tests.py", line 226, in main
    benchmarks = args.benchmark_names.split(',')
AttributeError: 'Namespace' object has no attribute 'benchmark_names'"""
  }
  swarming_task_result.return_value = {
      'bot_id': 'bot id',
      'exit_code': 1,
      'failure': True,
      'outputs_ref': {
          'isolatedserver': 'output isolate server',
          'isolated': 'output isolate hash',
      },
      'state': 'COMPLETED',
  }
  for attempt in range(11):
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='update',
                target_task='run_test_aaaaaaa_%s' % (attempt, ),
                payload={
                    'kind': 'pubsub_message',
                    'action': 'poll'
                }),
            evaluator), 'Attempt #%s' % (attempt, ))

  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt, ): {
              'status': 'failed',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'errors': mock.ANY,
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_result': {
                  'bot_id': mock.ANY,
                  'state': 'COMPLETED',
                  'failure': True,
              },
              'isolate_server': 'output isolate server',
              'isolate_hash': 'output isolate hash',
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))

def testEvaluateToCompletion(self, swarming_task_result, swarming_tasks_new):
  swarming_tasks_new.return_value = {'task_id': 'task id'}
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(functools.partial(FakeFoundIsolate, self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))
  self.assertNotEqual(
      {},
      task_module.Evaluate(
          self.job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluator))

  # Ensure that we've found all the 'run_test' tasks.
  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt, ): {
              'status': 'ongoing',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))

  # Ensure that we've actually made the calls to the Swarming service.
  swarming_tasks_new.assert_called()
  self.assertGreaterEqual(swarming_tasks_new.call_count, 10)

  # Then we propagate an event for each of the run_test tasks in the graph.
  swarming_task_result.return_value = {
      'bot_id': 'bot id',
      'exit_code': 0,
      'failure': False,
      'outputs_ref': {
          'isolatedserver': 'output isolate server',
          'isolated': 'output isolate hash',
      },
      'state': 'COMPLETED',
  }
  for attempt in range(11):
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='update',
                target_task='run_test_aaaaaaa_%s' % (attempt, ),
                payload={}),
            evaluator), 'Attempt #%s' % (attempt, ))

  # Ensure that we've polled the status of each of the tasks, and that we've
  # marked the tasks completed.
  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt, ): {
              'status': 'completed',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_result': {
                  'bot_id': mock.ANY,
                  'state': 'COMPLETED',
                  'failure': False,
              },
              'isolate_server': 'output isolate server',
              'isolate_hash': 'output isolate hash',
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))

  # Ensure that we've actually made the calls to the Swarming service.
  swarming_task_result.assert_called()
  self.assertGreaterEqual(swarming_task_result.call_count, 10)

def testSerializeJob(self):
  self.PopulateSimpleBisectionGraph(self.job)
  task_module.Evaluate(
      self.job, bisection_test_util.SelectEvent(),
      evaluators.SequenceEvaluator([
          evaluators.DispatchByTaskType({
              'find_isolate':
                  bisection_test_util.FakeFoundIsolate(self.job),
              'run_test':
                  bisection_test_util.FakeSuccessfulRunTest(self.job),
              'read_value':
                  bisection_test_util.FakeReadValueSameResult(self.job, 1.0),
              'find_culprit':
                  performance_bisection.Evaluator(self.job),
          }),
          evaluators.TaskPayloadLiftingEvaluator()
      ]))
  logging.debug('Finished evaluating job state.')
  job_dict = self.job.AsDict(options=[job_module.OPTION_STATE])
  logging.debug('Job = %s', pprint.pformat(job_dict))
  self.assertTrue(self.job.use_execution_engine)
  self.assertEqual(
      {
          'arguments': mock.ANY,
          'bug_id': None,
          'cancel_reason': None,
          'comparison_mode': 'performance',
          'configuration': 'some_configuration',
          'created': mock.ANY,
          'difference_count': 0,
          'exception': None,
          'job_id': mock.ANY,
          'metric': 'some_benchmark',
          'name': mock.ANY,
          'quests': ['Build', 'Test', 'Get results'],
          'results_url': mock.ANY,
          'status': mock.ANY,
          'updated': mock.ANY,
          'user': None,

          # NOTE: Here we're assessing the structure of the results, not the
          # actual contents. We'll reserve more specific content testing for
          # other test cases; for now we're ensuring that we can get the
          # shape of the data in a certain way.
          'state': [{
              'attempts': [{
                  'executions': [mock.ANY] * 3
              }] + [{
                  'executions': [None, mock.ANY, mock.ANY]
              }] * 9,
              'change': self.start_change.AsDict(),
              'comparisons': {
                  'prev': None,
                  'next': 'same',
              },
              'result_values': [mock.ANY] * 10,
          }, {
              'attempts': [{
                  'executions': [mock.ANY] * 3
              }] + [{
                  'executions': [None, mock.ANY, mock.ANY]
              }] * 9,
              'change': self.end_change.AsDict(),
              'comparisons': {
                  'prev': 'same',
                  'next': None,
              },
              'result_values': [mock.ANY] * 10,
          }]
      }, job_dict)