def testEvaluateFailedDependency(self, *_):
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(evaluators=(
              functools.partial(FakeFindIsolateFailed, self.job),
              evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))

  # When we initiate the run_test tasks, we should immediately see the tasks
  # failing because the dependency has a hard failure status.
  self.assertEqual(
      dict([('build_aaaaaaa', mock.ANY)] +
           [('run_test_aaaaaaa_%s' % (attempt,), {
               'status': 'failed',
               'errors': mock.ANY,
               'dimensions': DIMENSIONS,
               'extra_args': [],
               'swarming_server': 'some_server',
           }) for attempt in range(11)]),
      task_module.Evaluate(
          self.job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluator))
def testEvaluateFailedDependency(self, *_):
  self.PopulateTaskGraph(
      benchmark='some_benchmark',
      chart='chart',
      trace='must_not_be_found',
      mode='graph_json')
  self.assertNotEqual(
      {},
      task_module.Evaluate(
          self.job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluators.SequenceEvaluator(evaluators=(
              evaluators.FilteringEvaluator(
                  predicate=evaluators.TaskTypeEq('find_isolate'),
                  delegate=evaluators.SequenceEvaluator(evaluators=(
                      functools.partial(FakeFoundIsolate, self.job),
                      evaluators.TaskPayloadLiftingEvaluator()))),
              evaluators.FilteringEvaluator(
                  predicate=evaluators.TaskTypeEq('run_test'),
                  delegate=evaluators.SequenceEvaluator(evaluators=(
                      functools.partial(FakeFailedRunTest, self.job),
                      evaluators.TaskPayloadLiftingEvaluator()))),
              read_value.Evaluator(self.job),
          ))))
  self.assertEqual(
      {
          'read_value_chromium@aaaaaaa_%s' % (attempt,): {
              'benchmark': 'some_benchmark',
              'mode': 'graph_json',
              'results_filename': 'some_benchmark/perf_results.json',
              'histogram_options': {
                  'tir_label': None,
                  'story': None,
                  'statistic': None,
              },
              'graph_json_options': {
                  'chart': 'chart',
                  'trace': 'must_not_be_found',
              },
              'errors': [{
                  'reason': 'DependencyFailed',
                  'message': mock.ANY,
              }],
              'status': 'failed',
              'tries': 1,
          } for attempt in range(10)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          evaluators.Selector(task_type='read_value')))
def CompoundEvaluatorForTesting(self, fake_evaluator):
  return evaluators.SequenceEvaluator([
      evaluators.FilteringEvaluator(
          predicate=evaluators.All(
              evaluators.TaskTypeEq('read_value'),
              evaluators.TaskStatusIn({'pending'})),
          delegate=evaluators.SequenceEvaluator(
              [fake_evaluator,
               evaluators.TaskPayloadLiftingEvaluator()])),
      evaluators.SequenceEvaluator([
          performance_bisection.Evaluator(self.job),
          evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'})
      ]),
  ])
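# A minimal usage sketch (hedged): inside a test method of the same class, the
# compound evaluator is passed to task_module.Evaluate like any other
# evaluator. FakeReadValueSameResult from bisection_test_util stands in as the
# injected fake here, as it does elsewhere in these tests.
task_module.Evaluate(
    self.job,
    event_module.Event(type='initiate', target_task=None, payload={}),
    self.CompoundEvaluatorForTesting(
        bisection_test_util.FakeReadValueSameResult(self.job, 1.0)))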
def __init__(self):
  super(Serializer, self).__init__({
      'find_isolate':
          evaluators.SequenceEvaluator(
              [find_isolate.Serializer(), TaskTransformer]),
      'run_test':
          evaluators.SequenceEvaluator(
              [run_test.Serializer(), TaskTransformer]),
      'read_value':
          evaluators.SequenceEvaluator(
              [read_value.Serializer(), TaskTransformer]),
      'find_culprit':
          evaluators.SequenceEvaluator(
              [performance_bisection.Serializer(), AnalysisTransformer]),
  })
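# A minimal usage sketch (an assumption, not confirmed by this file): since the
# Serializer is itself a composite evaluator keyed by task type, it would be
# driven through task_module.Evaluate with a 'select'-style event, the same way
# the other evaluators in these tests are invoked; 'job' here is hypothetical.
job_state = task_module.Evaluate(
    job,
    event_module.Event(type='select', target_task=None, payload={}),
    Serializer())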
def __init__(self, job):
  super(Evaluator, self).__init__(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.All(evaluators.TaskTypeEq('run_test'),),
          delegate=evaluators.DispatchByEventTypeEvaluator({
              'initiate':
                  evaluators.FilteringEvaluator(
                      predicate=evaluators.Not(
                          evaluators.TaskStatusIn(
                              {'ongoing', 'failed', 'completed'})),
                      delegate=InitiateEvaluator(job)),
              # For updates, we want to ensure that the initiate evaluator
              # has a chance to run on 'pending' tasks.
              'update':
                  evaluators.SequenceEvaluator([
                      evaluators.FilteringEvaluator(
                          predicate=evaluators.Not(
                              evaluators.TaskStatusIn(
                                  {'ongoing', 'failed', 'completed'})),
                          delegate=InitiateEvaluator(job)),
                      evaluators.FilteringEvaluator(
                          predicate=evaluators.TaskStatusIn({'ongoing'}),
                          delegate=UpdateEvaluator(job)),
                  ])
          })),
      evaluators.TaskPayloadLiftingEvaluator(),
  ))
def __init__(self, job):
  super(Evaluator, self).__init__(
      predicate=evaluators.All(
          evaluators.TaskTypeEq('read_value'),
          evaluators.TaskStatusIn({'pending'})),
      delegate=evaluators.SequenceEvaluator(
          evaluators=(evaluators.TaskPayloadLiftingEvaluator(),
                      ReadValueEvaluator(job))))
def testSequenceEvaluator(self):

  def FirstEvaluator(*args):
    args[2].update({'value': 1})
    return ['First Action']

  def SecondEvaluator(*args):
    args[2].update({'value': accumulator.get('value') + 1})
    return ['Second Action']

  task = task_module.InMemoryTask(
      id='test_id',
      task_type='test',
      payload={},
      status='pending',
      dependencies=[])
  evaluator = evaluators.SequenceEvaluator(
      evaluators=(FirstEvaluator, SecondEvaluator))
  event = event_module.Event(type='test', target_task=None, payload={})
  accumulator = {}

  # Test that we're collecting the actions returned by the nested evaluators.
  self.assertEqual(['First Action', 'Second Action'],
                   evaluator(task, event, accumulator))

  # Test that the operations happened in sequence.
  self.assertEqual({'value': 2}, accumulator)
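# A minimal sketch of the evaluator protocol the test above exercises: an
# evaluator is any callable taking (task, event, accumulator) and returning a
# list of actions (possibly empty). The composition below mirrors the patterns
# used throughout these files; the 'seen' key and the 'run_test'/'pending'
# filter are illustrative assumptions, not part of the production evaluators.
def RecordTaskId(task, event, accumulator):
  del event  # Unused; only the task and the accumulator matter here.
  accumulator.setdefault('seen', []).append(task.id)
  return []  # No actions to perform.

example_evaluator = evaluators.SequenceEvaluator(evaluators=(
    evaluators.FilteringEvaluator(
        predicate=evaluators.All(
            evaluators.TaskTypeEq('run_test'),
            evaluators.TaskStatusIn({'pending'})),
        delegate=evaluators.SequenceEvaluator(
            evaluators=(RecordTaskId,
                        evaluators.TaskPayloadLiftingEvaluator()))),
))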
def BisectionEvaluatorForTesting(self, *seeded_evaluators):
  """Creates an evaluator for bisection with the provided evaluators.

  This is a utility function for creating a bisection evaluator that nests
  the provided evaluators within the overall sequence of evaluators.
  """
  return evaluators.SequenceEvaluator([
      evaluators.FilteringEvaluator(
          predicate=evaluators.All(evaluators.TaskStatusIn({'pending'})),
          delegate=evaluators.SequenceEvaluator(
              list(seeded_evaluators) +
              [evaluators.TaskPayloadLiftingEvaluator()])),
      evaluators.SequenceEvaluator([
          performance_bisection.Evaluator(self.job),
          evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'})
      ]),
  ])
def setUp(self):
  super(EvaluatorTest, self).setUp()
  self.maxDiff = None
  self.job = job_module.Job.New((), ())

  # Set up a common evaluator for all the test cases.
  self.evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('run_test'),
          delegate=evaluators.SequenceEvaluator(evaluators=(
              bisection_test_util.FakeSuccessfulRunTest(self.job),
              evaluators.TaskPayloadLiftingEvaluator()))),
      read_value.Evaluator(self.job),
  ))
def testMissingDependencyInputs(self):
  job = job_module.Job.New((), ())
  task_module.PopulateTaskGraph(
      job,
      task_module.TaskGraph(
          vertices=[
              task_module.TaskVertex(
                  id='build_aaaaaaa',
                  vertex_type='find_isolate',
                  payload={
                      'builder': 'Some Builder',
                      'target': 'telemetry_perf_tests',
                      'bucket': 'luci.bucket',
                      'change': {
                          'commits': [{
                              'repository': 'chromium',
                              'git_hash': 'aaaaaaa',
                          }]
                      }
                  }),
              task_module.TaskVertex(
                  id='run_test_aaaaaaa_0',
                  vertex_type='run_test',
                  payload={
                      'swarming_server': 'some_server',
                      'dimensions': DIMENSIONS,
                      'extra_args': [],
                  }),
          ],
          edges=[
              task_module.Dependency(
                  from_='run_test_aaaaaaa_0', to='build_aaaaaaa')
          ],
      ))

  # This time we're fine; there should be no errors.
  self.assertEqual({},
                   task_module.Evaluate(
                       job,
                       event_module.Event(
                           type='validate', target_task=None, payload={}),
                       run_test.Validator()))

  # Send an initiate message, then catch that we've not provided the required
  # payload in the task when it's ongoing.
  self.assertEqual(
      {
          'build_aaaaaaa': mock.ANY,
          'run_test_aaaaaaa_0': {
              'errors': [{
                  'cause': 'MissingDependencyInputs',
                  'message': mock.ANY
              }]
          }
      },
      task_module.Evaluate(
          job,
          event_module.Event(type='initiate', target_task=None, payload={}),
          evaluators.FilteringEvaluator(
              predicate=evaluators.TaskTypeEq('find_isolate'),
              delegate=evaluators.SequenceEvaluator(evaluators=(
                  functools.partial(FakeNotFoundIsolate, job),
                  evaluators.TaskPayloadLiftingEvaluator(),
              )),
              alternative=run_test.Validator()),
      ))
def testEvaluateHandleFailures_Expired(self, swarming_task_result,
                                       swarming_tasks_new):
  swarming_tasks_new.return_value = {'task_id': 'task id'}
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(functools.partial(FakeFoundIsolate, self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))
  self.assertNotEqual({},
                      task_module.Evaluate(
                          self.job,
                          event_module.Event(
                              type='initiate', target_task=None, payload={}),
                          evaluator))
  swarming_task_result.return_value = {
      'state': 'EXPIRED',
  }
  for attempt in range(11):
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='update',
                target_task='run_test_aaaaaaa_%s' % (attempt,),
                payload={
                    'kind': 'pubsub_message',
                    'action': 'poll'
                }),
            evaluator), 'Attempt #%s' % (attempt,))
  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt,): {
              'status': 'failed',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'errors': [
                  {
                      'reason': 'SwarmingExpired',
                      'message': mock.ANY
                  },
              ],
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_result': {
                  'state': 'EXPIRED',
              },
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))
def testEvaluateHandleFailures_Hard(self, swarming_task_stdout,
                                    swarming_task_result, swarming_tasks_new):
  swarming_tasks_new.return_value = {'task_id': 'task id'}
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(functools.partial(FakeFoundIsolate, self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))
  self.assertNotEqual({},
                      task_module.Evaluate(
                          self.job,
                          event_module.Event(
                              type='initiate', target_task=None, payload={}),
                          evaluator))

  # We set it up so that when we poll the swarming task, we get an error
  # status. We're expecting that hard failures are detected.
  swarming_task_stdout.return_value = {
      'output':
          """Traceback (most recent call last):
  File "../../testing/scripts/run_performance_tests.py", line 282, in <module>
    sys.exit(main())
  File "../../testing/scripts/run_performance_tests.py", line 226, in main
    benchmarks = args.benchmark_names.split(',')
AttributeError: 'Namespace' object has no attribute 'benchmark_names'"""
  }
  swarming_task_result.return_value = {
      'bot_id': 'bot id',
      'exit_code': 1,
      'failure': True,
      'outputs_ref': {
          'isolatedserver': 'output isolate server',
          'isolated': 'output isolate hash',
      },
      'state': 'COMPLETED',
  }
  for attempt in range(11):
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='update',
                target_task='run_test_aaaaaaa_%s' % (attempt,),
                payload={
                    'kind': 'pubsub_message',
                    'action': 'poll'
                }),
            evaluator), 'Attempt #%s' % (attempt,))
  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt,): {
              'status': 'failed',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'errors': mock.ANY,
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_result': {
                  'bot_id': mock.ANY,
                  'state': 'COMPLETED',
                  'failure': True,
              },
              'isolate_server': 'output isolate server',
              'isolate_hash': 'output isolate hash',
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))
def testEvaluateToCompletion(self, swarming_task_result, swarming_tasks_new):
  swarming_tasks_new.return_value = {'task_id': 'task id'}
  evaluator = evaluators.SequenceEvaluator(evaluators=(
      evaluators.FilteringEvaluator(
          predicate=evaluators.TaskTypeEq('find_isolate'),
          delegate=evaluators.SequenceEvaluator(
              evaluators=(functools.partial(FakeFoundIsolate, self.job),
                          evaluators.TaskPayloadLiftingEvaluator()))),
      run_test.Evaluator(self.job),
  ))
  self.assertNotEqual({},
                      task_module.Evaluate(
                          self.job,
                          event_module.Event(
                              type='initiate', target_task=None, payload={}),
                          evaluator))

  # Ensure that we've found all the 'run_test' tasks.
  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt,): {
              'status': 'ongoing',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))

  # Ensure that we've actually made the calls to the Swarming service.
  swarming_tasks_new.assert_called()
  self.assertGreaterEqual(swarming_tasks_new.call_count, 10)

  # Then we propagate an event for each of the run_test tasks in the graph.
  swarming_task_result.return_value = {
      'bot_id': 'bot id',
      'exit_code': 0,
      'failure': False,
      'outputs_ref': {
          'isolatedserver': 'output isolate server',
          'isolated': 'output isolate hash',
      },
      'state': 'COMPLETED',
  }
  for attempt in range(11):
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='update',
                target_task='run_test_aaaaaaa_%s' % (attempt,),
                payload={}),
            evaluator), 'Attempt #%s' % (attempt,))

  # Ensure that we've polled the status of each of the tasks, and that we've
  # marked the tasks completed.
  self.assertEqual(
      {
          'run_test_aaaaaaa_%s' % (attempt,): {
              'status': 'completed',
              'swarming_server': 'some_server',
              'dimensions': DIMENSIONS,
              'extra_args': [],
              'swarming_request_body': {
                  'name': mock.ANY,
                  'user': mock.ANY,
                  'priority': mock.ANY,
                  'task_slices': mock.ANY,
                  'tags': mock.ANY,
                  'pubsub_auth_token': mock.ANY,
                  'pubsub_topic': mock.ANY,
                  'pubsub_userdata': mock.ANY,
              },
              'swarming_task_result': {
                  'bot_id': mock.ANY,
                  'state': 'COMPLETED',
                  'failure': False,
              },
              'isolate_server': 'output isolate server',
              'isolate_hash': 'output isolate hash',
              'swarming_task_id': 'task id',
              'tries': 1,
          } for attempt in range(11)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          Selector(task_type='run_test')))

  # Ensure that we've actually made the calls to the Swarming service.
  swarming_task_result.assert_called()
  self.assertGreaterEqual(swarming_task_result.call_count, 10)
def testSerializeJob(self):
  self.PopulateSimpleBisectionGraph(self.job)
  task_module.Evaluate(
      self.job, bisection_test_util.SelectEvent(),
      evaluators.SequenceEvaluator([
          evaluators.DispatchByTaskType({
              'find_isolate':
                  bisection_test_util.FakeFoundIsolate(self.job),
              'run_test':
                  bisection_test_util.FakeSuccessfulRunTest(self.job),
              'read_value':
                  bisection_test_util.FakeReadValueSameResult(self.job, 1.0),
              'find_culprit':
                  performance_bisection.Evaluator(self.job),
          }),
          evaluators.TaskPayloadLiftingEvaluator()
      ]))
  logging.debug('Finished evaluating job state.')
  job_dict = self.job.AsDict(options=[job_module.OPTION_STATE])
  logging.debug('Job = %s', pprint.pformat(job_dict))
  self.assertTrue(self.job.use_execution_engine)
  self.assertEqual(
      {
          'arguments': mock.ANY,
          'bug_id': None,
          'cancel_reason': None,
          'comparison_mode': 'performance',
          'configuration': 'some_configuration',
          'created': mock.ANY,
          'difference_count': 0,
          'exception': None,
          'job_id': mock.ANY,
          'metric': 'some_benchmark',
          'name': mock.ANY,
          'quests': ['Build', 'Test', 'Get results'],
          'results_url': mock.ANY,
          'status': mock.ANY,
          'updated': mock.ANY,
          'user': None,
          # NOTE: Here we're assessing the structure of the results, not the
          # actual contents. We'll reserve more specific content testing for
          # other test cases; for now we're ensuring that the data has the
          # expected shape.
          'state': [{
              'attempts': [{
                  'executions': [mock.ANY] * 3
              }] + [{
                  'executions': [None, mock.ANY, mock.ANY]
              }] * 9,
              'change': self.start_change.AsDict(),
              'comparisons': {
                  'prev': None,
                  'next': 'same',
              },
              'result_values': [mock.ANY] * 10,
          }, {
              'attempts': [{
                  'executions': [mock.ANY] * 3
              }] + [{
                  'executions': [None, mock.ANY, mock.ANY]
              }] * 9,
              'change': self.end_change.AsDict(),
              'comparisons': {
                  'prev': 'same',
                  'next': None,
              },
              'result_values': [mock.ANY] * 10,
          }]
      }, job_dict)