Пример #1
0
 def testEvaluateFailedDependency(self, *_):
     """Checks that failed run_test dependencies fail read_value tasks.

     Drives evaluation with fakes that report isolates as found but force
     every run_test task to fail, then asserts that each read_value task
     ends up 'failed' with a 'DependencyFailed' error after one try.
     """
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='chart',
                            trace='must_not_be_found',
                            mode='graph_json')
     # Initiate the whole graph: find_isolate tasks succeed (fake),
     # run_test tasks fail (fake), and read_value evaluates for real.
     self.assertNotEqual(
         {},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='initiate',
                                target_task=None,
                                payload={}),
             evaluators.SequenceEvaluator(evaluators=(
                 evaluators.FilteringEvaluator(
                     predicate=evaluators.TaskTypeEq('find_isolate'),
                     delegate=evaluators.SequenceEvaluator(evaluators=(
                         functools.partial(FakeFoundIsolate, self.job),
                         evaluators.TaskPayloadLiftingEvaluator()))),
                 evaluators.FilteringEvaluator(
                     predicate=evaluators.TaskTypeEq('run_test'),
                     delegate=evaluators.SequenceEvaluator(evaluators=(
                         functools.partial(FakeFailedRunTest, self.job),
                         evaluators.TaskPayloadLiftingEvaluator()))),
                 read_value.Evaluator(self.job),
             ))))
     # Select all read_value tasks and verify each of the 10 attempts
     # carries a 'DependencyFailed' error in its payload.
     self.assertEqual(
         {
             'read_value_chromium@aaaaaaa_%s' % (attempt, ): {
                 'benchmark':
                 'some_benchmark',
                 'mode':
                 'graph_json',
                 'results_filename':
                 'some_benchmark/perf_results.json',
                 'histogram_options': {
                     'tir_label': None,
                     'story': None,
                     'statistic': None,
                 },
                 'graph_json_options': {
                     'chart': 'chart',
                     'trace': 'must_not_be_found',
                 },
                 'errors': [{
                     'reason': 'DependencyFailed',
                     'message': mock.ANY,
                 }],
                 'status':
                 'failed',
                 'tries':
                 1,
             }
             for attempt in range(10)
         },
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None,
                                payload={}),
             evaluators.Selector(task_type='read_value')))
Пример #2
0
 def __init__(self):
     """Serializes 'read_value' tasks that have progressed past 'pending'."""
     # Only tasks that have at least started carry results to serialize.
     started_statuses = {'ongoing', 'failed', 'completed', 'cancelled'}
     matches_started_read_value = evaluators.All(
         evaluators.TaskTypeEq('read_value'),
         evaluators.TaskStatusIn(started_statuses))
     super(Serializer, self).__init__(predicate=matches_started_read_value,
                                      delegate=ResultSerializer)
Пример #3
0
    def testEvaluateFailedDependency(self, *_):
        """A hard-failing 'find_isolate' dependency must fail run_test tasks."""
        # Fake the isolate lookup so it always reports a hard failure, then
        # let the real run_test evaluator react to that status.
        failing_find_isolate = evaluators.FilteringEvaluator(
            predicate=evaluators.TaskTypeEq('find_isolate'),
            delegate=evaluators.SequenceEvaluator(evaluators=(
                functools.partial(FakeFindIsolateFailed, self.job),
                evaluators.TaskPayloadLiftingEvaluator())))
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            failing_find_isolate,
            run_test.Evaluator(self.job),
        ))

        # When we initiate the run_test tasks, we should immediately see the
        # tasks failing because the dependency has a hard failure status.
        expected = {'build_aaaaaaa': mock.ANY}
        for attempt in range(11):
            expected['run_test_aaaaaaa_%s' % (attempt, )] = {
                'status': 'failed',
                'errors': mock.ANY,
                'dimensions': DIMENSIONS,
                'extra_args': [],
                'swarming_server': 'some_server',
            }
        self.assertEqual(
            expected,
            task_module.Evaluate(
                self.job,
                event_module.Event(type='initiate',
                                   target_task=None,
                                   payload={}), evaluator))
Пример #4
0
 def __init__(self, job):
     """Evaluator for 'run_test' tasks, dispatching by event type.

     'initiate' events start tasks that have not run yet; 'update' events
     first give not-yet-started tasks a chance to start, then update the
     ongoing ones.
     """
     started_statuses = {'ongoing', 'failed', 'completed'}
     dispatch = evaluators.DispatchByEventTypeEvaluator({
         'initiate':
         evaluators.FilteringEvaluator(
             predicate=evaluators.Not(
                 evaluators.TaskStatusIn(started_statuses)),
             delegate=InitiateEvaluator(job)),
         # For updates, we want to ensure that the initiate evaluator
         # has a chance to run on 'pending' tasks.
         'update':
         evaluators.SequenceEvaluator([
             evaluators.FilteringEvaluator(
                 predicate=evaluators.Not(
                     evaluators.TaskStatusIn(started_statuses)),
                 delegate=InitiateEvaluator(job)),
             evaluators.FilteringEvaluator(
                 predicate=evaluators.TaskStatusIn({'ongoing'}),
                 delegate=UpdateEvaluator(job)),
         ]),
     })
     super(Evaluator, self).__init__(evaluators=(
         evaluators.FilteringEvaluator(
             predicate=evaluators.All(evaluators.TaskTypeEq('run_test'), ),
             delegate=dispatch),
         evaluators.TaskPayloadLiftingEvaluator(),
     ))
Пример #5
0
 def __init__(self):
     """Serializes 'find_culprit' tasks that have progressed past 'pending'."""
     # Only tasks that have at least started are worth serializing.
     started_statuses = {'ongoing', 'failed', 'completed', 'cancelled'}
     matches_started_find_culprit = evaluators.All(
         evaluators.TaskTypeEq('find_culprit'),
         evaluators.TaskStatusIn(started_statuses))
     super(Serializer, self).__init__(predicate=matches_started_find_culprit,
                                      delegate=AnalysisSerializer)
Пример #6
0
 def __init__(self, job):
     """Evaluates pending 'read_value' tasks for the given job."""
     is_pending_read_value = evaluators.All(
         evaluators.TaskTypeEq('read_value'),
         evaluators.TaskStatusIn({'pending'}))
     # Lift the task payload before ReadValueEvaluator runs on the task.
     delegate_sequence = evaluators.SequenceEvaluator(
         evaluators=(evaluators.TaskPayloadLiftingEvaluator(),
                     ReadValueEvaluator(job)))
     super(Evaluator, self).__init__(predicate=is_pending_read_value,
                                     delegate=delegate_sequence)
Пример #7
0
 def setUp(self):
     """Creates a fresh job and the evaluator shared by all test cases."""
     super(EvaluatorTest, self).setUp()
     self.maxDiff = None
     self.job = job_module.Job.New((), ())
     # Fakes: isolates are always found and run_test tasks always succeed,
     # so individual tests only exercise the real read_value evaluator.
     find_isolate_stub = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskTypeEq('find_isolate'),
         delegate=evaluators.SequenceEvaluator(
             evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                         evaluators.TaskPayloadLiftingEvaluator())))
     run_test_stub = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskTypeEq('run_test'),
         delegate=evaluators.SequenceEvaluator(evaluators=(
             bisection_test_util.FakeSuccessfulRunTest(self.job),
             evaluators.TaskPayloadLiftingEvaluator())))
     self.evaluator = evaluators.SequenceEvaluator(
         evaluators=(find_isolate_stub, run_test_stub,
                     read_value.Evaluator(self.job)))
Пример #8
0
 def CompoundEvaluatorForTesting(self, fake_evaluator):
     """Wraps fake_evaluator to run on pending 'read_value' tasks.

     Returns a sequence that first applies the fake (plus payload
     lifting) to pending read_value tasks, then runs the performance
     bisection evaluator with 'commits' excluded from lifted payloads.
     """
     is_pending_read_value = evaluators.All(
         evaluators.TaskTypeEq('read_value'),
         evaluators.TaskStatusIn({'pending'}))
     fake_with_lifting = evaluators.SequenceEvaluator(
         [fake_evaluator,
          evaluators.TaskPayloadLiftingEvaluator()])
     bisection_sequence = evaluators.SequenceEvaluator([
         performance_bisection.Evaluator(self.job),
         evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'})
     ])
     return evaluators.SequenceEvaluator([
         evaluators.FilteringEvaluator(predicate=is_pending_read_value,
                                       delegate=fake_with_lifting),
         bisection_sequence,
     ])
Пример #9
0
 def __init__(self, job):
   """Dispatches events for unfinished, targeted 'find_isolate' tasks."""
   # Only act on find_isolate tasks targeted by the event that have not
   # reached a terminal status yet.
   is_unfinished = evaluators.Not(
       evaluators.TaskStatusIn({'completed', 'failed', 'cancelled'}))
   targeted_find_isolate = evaluators.All(
       evaluators.TaskTypeEq('find_isolate'),
       evaluators.TaskIsEventTarget(),
       is_unfinished,
   )
   dispatcher = evaluators.DispatchByEventTypeEvaluator({
       'initiate': InitiateEvaluator(job),
       'update': UpdateEvaluator(job),
   })
   super(Evaluator, self).__init__(
       evaluators=(
           evaluators.TaskPayloadLiftingEvaluator(),
           evaluators.FilteringEvaluator(predicate=targeted_find_isolate,
                                         delegate=dispatcher),
       ))
Пример #10
0
 def __init__(self, job):
     """Evaluator for 'run_test' tasks targeted by the current event."""
     targeted_run_test = evaluators.All(
         evaluators.TaskTypeEq('run_test'),
         evaluators.TaskIsEventTarget(),
     )
     # 'initiate' only applies to tasks that have not started or finished.
     on_initiate = evaluators.FilteringEvaluator(
         predicate=evaluators.Not(
             evaluators.TaskStatusIn({'ongoing', 'failed', 'completed'})),
         delegate=InitiateEvaluator(job))
     # 'update' only applies to tasks that are currently running.
     on_update = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskStatusIn({'ongoing'}),
         delegate=UpdateEvaluator(job))
     super(Evaluator, self).__init__(evaluators=(
         evaluators.TaskPayloadLiftingEvaluator(),
         evaluators.FilteringEvaluator(
             predicate=targeted_run_test,
             delegate=evaluators.DispatchByEventTypeEvaluator({
                 'initiate': on_initiate,
                 'update': on_update,
             })),
     ))
Пример #11
0
    def testMissingDependencyInputs(self):
        """Initiating a run_test whose dependency lacks inputs must error.

        Builds a two-vertex graph (run_test depends on find_isolate),
        checks the graph validates cleanly, then initiates with a fake
        that leaves the isolate unresolved and expects the run_test task
        to report a 'MissingDependencyInputs' error.
        """
        job = job_module.Job.New((), ())
        task_module.PopulateTaskGraph(
            job,
            task_module.TaskGraph(
                vertices=[
                    task_module.TaskVertex(id='build_aaaaaaa',
                                           vertex_type='find_isolate',
                                           payload={
                                               'builder': 'Some Builder',
                                               'target':
                                               'telemetry_perf_tests',
                                               'bucket': 'luci.bucket',
                                               'change': {
                                                   'commits': [{
                                                       'repository':
                                                       'chromium',
                                                       'git_hash':
                                                       'aaaaaaa',
                                                   }]
                                               }
                                           }),
                    task_module.TaskVertex(id='run_test_aaaaaaa_0',
                                           vertex_type='run_test',
                                           payload={
                                               'swarming_server':
                                               'some_server',
                                               'dimensions': DIMENSIONS,
                                               'extra_args': [],
                                           }),
                ],
                edges=[
                    task_module.Dependency(from_='run_test_aaaaaaa_0',
                                           to='build_aaaaaaa')
                ],
            ))

        # This time we're fine, there should be no errors.
        self.assertEqual({},
                         task_module.Evaluate(
                             job,
                             event_module.Event(type='validate',
                                                target_task=None,
                                                payload={}),
                             run_test.Validator()))

        # Send an initiate message then catch that we've not provided the required
        # payload in the task when it's ongoing.
        self.assertEqual(
            {
                'build_aaaaaaa': mock.ANY,
                'run_test_aaaaaaa_0': {
                    'errors': [{
                        'cause': 'MissingDependencyInputs',
                        'message': mock.ANY
                    }]
                }
            },
            task_module.Evaluate(
                job,
                event_module.Event(type='initiate',
                                   target_task=None,
                                   payload={}),
                evaluators.FilteringEvaluator(
                    predicate=evaluators.TaskTypeEq('find_isolate'),
                    delegate=evaluators.SequenceEvaluator(evaluators=(
                        functools.partial(FakeNotFoundIsolate, job),
                        evaluators.TaskPayloadLiftingEvaluator(),
                    )),
                    alternative=run_test.Validator()),
            ))
Пример #12
0
 def __init__(self, job):
   """Runs FindCulprit on 'find_culprit' tasks that are not yet done."""
   is_unfinished = evaluators.Not(
       evaluators.TaskStatusIn({'completed', 'failed'}))
   super(Evaluator, self).__init__(
       predicate=evaluators.All(evaluators.TaskTypeEq('find_culprit'),
                                is_unfinished),
       delegate=FindCulprit(job))
Пример #13
0
 def __init__(self):
     """Serializes every 'run_test' task via TestSerializer."""
     super(Serializer, self).__init__(
         predicate=evaluators.TaskTypeEq('run_test'),
         delegate=TestSerializer)
Пример #14
0
 def __init__(self):
     """Runs ReportError against every 'run_test' task."""
     super(Validator, self).__init__(
         predicate=evaluators.TaskTypeEq('run_test'),
         delegate=ReportError)
Пример #15
0
    def testEvaluateToCompletion(self, swarming_task_result,
                                 swarming_tasks_new):
        """Drives run_test tasks from 'initiate' through to 'completed'.

        With mocked Swarming endpoints, initiates the graph, verifies all
        attempts go 'ongoing' with a scheduled Swarming task, then feeds
        COMPLETED poll results and verifies each task ends 'completed'
        with the output isolate recorded.
        """
        swarming_tasks_new.return_value = {'task_id': 'task id'}
        # Fake find_isolate success; run the real run_test evaluator.
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(
                    evaluators=(functools.partial(FakeFoundIsolate, self.job),
                                evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))
        self.assertNotEqual({},
                            task_module.Evaluate(
                                self.job,
                                event_module.Event(type='initiate',
                                                   target_task=None,
                                                   payload={}), evaluator))

        # Ensure that we've found all the 'run_test' tasks.
        self.assertEqual(
            {
                'run_test_aaaaaaa_%s' % (attempt, ): {
                    'status': 'ongoing',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                    },
                    'swarming_task_id': 'task id',
                    'tries': 1,
                }
                for attempt in range(11)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select',
                                   target_task=None, payload={}),
                Selector(task_type='run_test')))

        # Ensure that we've actually made the calls to the Swarming service.
        swarming_tasks_new.assert_called()
        self.assertGreaterEqual(swarming_tasks_new.call_count, 10)

        # Then we propagate an event for each of the run_test tasks in the graph.
        swarming_task_result.return_value = {
            'bot_id': 'bot id',
            'exit_code': 0,
            'failure': False,
            'outputs_ref': {
                'isolatedserver': 'output isolate server',
                'isolated': 'output isolate hash',
            },
            'state': 'COMPLETED',
        }
        for attempt in range(11):
            self.assertNotEqual(
                {},
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='update',
                        target_task='run_test_aaaaaaa_%s' % (attempt, ),
                        payload={}), evaluator), 'Attempt #%s' % (attempt, ))

        # Ensure that we've polled the status of each of the tasks, and that we've
        # marked the tasks completed.
        self.assertEqual(
            {
                'run_test_aaaaaaa_%s' % (attempt, ): {
                    'status': 'completed',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                    },
                    'swarming_task_result': {
                        'bot_id': mock.ANY,
                        'state': 'COMPLETED',
                        'failure': False,
                    },
                    'isolate_server': 'output isolate server',
                    'isolate_hash': 'output isolate hash',
                    'swarming_task_id': 'task id',
                    'tries': 1,
                }
                for attempt in range(11)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select',
                                   target_task=None, payload={}),
                Selector(task_type='run_test')))

        # Ensure that we've actually made the calls to the Swarming service.
        swarming_task_result.assert_called()
        self.assertGreaterEqual(swarming_task_result.call_count, 10)
Пример #16
0
    def testEvaluateHandleFailures_Hard(self, swarming_task_stdout,
                                        swarming_task_result,
                                        swarming_tasks_new):
        """Hard Swarming failures (exit_code != 0) must fail run_test tasks.

        Initiates the graph with a fake find_isolate success, then polls
        with a COMPLETED-but-failed Swarming result and asserts every
        attempt's task ends up 'failed' with errors recorded.
        """
        swarming_tasks_new.return_value = {'task_id': 'task id'}
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(
                    evaluators=(functools.partial(FakeFoundIsolate, self.job),
                                evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))
        self.assertNotEqual({},
                            task_module.Evaluate(
                                self.job,
                                event_module.Event(type='initiate',
                                                   target_task=None,
                                                   payload={}), evaluator))

        # We set it up so that when we poll the swarming task, that we're going to
        # get an error status. We're expecting that hard failures are detected.
        swarming_task_stdout.return_value = {
            'output':
            """Traceback (most recent call last):
  File "../../testing/scripts/run_performance_tests.py", line 282, in <module>
    sys.exit(main())
  File "../../testing/scripts/run_performance_tests.py", line 226, in main
    benchmarks = args.benchmark_names.split(',')
AttributeError: 'Namespace' object has no attribute 'benchmark_names'"""
        }
        swarming_task_result.return_value = {
            'bot_id': 'bot id',
            'exit_code': 1,
            'failure': True,
            'outputs_ref': {
                'isolatedserver': 'output isolate server',
                'isolated': 'output isolate hash',
            },
            'state': 'COMPLETED',
        }
        # Deliver a poll update for every attempt's task.
        for attempt in range(11):
            self.assertNotEqual(
                {},
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='update',
                        target_task='run_test_aaaaaaa_%s' % (attempt, ),
                        payload={
                            'kind': 'pubsub_message',
                            'action': 'poll'
                        }), evaluator), 'Attempt #%s' % (attempt, ))
        # All attempts must now be 'failed' with the failure result stored.
        self.assertEqual(
            {
                'run_test_aaaaaaa_%s' % (attempt, ): {
                    'status': 'failed',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'errors': mock.ANY,
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                    },
                    'swarming_task_result': {
                        'bot_id': mock.ANY,
                        'state': 'COMPLETED',
                        'failure': True,
                    },
                    'isolate_server': 'output isolate server',
                    'isolate_hash': 'output isolate hash',
                    'swarming_task_id': 'task id',
                    'tries': 1,
                }
                for attempt in range(11)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select',
                                   target_task=None, payload={}),
                Selector(task_type='run_test')))
Пример #17
0
    def testEvaluateHandleFailures_Expired(self, swarming_task_result,
                                           swarming_tasks_new):
        """An EXPIRED Swarming state must fail tasks with 'SwarmingExpired'.

        Initiates the graph with a fake find_isolate success, polls each
        attempt with an EXPIRED Swarming result, and asserts every task
        ends up 'failed' carrying a 'SwarmingExpired' error.
        """
        swarming_tasks_new.return_value = {'task_id': 'task id'}
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(
                    evaluators=(functools.partial(FakeFoundIsolate, self.job),
                                evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))
        self.assertNotEqual({},
                            task_module.Evaluate(
                                self.job,
                                event_module.Event(type='initiate',
                                                   target_task=None,
                                                   payload={}), evaluator))
        # Polling now reports the Swarming task expired before running.
        swarming_task_result.return_value = {
            'state': 'EXPIRED',
        }
        for attempt in range(11):
            self.assertNotEqual(
                {},
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='update',
                        target_task='run_test_aaaaaaa_%s' % (attempt, ),
                        payload={
                            'kind': 'pubsub_message',
                            'action': 'poll'
                        }), evaluator), 'Attempt #%s' % (attempt, ))

        # Every attempt's task must be 'failed' with a SwarmingExpired error.
        self.assertEqual(
            {
                'run_test_aaaaaaa_%s' % (attempt, ): {
                    'status': 'failed',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'errors': [
                        {
                            'reason': 'SwarmingExpired',
                            'message': mock.ANY
                        },
                    ],
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                    },
                    'swarming_task_result': {
                        'state': 'EXPIRED',
                    },
                    'swarming_task_id': 'task id',
                    'tries': 1,
                }
                for attempt in range(11)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select',
                                   target_task=None, payload={}),
                Selector(task_type='run_test')))
Пример #18
0
 def __init__(self):
   """Serializes every 'find_isolate' task via BuildSerializer."""
   super(Serializer,
         self).__init__(predicate=evaluators.TaskTypeEq('find_isolate'),
                        delegate=BuildSerializer)