def testEvaluateFailedDependency(self, *_):
    """Read_value tasks must fail when their run_test dependency fails."""
    self.PopulateTaskGraph(
        benchmark='some_benchmark',
        chart='chart',
        trace='must_not_be_found',
        mode='graph_json')

    # Drive the graph with an 'initiate' event: fake a found isolate but a
    # FAILED test run, then let the real read_value evaluator react.
    find_isolate_handler = evaluators.FilteringEvaluator(
        predicate=evaluators.TaskTypeEq('find_isolate'),
        delegate=evaluators.SequenceEvaluator(
            evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                        evaluators.TaskPayloadLiftingEvaluator())))
    failed_run_handler = evaluators.FilteringEvaluator(
        predicate=evaluators.TaskTypeEq('run_test'),
        delegate=evaluators.SequenceEvaluator(
            evaluators=(bisection_test_util.FakeFailedRunTest(self.job),
                        evaluators.TaskPayloadLiftingEvaluator())))
    initiate_result = task_module.Evaluate(
        self.job,
        event_module.Event(type='initiate', target_task=None, payload={}),
        evaluators.SequenceEvaluator(
            evaluators=(find_isolate_handler, failed_run_handler,
                        read_value.Evaluator(self.job))))
    self.assertNotEqual({}, initiate_result)

    def expected_payload(index):
        # Payload each failed read_value attempt is expected to carry.
        return {
            'benchmark': 'some_benchmark',
            'change': mock.ANY,
            'mode': 'graph_json',
            'results_filename': 'some_benchmark/perf_results.json',
            'results_path': ['some_benchmark', 'perf_results.json'],
            'histogram_options': {
                'grouping_label': None,
                'story': None,
                'statistic': None,
                'histogram_name': 'chart',
            },
            'graph_json_options': {
                'chart': 'chart',
                'trace': 'must_not_be_found',
            },
            'errors': [{
                'reason': 'DependencyFailed',
                'message': mock.ANY,
            }],
            'status': 'failed',
            'tries': 1,
            'index': index,
        }

    # Every read_value attempt must report a DependencyFailed error.
    self.assertEqual(
        {
            'read_value_chromium@aaaaaaa_%s' % (i,): expected_payload(i)
            for i in range(10)
        },
        task_module.Evaluate(
            self.job,
            event_module.Event(type='select', target_task=None, payload={}),
            evaluators.Selector(task_type='read_value')))
 def setUp(self):
     """Creates the job and the evaluator shared by the test cases."""
     super(EvaluatorTest, self).setUp()
     self.maxDiff = None
     self.job = job_module.Job.New((), ())
     # Common evaluator for all the test cases: fake a found isolate and a
     # successful test run, then run the real read_value evaluator.
     found_isolate = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskTypeEq('find_isolate'),
         delegate=evaluators.SequenceEvaluator(
             evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                         evaluators.TaskPayloadLiftingEvaluator())))
     successful_run = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskTypeEq('run_test'),
         delegate=evaluators.SequenceEvaluator(
             evaluators=(bisection_test_util.FakeSuccessfulRunTest(self.job),
                         evaluators.TaskPayloadLiftingEvaluator())))
     self.evaluator = evaluators.SequenceEvaluator(
         evaluators=(found_isolate, successful_run,
                     read_value.Evaluator(self.job)))
    def testEvaluateToCompletion(self, swarming_task_result,
                                 swarming_tasks_new):
        """Drives run_test tasks from initiation to successful completion.

        The mock arguments are presumably patched swarming API calls
        injected by decorators outside this chunk -- TODO(review): confirm
        the patch order matches the parameter order.
        """
        # Every newly created swarming task gets the same fake task id.
        swarming_tasks_new.return_value = {'task_id': 'task id'}
        # Fake only the isolate lookup; run_test.Evaluator is the code under
        # test here.
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(
                    evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                                evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))
        self.assertNotEqual({},
                            task_module.Evaluate(
                                self.job,
                                event_module.Event(type='initiate',
                                                   target_task=None,
                                                   payload={}), evaluator))

        # Ensure that we've found all the 'run_test' tasks.
        self.assertEqual(
            {
                'run_test_chromium@aaaaaaa_%s' % (attempt, ): {
                    'status': 'ongoing',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                        'service_account': mock.ANY,
                    },
                    'swarming_task_id': 'task id',
                    'tries': 1,
                    'change': mock.ANY,
                    'index': attempt,
                }
                for attempt in range(10)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select', target_task=None,
                                   payload={}),
                evaluators.Selector(task_type='run_test')))

        # Ensure that we've actually made the calls to the Swarming service.
        swarming_tasks_new.assert_called()
        self.assertGreaterEqual(swarming_tasks_new.call_count, 10)

        # Then we propagate an event for each of the run_test tasks in the graph.
        # Polling each task now reports a clean COMPLETED result.
        swarming_task_result.return_value = {
            'bot_id': 'bot id',
            'exit_code': 0,
            'failure': False,
            'outputs_ref': {
                'isolatedserver': 'output isolate server',
                'isolated': 'output isolate hash',
            },
            'state': 'COMPLETED',
        }
        for attempt in range(10):
            self.assertNotEqual(
                {},
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='update',
                        target_task='run_test_chromium@aaaaaaa_%s' %
                        (attempt, ),
                        payload={}), evaluator), 'Attempt #%s' % (attempt, ))

        # Ensure that we've polled the status of each of the tasks, and that we've
        # marked the tasks completed.
        self.assertEqual(
            {
                'run_test_chromium@aaaaaaa_%s' % (attempt, ): {
                    'status': 'completed',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                        'service_account': mock.ANY,
                    },
                    'swarming_task_result': {
                        'bot_id': mock.ANY,
                        'state': 'COMPLETED',
                        'failure': False,
                    },
                    'isolate_server': 'output isolate server',
                    'isolate_hash': 'output isolate hash',
                    'swarming_task_id': 'task id',
                    'tries': 1,
                    'change': mock.ANY,
                    'index': attempt,
                }
                for attempt in range(10)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select', target_task=None,
                                   payload={}),
                evaluators.Selector(task_type='run_test')))

        # Ensure that we've actually made the calls to the Swarming service.
        swarming_task_result.assert_called()
        self.assertGreaterEqual(swarming_task_result.call_count, 10)
    def testEvaluateHandleFailures_Expired(self, swarming_task_result,
                                           swarming_tasks_new):
        """EXPIRED swarming results must surface as failed run_test tasks."""
        swarming_tasks_new.return_value = {'task_id': 'task id'}
        # Fake the isolate lookup; exercise the real run_test evaluator's
        # handling of swarming failures.
        fake_isolate = evaluators.FilteringEvaluator(
            predicate=evaluators.TaskTypeEq('find_isolate'),
            delegate=evaluators.SequenceEvaluator(
                evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                            evaluators.TaskPayloadLiftingEvaluator())))
        evaluator = evaluators.SequenceEvaluator(
            evaluators=(fake_isolate, run_test.Evaluator(self.job)))
        self.assertNotEqual(
            {},
            task_module.Evaluate(
                self.job,
                event_module.Event(type='initiate', target_task=None,
                                   payload={}), evaluator))

        # Every poll now reports the swarming task as expired.
        swarming_task_result.return_value = {
            'state': 'EXPIRED',
        }
        poll_payload = {'kind': 'pubsub_message', 'action': 'poll'}
        for i in range(10):
            update_event = event_module.Event(
                type='update',
                target_task='run_test_chromium@aaaaaaa_%s' % (i,),
                payload=poll_payload)
            self.assertNotEqual(
                {}, task_module.Evaluate(self.job, update_event, evaluator),
                'Attempt #%s' % (i,))

        request_body_keys = ('name', 'user', 'priority', 'task_slices', 'tags',
                             'pubsub_auth_token', 'pubsub_topic',
                             'pubsub_userdata', 'service_account')

        def expected_task(index):
            # Payload each expired run_test task should end up with.
            return {
                'status': 'failed',
                'swarming_server': 'some_server',
                'dimensions': DIMENSIONS,
                'errors': [
                    {
                        'reason': 'SwarmingExpired',
                        'message': mock.ANY
                    },
                ],
                'extra_args': [],
                'swarming_request_body': {
                    key: mock.ANY
                    for key in request_body_keys
                },
                'swarming_task_result': {
                    'state': 'EXPIRED',
                },
                'swarming_task_id': 'task id',
                'tries': 1,
                'change': mock.ANY,
                'index': index,
            }

        self.assertEqual(
            {
                'run_test_chromium@aaaaaaa_%s' % (i,): expected_task(i)
                for i in range(10)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select', target_task=None,
                                   payload={}),
                evaluators.Selector(task_type='run_test')))
    def testEvaluateHandleFailures_Hard(self, swarming_task_stdout,
                                        swarming_task_result,
                                        swarming_tasks_new):
        """A hard (non-retryable) test failure marks run_test tasks failed.

        The mock arguments are presumably patched swarming API calls
        injected by decorators outside this chunk -- TODO(review): confirm
        the patch order matches the parameter order.
        """
        swarming_tasks_new.return_value = {'task_id': 'task id'}
        # Fake only the isolate lookup; run_test.Evaluator is the code under
        # test here.
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(
                    evaluators=(bisection_test_util.FakeFoundIsolate(self.job),
                                evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))
        self.assertNotEqual({},
                            task_module.Evaluate(
                                self.job,
                                event_module.Event(type='initiate',
                                                   target_task=None,
                                                   payload={}), evaluator))

        # We set it up so that when we poll the swarming task, that we're going to
        # get an error status. We're expecting that hard failures are detected.
        swarming_task_stdout.return_value = {
            'output':
            """Traceback (most recent call last):
  File "../../testing/scripts/run_performance_tests.py", line 282, in <module>
    sys.exit(main())
  File "../../testing/scripts/run_performance_tests.py", line 226, in main
    benchmarks = args.benchmark_names.split(',')
AttributeError: 'Namespace' object has no attribute 'benchmark_names'"""
        }
        # COMPLETED state but a non-zero exit code and failure=True: a hard
        # failure of the test itself rather than of the swarming service.
        swarming_task_result.return_value = {
            'bot_id': 'bot id',
            'exit_code': 1,
            'failure': True,
            'outputs_ref': {
                'isolatedserver': 'output isolate server',
                'isolated': 'output isolate hash',
            },
            'state': 'COMPLETED',
        }
        for attempt in range(10):
            self.assertNotEqual(
                {},
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='update',
                        target_task='run_test_chromium@aaaaaaa_%s' %
                        (attempt, ),
                        payload={
                            'kind': 'pubsub_message',
                            'action': 'poll'
                        }), evaluator), 'Attempt #%s' % (attempt, ))
        # Every run_test task should now be marked failed, while still
        # carrying the swarming result and output isolate details.
        self.assertEqual(
            {
                'run_test_chromium@aaaaaaa_%s' % (attempt, ): {
                    'status': 'failed',
                    'swarming_server': 'some_server',
                    'dimensions': DIMENSIONS,
                    'errors': mock.ANY,
                    'extra_args': [],
                    'swarming_request_body': {
                        'name': mock.ANY,
                        'user': mock.ANY,
                        'priority': mock.ANY,
                        'task_slices': mock.ANY,
                        'tags': mock.ANY,
                        'pubsub_auth_token': mock.ANY,
                        'pubsub_topic': mock.ANY,
                        'pubsub_userdata': mock.ANY,
                        'service_account': mock.ANY,
                    },
                    'swarming_task_result': {
                        'bot_id': mock.ANY,
                        'state': 'COMPLETED',
                        'failure': True,
                    },
                    'isolate_server': 'output isolate server',
                    'isolate_hash': 'output isolate hash',
                    'swarming_task_id': 'task id',
                    'tries': 1,
                    'change': mock.ANY,
                    'index': attempt,
                }
                for attempt in range(10)
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(type='select', target_task=None,
                                   payload={}),
                evaluators.Selector(task_type='run_test')))
# Example #6
 def testSerializeJob(self):
     """Runs a faked bisection to completion and checks AsDict's shape."""
     self.PopulateSimpleBisectionGraph(self.job)
     # Fake every leaf task type; only the culprit-finding evaluator is real.
     handlers = {
         'find_isolate':
         bisection_test_util.FakeFoundIsolate(self.job),
         'run_test':
         bisection_test_util.FakeSuccessfulRunTest(self.job),
         'read_value':
         bisection_test_util.FakeReadValueSameResult(self.job, 1.0),
         'find_culprit':
         performance_bisection.Evaluator(self.job),
     }
     task_module.Evaluate(
         self.job, bisection_test_util.SelectEvent(),
         evaluators.SequenceEvaluator([
             evaluators.DispatchByTaskType(handlers),
             evaluators.TaskPayloadLiftingEvaluator()
         ]))
     logging.debug('Finished evaluating job state.')
     job_dict = self.job.AsDict(options=[job_module.OPTION_STATE])
     logging.debug('Job = %s', pprint.pformat(job_dict))
     self.assertTrue(self.job.use_execution_engine)

     def change_state(change_dict, prev, next_):
         # NOTE: Here we're assessing the structure of the results, not the
         # actual contents. We'll reserve more specific content for testing
         # in other test cases; for now we're ensuring that we're able to
         # get the shape of the data in a certain way.
         return {
             'attempts': ([{
                 'executions': [mock.ANY] * 3
             }] + [{
                 'executions': [None, mock.ANY, mock.ANY]
             }] * 9),
             'change': change_dict,
             'comparisons': {
                 'prev': prev,
                 'next': next_,
             },
             'result_values': [mock.ANY] * 10,
         }

     self.assertEqual(
         {
             'arguments': mock.ANY,
             'bug_id': None,
             'cancel_reason': None,
             'comparison_mode': 'performance',
             'configuration': 'some_configuration',
             'created': mock.ANY,
             'difference_count': 0,
             'exception': None,
             'job_id': mock.ANY,
             'metric': 'some_benchmark',
             'name': mock.ANY,
             'quests': ['Build', 'Test', 'Get results'],
             'results_url': mock.ANY,
             'status': mock.ANY,
             'updated': mock.ANY,
             'user': None,
             'state': [
                 change_state(self.start_change.AsDict(), None, 'same'),
                 change_state(self.end_change.AsDict(), 'same', None),
             ],
         },
         job_dict)