Example #1
 def CompoundEvaluatorForTesting(self, fake_evaluator):
     return evaluators.SequenceEvaluator([
         evaluators.FilteringEvaluator(
             predicate=evaluators.All(evaluators.TaskTypeEq('read_value'),
                                      evaluators.TaskStatusIn({'pending'})),
             delegate=evaluators.SequenceEvaluator(
                 [fake_evaluator,
                  evaluators.TaskPayloadLiftingEvaluator()])),
         evaluators.SequenceEvaluator([
             performance_bisection.Evaluator(self.job),
             evaluators.TaskPayloadLiftingEvaluator(
                 exclude_keys={'commits'})
         ]),
     ])
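A test would typically hand the evaluator returned by this helper to task_module.Evaluate along with the job and an event, the same way Example #4 does below. A minimal sketch, assuming a fake 'read_value' evaluator named fake_read_value and the SelectEvent helper from bisection_test_util:

    # Hypothetical usage; fake_read_value stands in for whatever fake the test supplies.
    evaluator = self.CompoundEvaluatorForTesting(fake_read_value)
    task_module.Evaluate(self.job, bisection_test_util.SelectEvent(), evaluator)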
Example #2
    def __init__(self, job):
        # We gather all the evaluators from the modules we know.
        super(ExecutionEngine, self).__init__(evaluators=[
            evaluators.DispatchByTaskType(
                {
                    'find_isolate': find_isolate.Evaluator(job),
                    'find_culprit': performance_bisection.Evaluator(job),
                    'read_value': read_value.Evaluator(job),
                    'run_test': run_test.Evaluator(job),
                }),

            # We then always lift the task payload up, skipping some of the
            # larger objects that we know we are not going to need when deciding
            # what the end result is.
            evaluators.TaskPayloadLiftingEvaluator(
                exclude_keys=EXCLUDED_PAYLOAD_KEYS)
        ])
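Here ExecutionEngine is itself a composite evaluator: DispatchByTaskType routes each task to the evaluator registered for its task type, and TaskPayloadLiftingEvaluator then lifts each task's payload (minus the excluded keys) into the accumulated result. A minimal sketch of driving the engine, assuming the same task_module.Evaluate entry point shown in Example #4; some_event is a placeholder for a real event:

    # Hypothetical usage sketch, not taken from the source.
    engine = ExecutionEngine(job)
    task_module.Evaluate(job, some_event, engine)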
Example #3
  def BisectionEvaluatorForTesting(self, *seeded_evaluators):
    """Creates an evaluator for bisection with the provided evaluators.

    This is a utility function for creating a bisection evaluator in which the
    provided evaluators are nested within the sequence of evaluators.
    """
    return evaluators.SequenceEvaluator([
        evaluators.FilteringEvaluator(
            predicate=evaluators.All(evaluators.TaskStatusIn({'pending'})),
            delegate=evaluators.SequenceEvaluator(
                list(seeded_evaluators) +
                [evaluators.TaskPayloadLiftingEvaluator()])),
        evaluators.SequenceEvaluator([
            performance_bisection.Evaluator(self.job),
            evaluators.TaskPayloadLiftingEvaluator(exclude_keys={'commits'})
        ]),
    ])
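As with Example #1, the returned evaluator would presumably be passed to task_module.Evaluate; the seeded evaluators stand in for the per-task-type fakes. A sketch, assuming the fakes from bisection_test_util used in Example #4 can be seeded directly:

    # Hypothetical usage sketch built from the fakes shown in Example #4.
    evaluator = self.BisectionEvaluatorForTesting(
        bisection_test_util.FakeFoundIsolate(self.job),
        bisection_test_util.FakeSuccessfulRunTest(self.job),
        bisection_test_util.FakeReadValueSameResult(self.job, 1.0))
    task_module.Evaluate(self.job, bisection_test_util.SelectEvent(), evaluator)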
Example #4
 def testSerializeJob(self):
     self.PopulateSimpleBisectionGraph(self.job)
     task_module.Evaluate(
         self.job, bisection_test_util.SelectEvent(),
         evaluators.SequenceEvaluator([
             evaluators.DispatchByTaskType({
                 'find_isolate':
                 bisection_test_util.FakeFoundIsolate(self.job),
                 'run_test':
                 bisection_test_util.FakeSuccessfulRunTest(self.job),
                 'read_value':
                 bisection_test_util.FakeReadValueSameResult(self.job, 1.0),
                 'find_culprit':
                 performance_bisection.Evaluator(self.job),
             }),
             evaluators.TaskPayloadLiftingEvaluator()
         ]))
     logging.debug('Finished evaluating job state.')
     job_dict = self.job.AsDict(options=[job_module.OPTION_STATE])
     logging.debug('Job = %s', pprint.pformat(job_dict))
     self.assertTrue(self.job.use_execution_engine)
     self.assertEqual(
         {
             'arguments':
             mock.ANY,
             'bug_id':
             None,
             'cancel_reason':
             None,
             'comparison_mode':
             'performance',
             'configuration':
             'some_configuration',
             'created':
             mock.ANY,
             'difference_count':
             0,
             'exception':
             None,
             'job_id':
             mock.ANY,
             'metric':
             'some_benchmark',
             'name':
             mock.ANY,
             'quests': ['Build', 'Test', 'Get results'],
             'results_url':
             mock.ANY,
             'status':
             mock.ANY,
             'updated':
             mock.ANY,
             'user':
             None,
             # NOTE: Here we're assessing the structure of the results, not the
             # actual contents. We'll reserve more specific content testing for
             # other test cases; for now we're ensuring that we can get the data
             # in the expected shape.
             'state': [{
                 'attempts': [{
                     'executions': [mock.ANY] * 3
                 }] + [{
                     'executions': [None, mock.ANY, mock.ANY]
                 }] * 9,
                 'change':
                 self.start_change.AsDict(),
                 'comparisons': {
                     'prev': None,
                     'next': 'same',
                 },
                 'result_values': [mock.ANY] * 10,
             }, {
                 'attempts': [{
                     'executions': [mock.ANY] * 3
                 }] + [{
                     'executions': [None, mock.ANY, mock.ANY]
                 }] * 9,
                 'change':
                 self.end_change.AsDict(),
                 'comparisons': {
                     'prev': 'same',
                     'next': None,
                 },
                 'result_values': [mock.ANY] * 10,
             }]
         },
         job_dict)