Example #1
  def testInitiate_ScheduleBuild(self, put, _):
    # We then need to make sure that the buildbucket put was called.
    put.return_value = {'build': {'id': '345982437987234'}}

    # This time we don't seed the isolate for the change to force the build.
    self.assertDictEqual(
        {
            'build_7c7e90be': {
                'bucket': 'luci.bucket',
                'buildbucket_result': {
                    'build': {
                        'id': '345982437987234'
                    },
                },
                'builder': 'Mac Builder',
                'change': mock.ANY,
                'status': 'ongoing',
                'target': 'telemetry_perf_tests',
                'tries': 1,
            },
        },
        task_module.Evaluate(
            self.job,
            event_module.Event(
                type='initiate', target_task='build_7c7e90be', payload={}),
            find_isolate.Evaluator(self.job)))
    self.assertEqual(1, put.call_count)
Example #2
  def testDispatchEvaluator_Default(self):

    def MustNeverCall(*_):
      self.fail('Dispatch failure!')

    def DefaultEvaluator(*_):
      return [0]

    task = task_module.InMemoryTask(
        id='test_id',
        task_type='test',
        payload={},
        status='pending',
        dependencies=[])
    evaluator = evaluators.DispatchByEventTypeEvaluator(
        evaluator_map={
            'match_nothing': MustNeverCall,
        },
        default_evaluator=DefaultEvaluator)
    accumulator = {}
    self.assertEqual([0],
                     evaluator(
                         task,
                         event_module.Event(
                             type='unrecognised', target_task=None, payload={}),
                         accumulator))
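Example #2 exercises `evaluators.DispatchByEventTypeEvaluator`: the evaluator registered for the event's type is invoked, and `default_evaluator` handles any unrecognised type. As an illustration only (a hypothetical sketch, not the real class from `dashboard.pinpoint.models.evaluators`), a dispatcher with that behaviour could look like this:

# Illustrative sketch only: not the actual DispatchByEventTypeEvaluator, just
# a plausible shape for the dispatch behaviour the test exercises.
class DispatchByEventTypeSketch(object):

  def __init__(self, evaluator_map, default_evaluator=None):
    self._evaluator_map = evaluator_map
    self._default_evaluator = default_evaluator

  def __call__(self, task, event, accumulator):
    # Look up the evaluator registered for this event type, falling back to
    # the default when the type is not recognised.
    handler = self._evaluator_map.get(event.type, self._default_evaluator)
    if handler is None:
      return None
    return handler(task, event, accumulator)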
Example #3
  def Start(self):
    """Starts the Job and updates it in the Datastore.

    This method is designed to return fast, so that Job creation is responsive
    to the user. It schedules the Job on the task queue without running
    anything. It also posts a bug comment, and updates the Datastore.
    """
    if self.use_execution_engine:
      # Treat this as if it's a poll, and run the handler here.
      try:
        task_module.Evaluate(
            self,
            event_module.Event(type='initiate', target_task=None, payload={}),
            task_evaluator.ExecutionEngine(self))
      except task_module.Error as error:
        logging.error('Failed: %s', error)
        self.Fail()
        self.put()
        return
    else:
      self._Schedule()
    self.started = True
    self.started_time = datetime.datetime.now()
    self.put()

    title = _ROUND_PUSHPIN + ' Pinpoint job started.'
    comment = '\n'.join((title, self.url))
    deferred.defer(
        _PostBugCommentDeferred,
        self.bug_id,
        comment,
        send_email=True,
        _retry_options=RETRY_OPTIONS)
Example #4
    def testEvaluateFailedDependency(self, *_):
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(evaluators=(
                    bisection_test_util.FakeFindIsolateFailed(self.job),
                    evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))

        # When we initiate the run_test tasks, we should immediately see the tasks
        # failing because the dependency has a hard failure status.
        self.assertEqual(
            dict([('find_isolate_chromium@aaaaaaa', mock.ANY)] +
                 [('run_test_chromium@aaaaaaa_%s' % (attempt, ), {
                     'status': 'failed',
                     'errors': mock.ANY,
                     'dimensions': DIMENSIONS,
                     'extra_args': [],
                     'swarming_server': 'some_server',
                     'change': mock.ANY,
                     'index': attempt,
                 }) for attempt in range(10)]),
            task_module.Evaluate(
                self.job,
                event_module.Event(type='initiate',
                                   target_task=None,
                                   payload={}), evaluator))
Example #5
  def testSequenceEvaluator(self):

    def FirstEvaluator(*args):
      args[2].update({'value': 1})
      return ['First Action']

    def SecondEvaluator(*args):
      args[2].update({'value': args[2].get('value') + 1})
      return ['Second Action']

    task = task_module.InMemoryTask(
        id='test_id',
        task_type='test',
        payload={},
        status='pending',
        dependencies=[])
    evaluator = evaluators.SequenceEvaluator(
        evaluators=(FirstEvaluator, SecondEvaluator))
    event = event_module.Event(type='test', target_task=None, payload={})
    accumulator = {}
    # Test that we're collecting the actions returned by the nested evaluators.
    self.assertEqual(['First Action', 'Second Action'],
                     evaluator(task, event, accumulator))

    # Test that the operations happened in sequence.
    self.assertEqual({'value': 2}, accumulator)
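Example #5 pins down two properties of `evaluators.SequenceEvaluator`: the nested evaluators run in order against the same accumulator, and the actions they return are concatenated. A minimal sketch with those properties, shown purely for illustration and not taken from the actual implementation:

# Illustrative sketch only; the real evaluators.SequenceEvaluator may differ.
class SequenceEvaluatorSketch(object):

  def __init__(self, evaluators):
    self._evaluators = tuple(evaluators)

  def __call__(self, task, event, accumulator):
    # Run the nested evaluators in order, sharing the same accumulator, and
    # flatten the actions they return into one list.
    actions = []
    for evaluator in self._evaluators:
      result = evaluator(task, event, accumulator)
      if result:
        actions.extend(result)
    return actions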
Example #6
 def testMissingDependency(self):
     job = job_module.Job.New((), ())
     task_module.PopulateTaskGraph(
         job,
         task_module.TaskGraph(vertices=[
             task_module.TaskVertex(id='run_test_bbbbbbb_0',
                                    vertex_type='run_test',
                                    payload={
                                        'swarming_server': 'some_server',
                                        'dimensions': DIMENSIONS,
                                        'extra_args': [],
                                    }),
         ],
                               edges=[]))
     self.assertEqual(
         {
             'run_test_bbbbbbb_0': {
                 'errors': [{
                     'cause': 'DependencyError',
                     'message': mock.ANY
                 }]
             }
         },
         task_module.Evaluate(
             job,
             event_module.Event(type='validate',
                                target_task=None,
                                payload={}), run_test.Validator()))
Example #7
    def setUp(self):
        super(FindIsolateEvaluatorUpdateTests, self).setUp()

        # Here we set up the pre-requisite for polling, where we've already had a
        # successful build scheduled.
        with mock.patch('dashboard.services.buildbucket_service.Put') as put:
            put.return_value = {'build': {'id': '345982437987234'}}
            self.assertDictEqual(
                {
                    'find_isolate_chromium@7c7e90be': {
                        'buildbucket_result': {
                            'build': {
                                'id': '345982437987234'
                            }
                        },
                        'status': 'ongoing',
                        'builder': 'Mac Builder',
                        'bucket': 'luci.bucket',
                        'change': mock.ANY,
                        'target': 'telemetry_perf_tests',
                        'tries': 1,
                    },
                },
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='initiate',
                        target_task='find_isolate_chromium@7c7e90be',
                        payload={}), find_isolate.Evaluator(self.job)))
            self.assertEqual(1, put.call_count)
Example #8
 def testUpdate_MissingIsolates_InvalidJson(self, get_build_status):
     json = '{ invalid }'
     get_build_status.return_value = {
         'build': {
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'result_details_json': json,
         }
     }
     self.assertDictEqual(
         {
             'find_isolate_chromium@7c7e90be': {
                 'bucket': 'luci.bucket',
                 'buildbucket_result': {
                     'build': {
                         'id': '345982437987234'
                     }
                 },
                 'buildbucket_job_status': mock.ANY,
                 'builder': 'Mac Builder',
                 'change': mock.ANY,
                 'status': 'failed',
                 'errors': mock.ANY,
                 'target': 'telemetry_perf_tests',
                 'tries': 1,
             },
         },
         task_module.Evaluate(
             self.job,
             event_module.Event(
                 type='update',
                 target_task='find_isolate_chromium@7c7e90be',
                 payload={'status': 'build_completed'}),
             find_isolate.Evaluator(self.job)))
     self.assertEqual(1, get_build_status.call_count)
Example #9
    def testInitiate_FoundIsolate(self, *_):
        # Seed the isolate for this change.
        change = change_module.Change(
            commits=[change_module.Commit('chromium', '7c7e90be')])
        isolate.Put((('Mac Builder', change, 'telemetry_perf_tests',
                      'https://isolate.server', '7c7e90be'), ))

        # Then ensure that we can find the seeded isolate for the specified
        # revision.
        self.assertDictEqual(
            {
                'find_isolate_chromium@7c7e90be': {
                    'bucket': 'luci.bucket',
                    'builder': 'Mac Builder',
                    'change': mock.ANY,
                    'isolate_hash': '7c7e90be',
                    'isolate_server': 'https://isolate.server',
                    'status': 'completed',
                    'target': 'telemetry_perf_tests',
                },
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(
                    type='initiate',
                    target_task='find_isolate_chromium@7c7e90be',
                    payload={}), find_isolate.Evaluator(self.job)))
Example #10
 def testEvaluatePendingDependency(self, *_):
     # Ensure that tasks stay pending in the event of an update.
     self.assertEqual(
         dict([('find_isolate_chromium@aaaaaaa', {
             'builder': 'Some Builder',
             'target': 'telemetry_perf_tests',
             'bucket': 'luci.bucket',
             'change': mock.ANY,
             'status': 'pending',
         })] + [('run_test_chromium@aaaaaaa_%s' % (attempt, ), {
             'status': 'pending',
             'dimensions': DIMENSIONS,
             'extra_args': [],
             'swarming_server': 'some_server',
             'change': mock.ANY,
             'index': attempt,
         }) for attempt in range(10)]),
         task_module.Evaluate(
             self.job,
             event_module.Event(type='update',
                                target_task=None,
                                payload={
                                    'kind': 'synthetic',
                                    'action': 'poll'
                                }), run_test.Evaluator(self.job)))
Example #11
 def testUpdate_BuildFailed_HardFailure(self, get_build_status):
   get_build_status.return_value = {
       'build': {
           'status': 'COMPLETED',
           'result': 'FAILURE',
           'result_details_json': '{}',
       }
   }
   self.assertDictEqual(
       {
           'build_7c7e90be': {
               'bucket': 'luci.bucket',
               'buildbucket_result': {
                   'build': {
                       'id': '345982437987234'
                   },
               },
               'buildbucket_job_status': mock.ANY,
               'builder': 'Mac Builder',
               'change': mock.ANY,
               'status': 'failed',
               'target': 'telemetry_perf_tests',
               'tries': 1,
           },
       },
       task_module.Evaluate(
           self.job,
           event_module.Event(
               type='update',
               target_task='build_7c7e90be',
               payload={'status': 'build_completed'}),
           find_isolate.Evaluator(self.job)))
   self.assertEqual(1, get_build_status.call_count)
Example #12
 def testEvaluateFail_GraphJsonMissingChart(self, isolate_retrieve):
   isolate_retrieve.side_effect = itertools.chain(
       *itertools.repeat([('{"files": {"some_benchmark/perf_results.json": '
                           '{"h": "394890891823812873798734a"}}}'),
                          json.dumps({})], 10))
   self.PopulateTaskGraph(
       benchmark='some_benchmark',
       chart='chart',
       trace='trace',
       mode='graph_json')
   self.assertNotEqual({},
                       task_module.Evaluate(
                           self.job,
                           event_module.Event(
                               type='initiate', target_task=None, payload={}),
                           self.evaluator))
   self.assertEqual(
       {
           'read_value_chromium@aaaaaaa_%s' % (attempt,): {
               'benchmark': 'some_benchmark',
               'change': mock.ANY,
               'mode': 'graph_json',
               'results_filename': 'some_benchmark/perf_results.json',
               'results_path': ['some_benchmark', 'perf_results.json'],
               'histogram_options': {
                   'grouping_label': None,
                   'story': None,
                   'statistic': None,
                   'histogram_name': 'chart',
               },
               'graph_json_options': {
                   'chart': 'chart',
                   'trace': 'trace',
               },
               'errors': [{
                   'reason': 'ReadValueChartNotFound',
                   'message': mock.ANY,
               }],
               'status': 'failed',
               'tries': 1,
               'index': attempt,
           } for attempt in range(10)
       },
       task_module.Evaluate(
           self.job,
           event_module.Event(type='select', target_task=None, payload={}),
           evaluators.Selector(task_type='read_value')))
Example #13
 def testEvaluateFailure_GraphJsonMissingFile(self, isolate_retrieve):
     isolate_retrieve.return_value = '{"files": {}}'
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='chart',
                            trace='trace',
                            mode='graph_json')
     self.assertNotEqual({},
                         task_module.Evaluate(
                             self.job,
                             event_module.Event(type='initiate',
                                                target_task=None,
                                                payload={}),
                             self.evaluator))
     self.assertEqual(
         {
             'read_value_chromium@aaaaaaa_%s' % (attempt, ): {
                  'benchmark': 'some_benchmark',
                  'mode': 'graph_json',
                  'results_filename': 'some_benchmark/perf_results.json',
                 'histogram_options': {
                     'tir_label': None,
                     'story': None,
                     'statistic': None,
                 },
                 'graph_json_options': {
                     'chart': 'chart',
                     'trace': 'trace',
                 },
                 'errors': [{
                     'reason': 'ReadValueNoFile',
                     'message': mock.ANY,
                 }],
                  'status': 'failed',
                  'tries': 1,
             }
             for attempt in range(10)
         },
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None,
                                payload={}),
             evaluators.Selector(task_type='read_value')))
Example #14
 def testEvaluateFailure_HistogramNoValues(self, isolate_retrieve):
   isolate_retrieve.side_effect = itertools.chain(*itertools.repeat(
       [('{"files": {"some_benchmark/perf_results.json": '
         '{"h": "394890891823812873798734a"}}}'),
        json.dumps(
            histogram_set.HistogramSet([
                histogram_module.Histogram('some_benchmark', 'count')
            ]).AsDicts())], 10))
   self.PopulateTaskGraph(
       benchmark='some_benchmark',
       chart='some_chart',
       grouping_label='label',
       story='https://story')
   self.assertNotEqual({},
                       task_module.Evaluate(
                           self.job,
                           event_module.Event(
                               type='initiate', target_task=None, payload={}),
                           self.evaluator))
   self.assertEqual(
       {
           'read_value_chromium@aaaaaaa_%s' % (attempt,): {
               'benchmark': 'some_benchmark',
               'change': mock.ANY,
               'mode': 'histogram_sets',
               'results_filename': 'some_benchmark/perf_results.json',
               'histogram_options': {
                   'grouping_label': 'label',
                   'story': 'https://story',
                   'statistic': None,
               },
               'graph_json_options': {
                   'chart': 'some_chart',
                   'trace': 'some_trace',
               },
               'status': 'failed',
               'errors': [{
                   'reason': 'ReadValueNotFound',
                   'message': mock.ANY,
               }],
               'tries': 1,
           } for attempt in range(10)
       },
       task_module.Evaluate(
           self.job,
           event_module.Event(type='select', target_task=None, payload={}),
           evaluators.Selector(task_type='read_value')))
Example #15
 def testEvaluateSuccess_GraphJson(self, isolate_retrieve):
     isolate_retrieve.side_effect = itertools.chain(*itertools.repeat(
         [('{"files": {"some_benchmark/perf_results.json": '
           '{"h": "394890891823812873798734a"}}}'),
          json.dumps(
              {'chart': {
                  'traces': {
                      'trace': ['126444.869721', '0.0']
                  }
              }})], 10))
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='chart',
                            trace='trace',
                            mode='graph_json')
     self.assertNotEqual({},
                         task_module.Evaluate(
                             self.job,
                             event_module.Event(type='initiate',
                                                target_task=None,
                                                payload={}),
                             self.evaluator))
     self.assertEqual(
         {
             'read_value_chromium@aaaaaaa_%s' % (attempt, ): {
                 'benchmark': 'some_benchmark',
                 'mode': 'graph_json',
                 'results_filename': 'some_benchmark/perf_results.json',
                 'histogram_options': {
                     'tir_label': None,
                     'story': None,
                     'statistic': None,
                 },
                 'graph_json_options': {
                     'chart': 'chart',
                     'trace': 'trace',
                 },
                 'result_values': [126444.869721],
                 'status': 'completed',
                 'tries': 1,
             }
             for attempt in range(10)
         },
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None,
                                payload={}),
             evaluators.Selector(task_type='read_value')))
Example #16
    def testEvaluateSuccess_NeedToRefineAttempts(self):
        self.PopulateSimpleBisectionGraph()
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.CompoundEvaluatorForTesting(
                FakeReadValueMapResult(
                    self.job, {
                        change_module.Change.FromDict({
                            'commits': [{
                                'repository': 'chromium',
                                'git_hash': commit
                            }]
                        }): values
                        for commit, values in (
                            ('commit_0', range(10)),
                            ('commit_1', range(10)),
                            ('commit_2', range(4, 14)),
                            ('commit_3', range(3, 13)),
                            ('commit_4', range(3, 13)),
                            ('commit_5', range(3, 13)),
                        )
                    })))

        # Here we test that we have more than the minimum attempts for the change
        # between commit_1 and commit_2.
        evaluate_result = task_module.Evaluate(
            self.job, SelectEvent(),
            evaluators.Selector(task_type='read_value'))
        attempt_counts = {}
        for payload in evaluate_result.values():
            change = change_module.Change.FromDict(payload.get('change'))
            attempt_counts[change] = attempt_counts.get(change, 0) + 1
        self.assertGreater(
            attempt_counts[change_module.Change.FromDict({
                'commits': [{
                    'repository': 'chromium',
                    'git_hash': 'commit_2',
                }]
            })], 10)
        self.assertLess(
            attempt_counts[change_module.Change.FromDict({
                'commits': [{
                    'repository': 'chromium',
                    'git_hash': 'commit_2',
                }]
            })], 100)

        # We know that we will never get a deterministic answer, so we ensure that
        # we don't inadvertently blame the wrong changes at the end of the
        # refinement.
        evaluate_result = task_module.Evaluate(
            self.job, SelectEvent(),
            evaluators.Selector(task_type='find_culprit'))
        self.assertIn('performance_bisection', evaluate_result)
        logging.info('Results: %s', evaluate_result['performance_bisection'])
        self.assertEqual(evaluate_result['performance_bisection']['culprits'],
                          [])
Example #17
    def testEvaluateSuccess_NeedToRefineAttempts(self):
        self.PopulateSimpleBisectionGraph(self.job)
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.BisectionEvaluatorForTesting(
                bisection_test_util.FakeReadValueMapResult(
                    self.job, {
                        change_module.Change.FromDict({
                            'commits': [{
                                'repository': 'chromium',
                                'git_hash': commit
                            }]
                        }): values
                        for commit, values in (
                            ('commit_0', range(10)),
                            ('commit_1', range(1, 11)),
                            ('commit_2', range(2, 12)),
                            ('commit_3', range(3, 13)),
                            ('commit_4', range(3, 13)),
                            ('commit_5', range(3, 13)),
                        )
                    })))

        # Here we test that we have more than the minimum attempts for the change
        # between commit_1 and commit_2.
        evaluate_result = task_module.Evaluate(
            self.job, bisection_test_util.SelectEvent(),
            evaluators.Selector(task_type='read_value'))
        attempt_counts = {}
        for payload in evaluate_result.values():
            change = change_module.Change.FromDict(payload.get('change'))
            attempt_counts[change] = attempt_counts.get(change, 0) + 1
        self.assertGreater(
            attempt_counts[change_module.Change.FromDict({
                'commits': [{
                    'repository': 'chromium',
                    'git_hash': 'commit_2',
                }]
            })], 10)
        self.assertLess(
            attempt_counts[change_module.Change.FromDict({
                'commits': [{
                    'repository': 'chromium',
                    'git_hash': 'commit_2',
                }]
            })], 100)

        # We know that we will refine the graph until we see the progression from
        # commit_0 -> commit_1 -> commit_2 -> commit_3 and stabilize.
        evaluate_result = task_module.Evaluate(
            self.job, bisection_test_util.SelectEvent(),
            evaluators.Selector(task_type='find_culprit'))
        self.assertIn('performance_bisection', evaluate_result)
        self.assertEqual(evaluate_result['performance_bisection']['culprits'],
                          [mock.ANY, mock.ANY, mock.ANY])
Example #18
 def testSelector_EventType(self):
     task = task_module.InMemoryTask(id='test_id',
                                     task_type='test',
                                     payload={},
                                     status='pending',
                                     dependencies=[])
     accumulator = {}
     evaluators.Selector(event_type='select')(task,
                                              event_module.Event(
                                                  type='unmatched',
                                                  target_task=None,
                                                  payload={}), accumulator)
     self.assertEqual({}, accumulator)
     evaluators.Selector(event_type='select')(task,
                                              event_module.Event(
                                                  type='select',
                                                  target_task=None,
                                                  payload={}), accumulator)
     self.assertEqual({'test_id': mock.ANY}, accumulator)
Example #19
 def post(self, job_id):
     job = job_module.JobFromId(job_id)
     if job.use_execution_engine:
         event = event_module.Event(type='initiate',
                                    target_task=None,
                                    payload={})
         logging.info('Execution Engine: Evaluating initiate event.')
         task_module.Evaluate(job, event, evaluator.ExecutionEngine(job))
         logging.info('Execution Engine: Evaluation done.')
     else:
         job.Run()
Example #20
 def testEvaluateFailure_DependenciesFailed(self):
   self.PopulateSimpleBisectionGraph()
   task_module.Evaluate(
       self.job,
       event_module.Event(type='initiate', target_task=None, payload={}),
       self.CompoundEvaluatorForTesting(FakeReadValueFails(self.job)))
   evaluate_result = task_module.Evaluate(
       self.job, SelectEvent(), evaluators.Selector(task_type='find_culprit'))
   self.assertIn('performance_bisection', evaluate_result)
   self.assertEqual(evaluate_result['performance_bisection']['status'],
                    'failed')
   self.assertNotEqual([], evaluate_result['performance_bisection']['errors'])
Example #21
 def testSelector_Predicate(self):
     task = task_module.InMemoryTask(id='test_id',
                                     task_type='test',
                                     payload={},
                                     status='pending',
                                     dependencies=[])
     accumulator = {}
     evaluators.Selector(predicate=lambda *_: True)(
         task,
         event_module.Event(type='unimportant',
                            target_task=None,
                            payload={}), accumulator)
     self.assertEqual({'test_id': mock.ANY}, accumulator)
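Examples #18 and #21 suggest the contract of `evaluators.Selector`: a task's details are copied into the accumulator only when the configured filters (event type, task type, or an arbitrary predicate) all match. The sketch below is a guess at that shape, shown for illustration; the real Selector accepts more options and may record a different value per task:

# Illustrative sketch only; not the actual evaluators.Selector.
class SelectorSketch(object):

  def __init__(self, task_type=None, event_type=None, predicate=None):
    self._task_type = task_type
    self._event_type = event_type
    self._predicate = predicate

  def __call__(self, task, event, accumulator):
    # Apply each configured filter; bail out on the first mismatch.
    if self._event_type is not None and event.type != self._event_type:
      return None
    if self._task_type is not None and task.task_type != self._task_type:
      return None
    if self._predicate is not None and not self._predicate(
        task, event, accumulator):
      return None
    # On a match, surface the task's payload (plus status) in the accumulator.
    accumulator[task.id] = dict(task.payload, status=task.status)
    return None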
Example #22
    def testFilteringEvaluator_DoesNotMatch(self):
        def ThrowingEvaluator(*_):
            raise ValueError('This must never be raised.')

        task = task_module.InMemoryTask(id='test_id',
                                        task_type='test',
                                        payload={},
                                        status='pending',
                                        dependencies=[])
        evaluator = evaluators.FilteringEvaluator(predicate=lambda *_: False,
                                                  delegate=ThrowingEvaluator)
        event = event_module.Event(type='test', target_task=None, payload={})
        accumulator = {}
        evaluator(task, event, accumulator)
Example #23
 def testPayloadLiftingEvaluator_ExcludeEventTypes(self):
     task = task_module.InMemoryTask(id='test_id',
                                     task_type='test',
                                     payload={
                                         'key_must_not_show': 'value0',
                                     },
                                     status='pending',
                                     dependencies=[])
     evaluator = evaluators.TaskPayloadLiftingEvaluator(
         exclude_event_types={'test'})
     event = event_module.Event(type='test', target_task=None, payload={})
     accumulator = {}
     self.assertEqual(None, evaluator(task, event, accumulator))
     self.assertEqual({}, accumulator)
Example #24
    def testFilteringEvaluator_Matches(self):
        def ThrowingEvaluator(*_):
            raise ValueError('Expect this exception.')

        task = task_module.InMemoryTask(id='test_id',
                                        task_type='test',
                                        payload={},
                                        status='pending',
                                        dependencies=[])
        evaluator = evaluators.FilteringEvaluator(predicate=lambda *_: True,
                                                  delegate=ThrowingEvaluator)
        event = event_module.Event(type='test', target_task=None, payload={})
        accumulator = {}
        with self.assertRaises(ValueError):
            evaluator(task, event, accumulator)
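Examples #22 and #24 together specify `evaluators.FilteringEvaluator`: the delegate runs only when the predicate matches, so the non-matching case never raises while the matching case propagates the delegate's exception. A minimal sketch of that behaviour, for illustration rather than as the actual implementation:

# Illustrative sketch only; not the actual evaluators.FilteringEvaluator.
class FilteringEvaluatorSketch(object):

  def __init__(self, predicate, delegate):
    self._predicate = predicate
    self._delegate = delegate

  def __call__(self, task, event, accumulator):
    # Invoke the delegate only when the predicate matches; otherwise do
    # nothing, so exceptions raised by the delegate can never surface.
    if self._predicate(task, event, accumulator):
      return self._delegate(task, event, accumulator)
    return None

Under this sketch both tests above hold: the false predicate short-circuits before ThrowingEvaluator is called, and the true predicate lets the ValueError escape.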
Example #25
    def testEvaluateSuccess_SpeculateBisection(self):
        self.PopulateSimpleBisectionGraph()
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.CompoundEvaluatorForTesting(
                FakeReadValueMapResult(
                    self.job, {
                        change_module.Change.FromDict({
                            'commits': [{
                                'repository': 'chromium',
                                'git_hash': commit
                            }]
                        }): values
                        for commit, values in (
                            ('commit_0', [1.0] * 10),
                            ('commit_1', [1.0] * 10),
                            ('commit_2', [2.0] * 10),
                            ('commit_3', [2.0] * 10),
                            ('commit_4', [2.0] * 10),
                            ('commit_5', [2.0] * 10),
                        )
                    })))
        evaluate_result = task_module.Evaluate(
            self.job, SelectEvent(),
            evaluators.Selector(task_type='find_culprit'))
        self.assertIn('performance_bisection', evaluate_result)
        logging.info('Results: %s', evaluate_result['performance_bisection'])

        # Here we're testing that we can find the change between commit_1 and
        # commit_2 in the values we seed above.
        self.assertEqual(evaluate_result['performance_bisection']['culprits'],
                          [[
                              change_module.Change.FromDict({
                                  'commits': [{
                                      'repository': 'chromium',
                                      'git_hash': 'commit_1'
                                  }]
                              }).AsDict(),
                              change_module.Change.FromDict({
                                  'commits': [{
                                      'repository': 'chromium',
                                      'git_hash': 'commit_2'
                                  }]
                              }).AsDict()
                          ]])
Example #26
    def AsDict(self, options=None):
        def IsoFormatOrNone(attr):
            time = getattr(self, attr, None)
            if time:
                return time.isoformat()
            return None

        d = {
            'job_id': self.job_id,
            'configuration': self.configuration,
            'results_url': self.results_url,
            'arguments': self.arguments,
            'bug_id': self.bug_id,
            'project': self.project,
            'comparison_mode': self.comparison_mode,
            'name': self.auto_name,
            'user': self.user,
            'created': IsoFormatOrNone('created'),
            'updated': IsoFormatOrNone('updated'),
            'started_time': IsoFormatOrNone('started_time'),
            'difference_count': self.difference_count,
            'exception': self.exception_details_dict,
            'status': self.status,
            'cancel_reason': self.cancel_reason,
        }

        if not options:
            return d

        if OPTION_STATE in options:
            if self.use_execution_engine:
                d.update(
                    task_module.Evaluate(
                        self,
                        event_module.Event(
                            type='serialize', target_task=None, payload={}),
                        job_serializer.Serializer()) or {})
            else:
                d.update(self.state.AsDict())
        if OPTION_ESTIMATE in options and not self.started:
            d.update(self._GetRunTimeEstimate())
        if OPTION_TAGS in options:
            d['tags'] = {'tags': self.tags}
        return d
Example #27
 def testPayloadLiftingEvaluator_ExcludeKeys(self):
     task = task_module.InMemoryTask(id='test_id',
                                     task_type='test',
                                     payload={
                                         'key_included': 'value0',
                                         'key_excluded': 'value1'
                                     },
                                     status='pending',
                                     dependencies=[])
     evaluator = evaluators.TaskPayloadLiftingEvaluator(
         exclude_keys={'key_excluded'})
     event = event_module.Event(type='test', target_task=None, payload={})
     accumulator = {}
     evaluator(task, event, accumulator)
     self.assertEqual(
         {'test_id': {
             'key_included': 'value0',
             'status': 'pending'
         }}, accumulator)
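Examples #23 and #27 describe `evaluators.TaskPayloadLiftingEvaluator` from the outside: events of an excluded type are ignored entirely, and otherwise the task's payload, minus any excluded keys and plus the task status, is recorded in the accumulator under the task id. A sketch consistent with both tests, offered as an assumption about the behaviour rather than the real code:

# Illustrative sketch only; the real TaskPayloadLiftingEvaluator may differ.
class TaskPayloadLiftingEvaluatorSketch(object):

  def __init__(self, exclude_keys=(), exclude_event_types=()):
    self._exclude_keys = set(exclude_keys)
    self._exclude_event_types = set(exclude_event_types)

  def __call__(self, task, event, accumulator):
    # Ignore events of excluded types entirely (Example #23).
    if event.type in self._exclude_event_types:
      return None
    # Lift the payload, dropping excluded keys and adding the task status,
    # keyed by the task id (Example #27).
    lifted = {
        key: value
        for key, value in task.payload.items()
        if key not in self._exclude_keys
    }
    lifted['status'] = task.status
    accumulator[task.id] = lifted
    return None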
Example #28
 def testUpdate_MissingIsolates_Revision(self, get_build_status):
   json = """
   {
     "properties": {
         "isolate_server": "https://isolate.server",
         "swarm_hashes_refs/heads/master(at)7c7e90be_without_patch":
             {"telemetry_perf_tests": "192923affe212adf"}
     }
   }"""
   get_build_status.return_value = {
       'build': {
           'status': 'COMPLETED',
           'result': 'SUCCESS',
           'result_details_json': json,
       }
   }
   self.assertDictEqual(
       {
           'find_isolate_chromium@7c7e90be': {
               'bucket': 'luci.bucket',
               'builder': 'Mac Builder',
               'build_url': mock.ANY,
               'buildbucket_result': {
                   'build': {
                       'id': '345982437987234'
                   }
               },
               'buildbucket_job_status': mock.ANY,
               'change': mock.ANY,
               'status': 'failed',
               'target': 'telemetry_perf_tests',
               'tries': 1,
               'errors': mock.ANY,
           },
       },
       task_module.Evaluate(
           self.job,
           event_module.Event(
               type='update',
               target_task='find_isolate_chromium@7c7e90be',
               payload={'status': 'build_completed'}),
           find_isolate.Evaluator(self.job)))
   self.assertEqual(1, get_build_status.call_count)
Example #29
  def testEvaluateSuccess_HistogramSummary(self, isolate_retrieve):
    samples = []
    hists = []
    for i in range(10):
      hist = histogram_module.Histogram('some_chart', 'count')
      hist.AddSample(0)
      hist.AddSample(1)
      hist.AddSample(2)
      hist.diagnostics[reserved_infos.STORIES.name] = (
          generic_set.GenericSet(['story%d' % i]))
      hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
          generic_set.GenericSet(['group:label1']))
      hists.append(hist)
      samples.extend(hist.sample_values)

    for i in range(10):
      hist = histogram_module.Histogram('some_chart', 'count')
      hist.AddSample(0)
      hist.AddSample(1)
      hist.AddSample(2)
      hist.diagnostics[reserved_infos.STORIES.name] = (
          generic_set.GenericSet(['another_story%d' % i]))
      hist.diagnostics[reserved_infos.STORY_TAGS.name] = (
          generic_set.GenericSet(['group:label2']))
      hists.append(hist)
      samples.extend(hist.sample_values)

    histograms = histogram_set.HistogramSet(hists)
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
    isolate_retrieve.side_effect = itertools.chain(
        *itertools.repeat([('{"files": {"some_benchmark/perf_results.json": '
                            '{"h": "394890891823812873798734a"}}}'),
                           json.dumps(histograms.AsDicts())], 10))
    self.PopulateTaskGraph(benchmark='some_benchmark', chart='some_chart')
    self.assertNotEqual({},
                        task_module.Evaluate(
                            self.job,
                            event_module.Event(
                                type='initiate', target_task=None, payload={}),
                            self.evaluator))
    self.assertEqual(
        {
            'read_value_chromium@aaaaaaa_%s' % (attempt,): {
                'benchmark': 'some_benchmark',
                'change': mock.ANY,
                'mode': 'histogram_sets',
                'results_filename': 'some_benchmark/perf_results.json',
                'results_path': ['some_benchmark', 'perf_results.json'],
                'histogram_options': {
                    'grouping_label': None,
                    'story': None,
                    'statistic': None,
                    'histogram_name': 'some_chart',
                },
                'graph_json_options': {
                    'chart': 'some_chart',
                    'trace': 'some_trace'
                },
                'result_values': [sum(samples)],
                'status': 'completed',
                'tries': 1,
                'index': attempt,
            } for attempt in range(10)
        },
        task_module.Evaluate(
            self.job,
            event_module.Event(type='select', target_task=None, payload={}),
            evaluators.Selector(task_type='read_value')))
Example #30
 def testEvaluateSuccess_HistogramsTraceUrls(self, isolate_retrieve):
   hist = histogram_module.Histogram('some_chart', 'count')
   hist.AddSample(0)
   hist.diagnostics[reserved_infos.TRACE_URLS.name] = (
       generic_set.GenericSet(['trace_url1', 'trace_url2']))
   hist2 = histogram_module.Histogram('hist2', 'count')
   hist2.diagnostics[reserved_infos.TRACE_URLS.name] = (
       generic_set.GenericSet(['trace_url3']))
   hist3 = histogram_module.Histogram('hist3', 'count')
   hist3.diagnostics[reserved_infos.TRACE_URLS.name] = (
       generic_set.GenericSet(['trace_url2']))
   histograms = histogram_set.HistogramSet([hist, hist2, hist3])
   isolate_retrieve.side_effect = itertools.chain(
       *itertools.repeat([('{"files": {"some_benchmark/perf_results.json": '
                           '{"h": "394890891823812873798734a"}}}'),
                          json.dumps(histograms.AsDicts())], 10))
   self.PopulateTaskGraph(benchmark='some_benchmark', chart='some_chart')
   self.assertNotEqual({},
                       task_module.Evaluate(
                           self.job,
                           event_module.Event(
                               type='initiate', target_task=None, payload={}),
                           self.evaluator))
   self.assertEqual(
       {
           'read_value_chromium@aaaaaaa_%s' % (attempt,): {
               'benchmark': 'some_benchmark',
               'change': mock.ANY,
               'mode': 'histogram_sets',
               'results_filename': 'some_benchmark/perf_results.json',
               'results_path': ['some_benchmark', 'perf_results.json'],
               'histogram_options': {
                   'grouping_label': None,
                   'story': None,
                   'statistic': None,
                   'histogram_name': 'some_chart',
               },
               'graph_json_options': {
                   'chart': 'some_chart',
                   'trace': 'some_trace'
               },
               'result_values': [0],
               'status': 'completed',
               'tries': 1,
               'trace_urls': [{
                   'key': 'trace',
                   'value': 'trace_url1',
                   'url': 'trace_url1'
               }, {
                   'key': 'trace',
                   'value': 'trace_url2',
                   'url': 'trace_url2',
               }, {
                   'key': 'trace',
                   'value': 'trace_url3',
                   'url': 'trace_url3',
               }],
               'index': attempt,
           } for attempt in range(10)
       },
       task_module.Evaluate(
           self.job,
           event_module.Event(type='select', target_task=None, payload={}),
           evaluators.Selector(task_type='read_value')))