Example #1
0
 def testEvaluateSuccess_HistogramSkipRefTraceUrls(self, isolate_retrieve):
   """Trace URLs on a diagnostic that carries a guid are not reported.

   The second histogram's TRACE_URLS diagnostic is given a guid (i.e. it is a
   shared/reference diagnostic) -- presumably that is the skip criterion, so
   only the first histogram's URLs should appear in the payloads.
   """
   trace_urls_name = reserved_infos.TRACE_URLS.name
   hist = histogram_module.Histogram('some_benchmark', 'count')
   hist.AddSample(0)
   hist.diagnostics[trace_urls_name] = generic_set.GenericSet(
       ['trace_url1', 'trace_url2'])
   ref_hist = histogram_module.Histogram('hist2', 'count')
   ref_hist.diagnostics[trace_urls_name] = generic_set.GenericSet(
       ['trace_url3'])
   ref_hist.diagnostics[trace_urls_name].guid = 'foo'
   histograms = histogram_set.HistogramSet([hist, ref_hist])
   isolate_payload = ('{"files": {"some_benchmark/perf_results.json": '
                      '{"h": "394890891823812873798734a"}}}')
   isolate_retrieve.side_effect = itertools.chain(
       *itertools.repeat([isolate_payload,
                          json.dumps(histograms.AsDicts())], 10))
   self.PopulateTaskGraph(benchmark='some_benchmark')
   self.assertNotEqual(
       {},
       task_module.Evaluate(
           self.job,
           event_module.Event(type='initiate', target_task=None, payload={}),
           self.evaluator))
   # Every attempt reports the same payload; only non-ref trace URLs appear.
   expected_payload = {
       'benchmark': 'some_benchmark',
       'change': mock.ANY,
       'mode': 'histogram_sets',
       'results_filename': 'some_benchmark/perf_results.json',
       'histogram_options': {
           'grouping_label': None,
           'story': None,
           'statistic': None,
       },
       'graph_json_options': {
           'chart': None,
           'trace': 'some_trace'
       },
       'result_values': [0],
       'status': 'completed',
       'tries': 1,
       'trace_urls': [
           {'key': 'trace', 'value': 'trace_url1', 'url': 'trace_url1'},
           {'key': 'trace', 'value': 'trace_url2', 'url': 'trace_url2'},
       ]
   }
   self.assertEqual(
       {'read_value_chromium@aaaaaaa_%d' % attempt: expected_payload
        for attempt in range(10)},
       task_module.Evaluate(
           self.job,
           event_module.Event(type='select', target_task=None, payload={}),
           evaluators.Selector(task_type='read_value')))
Example #2
0
 def testEvaluateFail_GraphJsonMissingTrace(self, isolate_retrieve):
     """A graph_json read for an absent trace fails every attempt."""
     isolate_payload = ('{"files": {"some_benchmark/perf_results.json": '
                        '{"h": "394890891823812873798734a"}}}')
     graph_json = json.dumps(
         {'chart': {'traces': {'trace': ['126444.869721', '0.0']}}})
     isolate_retrieve.side_effect = itertools.chain(
         *itertools.repeat([isolate_payload, graph_json], 10))
     # Ask for a trace name that the graph_json above does not contain.
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='chart',
                            trace='must_not_be_found',
                            mode='graph_json')
     self.assertNotEqual(
         {},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='initiate', target_task=None, payload={}),
             self.evaluator))

     def ExpectedPayload(attempt):
         # Each attempt fails with a ReadValueTraceNotFound error.
         return {
             'benchmark': 'some_benchmark',
             'change': mock.ANY,
             'mode': 'graph_json',
             'results_filename': 'some_benchmark/perf_results.json',
             'histogram_options': {
                 'grouping_label': None,
                 'story': None,
                 'statistic': None,
                 'histogram_name': 'chart',
             },
             'graph_json_options': {
                 'chart': 'chart',
                 'trace': 'must_not_be_found',
             },
             'errors': [{
                 'reason': 'ReadValueTraceNotFound',
                 'message': mock.ANY,
             }],
             'status': 'failed',
             'tries': 1,
             'index': attempt,
         }

     self.assertEqual(
         {'read_value_chromium@aaaaaaa_%d' % attempt: ExpectedPayload(attempt)
          for attempt in range(10)},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None, payload={}),
             evaluators.Selector(task_type='read_value')))
Example #3
0
  def testEvaluateSuccess_MultipleHistograms(self, isolate_retrieve):
    """Samples from all same-named histograms merge into result_values."""

    def CreateHistogram(name):
      # Every histogram carries the samples 0, 1, 2.
      histogram = histogram_module.Histogram(name, 'count')
      for sample in (0, 1, 2):
        histogram.AddSample(sample)
      return histogram

    histograms = histogram_set.HistogramSet([
        CreateHistogram(name)
        for name in ('some_chart', 'some_chart', 'some_other_chart')
    ])
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
    isolate_payload = ('{"files": {"some_benchmark/perf_results.json": '
                       '{"h": "394890891823812873798734a"}}}')
    isolate_retrieve.side_effect = itertools.chain(
        *itertools.repeat([isolate_payload,
                           json.dumps(histograms.AsDicts())], 10))
    self.PopulateTaskGraph(
        benchmark='some_benchmark',
        chart='some_chart',
        grouping_label='label',
        story='story')
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.evaluator))

    def ExpectedPayload(attempt):
      # Both 'some_chart' histograms contribute, hence six result values.
      return {
          'benchmark': 'some_benchmark',
          'change': mock.ANY,
          'mode': 'histogram_sets',
          'results_filename': 'some_benchmark/perf_results.json',
          'results_path': ['some_benchmark', 'perf_results.json'],
          'histogram_options': {
              'grouping_label': 'label',
              'story': 'story',
              'statistic': None,
              'histogram_name': 'some_chart',
          },
          'graph_json_options': {
              'chart': 'some_chart',
              'trace': 'some_trace'
          },
          'result_values': [0, 1, 2, 0, 1, 2],
          'status': 'completed',
          'tries': 1,
          'index': attempt,
      }

    self.assertEqual(
        {'read_value_chromium@aaaaaaa_%d' % attempt: ExpectedPayload(attempt)
         for attempt in range(10)},
        task_module.Evaluate(
            self.job,
            event_module.Event(type='select', target_task=None, payload={}),
            evaluators.Selector(task_type='read_value')))
Example #4
0
  def testEvaluateSuccess_WithData(self, isolate_retrieve):
    """A single histogram's samples become each attempt's result_values."""
    # Seed the response to the call to the isolate service.
    histogram = histogram_module.Histogram('some_chart', 'count')
    for sample in (0, 1, 2):
      histogram.AddSample(sample)
    histograms = histogram_set.HistogramSet([histogram])
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.STORY_TAGS.name, generic_set.GenericSet(['group:label']))
    histograms.AddSharedDiagnosticToAllHistograms(
        reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
    isolate_payload = ('{"files": {"some_benchmark/perf_results.json": '
                       '{"h": "394890891823812873798734a"}}}')
    isolate_retrieve.side_effect = itertools.chain(
        *itertools.repeat([isolate_payload,
                           json.dumps(histograms.AsDicts())], 10))

    # Build a graph that's looking for no statistic.
    self.PopulateTaskGraph(
        benchmark='some_benchmark',
        chart='some_chart',
        grouping_label='label',
        story='story')
    self.assertNotEqual(
        {},
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.evaluator))

    # Ensure we find the values associated with the data we're looking for.
    def ExpectedPayload(attempt):
      return {
          'benchmark': 'some_benchmark',
          'change': mock.ANY,
          'mode': 'histogram_sets',
          'results_filename': 'some_benchmark/perf_results.json',
          'results_path': ['some_benchmark', 'perf_results.json'],
          'histogram_options': {
              'grouping_label': 'label',
              'story': 'story',
              'statistic': None,
              'histogram_name': 'some_chart',
          },
          'graph_json_options': {
              'chart': 'some_chart',
              'trace': 'some_trace',
          },
          'status': 'completed',
          'result_values': [0, 1, 2],
          'tries': 1,
          'index': attempt,
      }

    self.assertEqual(
        {'read_value_chromium@aaaaaaa_%d' % attempt: ExpectedPayload(attempt)
         for attempt in range(10)},
        task_module.Evaluate(
            self.job,
            event_module.Event(type='select', target_task=None, payload={}),
            evaluators.Selector(task_type='read_value')))
Example #5
0
 def testEvaluateFailure_HistogramNoSamples(self, isolate_retrieve):
     """A matching histogram without any samples fails ReadValueNoValues."""
     histogram = histogram_module.Histogram('some_benchmark', 'count')
     histograms = histogram_set.HistogramSet([histogram])
     histograms.AddSharedDiagnosticToAllHistograms(
         reserved_infos.STORY_TAGS.name,
         generic_set.GenericSet(['group:tir_label']))
     histograms.AddSharedDiagnosticToAllHistograms(
         reserved_infos.STORIES.name,
         generic_set.GenericSet(['https://story']))
     isolate_payload = ('{"files": {"some_benchmark/perf_results.json": '
                        '{"h": "394890891823812873798734a"}}}')
     isolate_retrieve.side_effect = itertools.chain(
         *itertools.repeat([isolate_payload,
                            json.dumps(histograms.AsDicts())], 10))
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='some_chart',
                            tir_label='tir_label',
                            story='https://story')
     self.assertNotEqual(
         {},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='initiate', target_task=None, payload={}),
             self.evaluator))
     # All attempts share the same failed payload.
     expected_payload = {
         'benchmark': 'some_benchmark',
         'mode': 'histogram_sets',
         'results_filename': 'some_benchmark/perf_results.json',
         'histogram_options': {
             'tir_label': 'tir_label',
             'story': 'https://story',
             'statistic': None,
         },
         'graph_json_options': {
             'chart': 'some_chart',
             'trace': 'some_trace'
         },
         'status': 'failed',
         'errors': [{
             'reason': 'ReadValueNoValues',
             'message': mock.ANY,
         }],
         'tries': 1,
     }
     self.assertEqual(
         {'read_value_chromium@aaaaaaa_%d' % attempt: expected_payload
          for attempt in range(10)},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None, payload={}),
             evaluators.Selector(task_type='read_value')))
Example #6
0
 def testEvaluateFailedDependency(self, *_):
     """A failed run_test dependency surfaces as a DependencyFailed error."""
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='chart',
                            trace='must_not_be_found',
                            mode='graph_json')
     # Fake a successful isolate lookup but a failing test run.
     fake_find_isolate = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskTypeEq('find_isolate'),
         delegate=evaluators.SequenceEvaluator(evaluators=(
             functools.partial(FakeFoundIsolate, self.job),
             evaluators.TaskPayloadLiftingEvaluator())))
     fake_failed_run_test = evaluators.FilteringEvaluator(
         predicate=evaluators.TaskTypeEq('run_test'),
         delegate=evaluators.SequenceEvaluator(evaluators=(
             functools.partial(FakeFailedRunTest, self.job),
             evaluators.TaskPayloadLiftingEvaluator())))
     evaluator = evaluators.SequenceEvaluator(evaluators=(
         fake_find_isolate,
         fake_failed_run_test,
         read_value.Evaluator(self.job),
     ))
     self.assertNotEqual(
         {},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='initiate', target_task=None, payload={}),
             evaluator))
     expected_payload = {
         'benchmark': 'some_benchmark',
         'mode': 'graph_json',
         'results_filename': 'some_benchmark/perf_results.json',
         'histogram_options': {
             'tir_label': None,
             'story': None,
             'statistic': None,
         },
         'graph_json_options': {
             'chart': 'chart',
             'trace': 'must_not_be_found',
         },
         'errors': [{
             'reason': 'DependencyFailed',
             'message': mock.ANY,
         }],
         'status': 'failed',
         'tries': 1,
     }
     self.assertEqual(
         {'read_value_chromium@aaaaaaa_%d' % attempt: expected_payload
          for attempt in range(10)},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None, payload={}),
             evaluators.Selector(task_type='read_value')))
Example #7
0
 def testEvaluateFailure_HistogramNoValues(self, isolate_retrieve):
     """A set lacking the requested chart fails with ReadValueNotFound."""
     empty_histograms = histogram_set.HistogramSet(
         [histogram_module.Histogram('some_benchmark', 'count')])
     isolate_payload = ('{"files": {"some_benchmark/perf_results.json": '
                        '{"h": "394890891823812873798734a"}}}')
     isolate_retrieve.side_effect = itertools.chain(
         *itertools.repeat(
             [isolate_payload, json.dumps(empty_histograms.AsDicts())], 10))
     # Ask for 'some_chart', which the histogram set does not contain.
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='some_chart',
                            grouping_label='label',
                            story='https://story')
     self.assertNotEqual(
         {},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='initiate', target_task=None, payload={}),
             self.evaluator))

     def ExpectedPayload(attempt):
         return {
             'benchmark': 'some_benchmark',
             'change': mock.ANY,
             'mode': 'histogram_sets',
             'results_filename': 'some_benchmark/perf_results.json',
             'histogram_options': {
                 'grouping_label': 'label',
                 'story': 'https://story',
                 'statistic': None,
                 'histogram_name': 'some_chart',
             },
             'graph_json_options': {
                 'chart': 'some_chart',
                 'trace': 'some_trace',
             },
             'status': 'failed',
             'errors': [{
                 'reason': 'ReadValueNotFound',
                 'message': mock.ANY,
             }],
             'tries': 1,
             'index': attempt,
         }

     self.assertEqual(
         {'read_value_chromium@aaaaaaa_%d' % attempt: ExpectedPayload(attempt)
          for attempt in range(10)},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None, payload={}),
             evaluators.Selector(task_type='read_value')))
Example #8
0
 def testUpdate_BuildFailed_HardFailure(self, get_build_status):
   """A COMPLETED/FAILURE build result marks the find_isolate task failed."""
   get_build_status.return_value = {
       'build': {
           'status': 'COMPLETED',
           'result': 'FAILURE',
           'result_details_json': '{}',
       }
   }
   expected_task_payload = {
       'bucket': 'luci.bucket',
       'buildbucket_result': {
           'build': {'id': '345982437987234'},
       },
       'buildbucket_job_status': mock.ANY,
       'builder': 'Mac Builder',
       'build_url': mock.ANY,
       'change': mock.ANY,
       'status': 'failed',
       'target': 'telemetry_perf_tests',
       'errors': [mock.ANY],
       'tries': 1,
   }
   self.assertDictEqual(
       {'find_isolate_chromium@7c7e90be': expected_task_payload},
       task_module.Evaluate(
           self.job,
           event_module.Event(
               type='update',
               target_task='find_isolate_chromium@7c7e90be',
               payload={'status': 'build_completed'}),
           find_isolate.Evaluator(self.job)))
   # The build status service must have been consulted exactly once.
   self.assertEqual(1, get_build_status.call_count)
   expected_serialized = {
       'completed': True,
       'exception': mock.ANY,
       'details': [
           {'key': 'builder', 'value': 'Mac Builder', 'url': None},
           {'key': 'build', 'value': '345982437987234', 'url': mock.ANY},
       ]
   }
   self.assertEqual(
       {'find_isolate_chromium@7c7e90be': expected_serialized},
       task_module.Evaluate(
           self.job,
           event_module.Event(
               type='unimportant', target_task=None, payload={}),
           find_isolate.Serializer()))
Example #9
0
    def testEvaluateSuccess_NeedToRefineAttempts(self):
        """Bisection refines attempts around an ambiguous change.

        The sample distributions of commit_1 (range(10)) and commit_2
        (range(4, 14)) overlap, so the bisector cannot make a deterministic
        call. It must (a) grow the number of read_value attempts for commit_2
        beyond the minimum of 10 (while staying bounded), and (b) blame no
        culprit at the end of the refinement.
        """
        self.PopulateSimpleBisectionGraph()
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.CompoundEvaluatorForTesting(
                FakeReadValueMapResult(
                    self.job, {
                        change_module.Change.FromDict({
                            'commits': [{
                                'repository': 'chromium',
                                'git_hash': commit
                            }]
                        }): values
                        for commit, values in (
                            ('commit_0', range(10)),
                            ('commit_1', range(10)),
                            ('commit_2', range(4, 14)),
                            ('commit_3', range(3, 13)),
                            ('commit_4', range(3, 13)),
                            ('commit_5', range(3, 13)),
                        )
                    })))

        # Here we test that we have more than the minimum attempts for the
        # change between commit_1 and commit_2.
        evaluate_result = task_module.Evaluate(
            self.job, SelectEvent(),
            evaluators.Selector(task_type='read_value'))
        attempt_counts = {}
        for payload in evaluate_result.values():
            change = change_module.Change.FromDict(payload.get('change'))
            attempt_counts[change] = attempt_counts.get(change, 0) + 1
        # Build the commit_2 Change once instead of duplicating the literal.
        commit_2_change = change_module.Change.FromDict(
            {'commits': [{
                'repository': 'chromium',
                'git_hash': 'commit_2',
            }]})
        self.assertGreater(attempt_counts[commit_2_change], 10)
        self.assertLess(attempt_counts[commit_2_change], 100)

        # We know that we will never get a deterministic answer, so we ensure
        # that we don't inadvertently blame the wrong changes at the end of the
        # refinement.
        evaluate_result = task_module.Evaluate(
            self.job, SelectEvent(),
            evaluators.Selector(task_type='find_culprit'))
        self.assertIn('performance_bisection', evaluate_result)
        logging.info('Results: %s', evaluate_result['performance_bisection'])
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(evaluate_result['performance_bisection']['culprits'],
                         [])
    def testEvaluateSuccess_NeedToRefineAttempts(self):
        """Bisection refines attempts and converges on three culprits.

        The sample values step up at commit_1, commit_2, and commit_3, so
        after refinement stabilizes the bisection should blame exactly three
        changes, while commit_2's change accumulates more than the minimum
        of 10 read_value attempts (but stays bounded).
        """
        self.PopulateSimpleBisectionGraph(self.job)
        task_module.Evaluate(
            self.job,
            event_module.Event(type='initiate', target_task=None, payload={}),
            self.BisectionEvaluatorForTesting(
                bisection_test_util.FakeReadValueMapResult(
                    self.job, {
                        change_module.Change.FromDict({
                            'commits': [{
                                'repository': 'chromium',
                                'git_hash': commit
                            }]
                        }): values
                        for commit, values in (
                            ('commit_0', range(10)),
                            ('commit_1', range(1, 11)),
                            ('commit_2', range(2, 12)),
                            ('commit_3', range(3, 13)),
                            ('commit_4', range(3, 13)),
                            ('commit_5', range(3, 13)),
                        )
                    })))

        # Here we test that we have more than the minimum attempts for the
        # change between commit_1 and commit_2.
        evaluate_result = task_module.Evaluate(
            self.job, bisection_test_util.SelectEvent(),
            evaluators.Selector(task_type='read_value'))
        attempt_counts = {}
        for payload in evaluate_result.values():
            change = change_module.Change.FromDict(payload.get('change'))
            attempt_counts[change] = attempt_counts.get(change, 0) + 1
        # Build the commit_2 Change once instead of duplicating the literal.
        commit_2_change = change_module.Change.FromDict(
            {'commits': [{
                'repository': 'chromium',
                'git_hash': 'commit_2',
            }]})
        self.assertGreater(attempt_counts[commit_2_change], 10)
        self.assertLess(attempt_counts[commit_2_change], 100)

        # We know that we will refine the graph until we see the progression
        # from commit_0 -> commit_1 -> commit_2 -> commit_3 and stabilize.
        evaluate_result = task_module.Evaluate(
            self.job, bisection_test_util.SelectEvent(),
            evaluators.Selector(task_type='find_culprit'))
        self.assertIn('performance_bisection', evaluate_result)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(evaluate_result['performance_bisection']['culprits'],
                         [mock.ANY, mock.ANY, mock.ANY])
Example #11
0
 def testEvaluateFailure_DependenciesFailed(self):
   """When every read_value fails, the bisection itself must fail."""
   self.PopulateSimpleBisectionGraph()
   task_module.Evaluate(
       self.job,
       event_module.Event(type='initiate', target_task=None, payload={}),
       self.CompoundEvaluatorForTesting(FakeReadValueFails(self.job)))
   result = task_module.Evaluate(
       self.job, SelectEvent(), evaluators.Selector(task_type='find_culprit'))
   self.assertIn('performance_bisection', result)
   bisection = result['performance_bisection']
   # The bisection must be marked failed and carry at least one error.
   self.assertEqual(bisection['status'], 'failed')
   self.assertNotEqual([], bisection['errors'])
Example #12
0
 def testEvaluateFailure_GraphJsonMissingFile(self, isolate_retrieve):
     """A results file missing from the isolate yields ReadValueNoFile."""
     # The isolate has no files at all, so the read must fail.
     isolate_retrieve.return_value = '{"files": {}}'
     self.PopulateTaskGraph(benchmark='some_benchmark',
                            chart='chart',
                            trace='trace',
                            mode='graph_json')
     self.assertNotEqual(
         {},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='initiate', target_task=None, payload={}),
             self.evaluator))

     def ExpectedPayload(attempt):
         return {
             'benchmark': 'some_benchmark',
             'change': mock.ANY,
             'mode': 'graph_json',
             'results_filename': 'some_benchmark/perf_results.json',
             'histogram_options': {
                 'grouping_label': None,
                 'story': None,
                 'statistic': None,
                 'histogram_name': 'chart',
             },
             'graph_json_options': {
                 'chart': 'chart',
                 'trace': 'trace',
             },
             'errors': [{
                 'reason': 'ReadValueNoFile',
                 'message': mock.ANY,
             }],
             'status': 'failed',
             'tries': 1,
             'index': attempt,
         }

     self.assertEqual(
         {'read_value_chromium@aaaaaaa_%d' % attempt: ExpectedPayload(attempt)
          for attempt in range(10)},
         task_module.Evaluate(
             self.job,
             event_module.Event(type='select', target_task=None, payload={}),
             evaluators.Selector(task_type='read_value')))
 def testEvaluateSuccess_NoReproduction(self):
   """If every change reads the same value, bisection blames no culprits."""
   self.PopulateSimpleBisectionGraph(self.job)
   task_module.Evaluate(
       self.job,
       event_module.Event(type='initiate', target_task=None, payload={}),
       self.BisectionEvaluatorForTesting(
           bisection_test_util.FakeReadValueSameResult(self.job, 1.0)))
   evaluate_result = task_module.Evaluate(
       self.job,
       event_module.Event(type='select', target_task=None, payload={}),
       evaluators.Selector(task_type='find_culprit'))
   self.assertIn('performance_bisection', evaluate_result)
   logging.info('Results: %s', evaluate_result['performance_bisection'])
   # assertEquals is a deprecated alias (removed in Python 3.12).
   self.assertEqual(evaluate_result['performance_bisection']['culprits'], [])
Example #14
0
 def testEvaluateStateTransitionProgressions(self):
     """Walks task_0 and task_1 through pending -> ongoing -> completed.

     Each step feeds a transition event to task_module.Evaluate and checks
     the resulting state of all three tasks.
     """
     transitions = [
         ({'target': 'task_0', 'current_state': 'pending',
           'new_state': 'ongoing'},
          {'task_0': 'ongoing', 'task_1': 'pending', 'task_2': 'ongoing'}),
         ({'target': 'task_1', 'current_state': 'pending',
           'new_state': 'ongoing'},
          {'task_0': 'ongoing', 'task_1': 'ongoing', 'task_2': 'ongoing'}),
         ({'target': 'task_0', 'current_state': 'ongoing',
           'new_state': 'completed'},
          {'task_0': 'completed', 'task_1': 'ongoing', 'task_2': 'ongoing'}),
         ({'target': 'task_1', 'current_state': 'ongoing',
           'new_state': 'completed'},
          {'task_0': 'completed', 'task_1': 'completed',
           'task_2': 'completed'}),
     ]
     for event, expected_states in transitions:
         self.assertDictEqual(
             expected_states,
             task_module.Evaluate(
                 self.job, event,
                 functools.partial(TransitionEvaluator, self.job)))
Example #15
0
    def testInitiate_FoundIsolate(self, *_):
        """A pre-seeded isolate is found on initiate, with no build needed."""
        # Seed the isolate for this change.
        change = change_module.Change(
            commits=[change_module.Commit('chromium', '7c7e90be')])
        isolate.Put((('Mac Builder', change, 'telemetry_perf_tests',
                      'https://isolate.server', '7c7e90be'), ))

        # Then ensure that we can find the seeded isolate for the specified
        # revision.
        expected = {
            'find_isolate_chromium@7c7e90be': {
                'bucket': 'luci.bucket',
                'builder': 'Mac Builder',
                'change': mock.ANY,
                'isolate_hash': '7c7e90be',
                'isolate_server': 'https://isolate.server',
                'status': 'completed',
                'target': 'telemetry_perf_tests',
            },
        }
        self.assertDictEqual(
            expected,
            task_module.Evaluate(
                self.job,
                event_module.Event(
                    type='initiate',
                    target_task='find_isolate_chromium@7c7e90be',
                    payload={}), find_isolate.Evaluator(self.job)))
Example #16
0
File: job.py  Project: malinka007/catapult
    def Start(self):
        """Starts the Job and updates it in the Datastore.

        This method is designed to return fast, so that Job creation is
        responsive to the user. It schedules the Job on the task queue without
        running anything. It also posts a bug comment, and updates the
        Datastore.
        """
        if self.use_execution_engine:
            # Treat this as if it's a poll, and run the handler here.
            try:
                # NOTE: the original had a stray trailing comma after this
                # call, which built and discarded a one-element tuple; the
                # comma is removed (no behavior change, less misleading).
                task_module.Evaluate(
                    self,
                    event_module.Event(type='initiate',
                                       target_task=None,
                                       payload={}),
                    task_evaluator.ExecutionEngine(self))
            except task_module.Error as error:
                # A failed evaluation fails the whole job; persist and bail.
                logging.error('Failed: %s', error)
                self.Fail()
                self.put()
                return
        else:
            self._Schedule()
        self.started = True
        self.started_time = datetime.datetime.now()
        self.put()

        title = _ROUND_PUSHPIN + ' Pinpoint job started.'
        comment = '\n'.join((title, self.url))
        # Post the bug comment asynchronously so Start() returns quickly.
        deferred.defer(_PostBugCommentDeferred,
                       self.bug_id,
                       comment,
                       send_email=True,
                       _retry_options=RETRY_OPTIONS)
Example #17
0
    def testPopulateAndEvaluateAdderGraph(self):
        """Evaluates a two-constant '+' graph and expects 0 + 1 == 1."""
        job = job_module.Job.New((), ())
        vertices = [
            task_module.TaskVertex(
                id='input0', vertex_type='constant', payload={'value': 0}),
            task_module.TaskVertex(
                id='input1', vertex_type='constant', payload={'value': 1}),
            task_module.TaskVertex(
                id='plus', vertex_type='operator+', payload={}),
        ]
        edges = [
            task_module.Dependency(from_='plus', to=dep)
            for dep in ('input0', 'input1')
        ]
        task_module.PopulateTaskGraph(
            job, task_module.TaskGraph(vertices=vertices, edges=edges))

        def AdderEvaluator(task, _, accumulator):
            # Constants publish their value; '+' folds its dependencies' sums.
            if task.task_type == 'constant':
                accumulator[task.id] = task.payload.get('value', 0)
            elif task.task_type == 'operator+':
                operands = [accumulator.get(dep) for dep in task.dependencies]
                accumulator[task.id] = functools.reduce(
                    lambda total, value: total + value, operands)

        accumulator = task_module.Evaluate(job, {}, AdderEvaluator)
        self.assertEqual(1, accumulator.get('plus'))
예제 #18
0
 def testPopulateEmptyGraph(self):
     """An empty graph evaluates without the evaluator ever being invoked."""
     job = job_module.Job.New((), ())
     task_graph = task_module.TaskGraph(vertices=[], edges=[])
     task_module.PopulateTaskGraph(job, task_graph)
     evaluator = mock.MagicMock()
     task_module.Evaluate(job, 'test', evaluator)
     # BUG FIX: the original called assert_not_called() BEFORE Evaluate ran,
     # which trivially passes and proves nothing. Assert only afterwards.
     evaluator.assert_not_called()
예제 #19
0
    def testPopulateCycles(self):
        """A two-node cycle still evaluates each task exactly once."""
        job = job_module.Job.New((), ())
        graph = task_module.TaskGraph(
            vertices=[
                task_module.TaskVertex(
                    id='node_0', vertex_type='process', payload={}),
                task_module.TaskVertex(
                    id='node_1', vertex_type='process', payload={}),
            ],
            edges=[
                task_module.Dependency(from_='node_0', to='node_1'),
                task_module.Dependency(from_='node_1', to='node_0'),
            ])
        task_module.PopulateTaskGraph(job, graph)
        call_counts = {}

        def CycleEvaluator(task, event, accumulator):
            logging.debug('Evaluate(%s, %s, %s) called.', task.id, event,
                          accumulator)
            # Count invocations per task id to detect infinite re-evaluation.
            call_counts[task.id] = call_counts.get(task.id, 0) + 1
            return None

        task_module.Evaluate(job, 'test', CycleEvaluator)
        self.assertDictEqual({'node_0': 1, 'node_1': 1}, call_counts)
예제 #20
0
 def testMissingDependency(self):
     """Validation reports a DependencyError for a task with no dependency."""
     job = job_module.Job.New((), ())
     graph = task_module.TaskGraph(
         vertices=[
             task_module.TaskVertex(
                 id='run_test_bbbbbbb_0',
                 vertex_type='run_test',
                 payload={
                     'swarming_server': 'some_server',
                     'dimensions': DIMENSIONS,
                     'extra_args': [],
                 }),
         ],
         edges=[])
     task_module.PopulateTaskGraph(job, graph)
     expected = {
         'run_test_bbbbbbb_0': {
             'errors': [{
                 'cause': 'DependencyError',
                 'message': mock.ANY
             }]
         }
     }
     validate_event = event_module.Event(
         type='validate', target_task=None, payload={})
     self.assertEqual(
         expected,
         task_module.Evaluate(job, validate_event, run_test.Validator()))
예제 #21
0
    def testPopulateEvaluateCallCounts(self):
        """Every vertex of a parent/leaves graph is evaluated exactly once."""
        job = job_module.Job.New((), ())
        vertices = [
            task_module.TaskVertex(id=node_id, vertex_type='node', payload={})
            for node_id in ('leaf_0', 'leaf_1', 'parent')
        ]
        edges = [
            task_module.Dependency(from_='parent', to='leaf_0'),
            task_module.Dependency(from_='parent', to='leaf_1'),
        ]
        task_module.PopulateTaskGraph(
            job, task_module.TaskGraph(vertices=vertices, edges=edges))
        call_counts = {}

        def CallCountEvaluator(task, event, accumulator):
            logging.debug('Evaluate(%s, %s, %s) called.', task.id, event,
                          accumulator)
            # Tally per-task invocations so we can assert single evaluation.
            call_counts[task.id] = call_counts.get(task.id, 0) + 1
            return None

        task_module.Evaluate(job, 'test', CallCountEvaluator)
        self.assertDictEqual(
            {
                'leaf_0': 1,
                'leaf_1': 1,
                'parent': 1,
            }, call_counts)
예제 #22
0
    def testEvaluateFailedDependency(self, *_):
        """run_test attempts fail at once when the isolate build has failed."""
        evaluator = evaluators.SequenceEvaluator(evaluators=(
            evaluators.FilteringEvaluator(
                predicate=evaluators.TaskTypeEq('find_isolate'),
                delegate=evaluators.SequenceEvaluator(evaluators=(
                    functools.partial(FakeFindIsolateFailed, self.job),
                    evaluators.TaskPayloadLiftingEvaluator()))),
            run_test.Evaluator(self.job),
        ))

        # Initiating the run_test tasks should immediately surface a hard
        # failure on every attempt, because the dependency failed.
        expected = {'build_aaaaaaa': mock.ANY}
        for attempt in range(11):
            expected['run_test_aaaaaaa_%s' % (attempt,)] = {
                'status': 'failed',
                'errors': mock.ANY,
                'dimensions': DIMENSIONS,
                'extra_args': [],
                'swarming_server': 'some_server',
            }
        self.assertEqual(
            expected,
            task_module.Evaluate(
                self.job,
                event_module.Event(type='initiate',
                                   target_task=None,
                                   payload={}), evaluator))
예제 #23
0
 def testEvaluatePendingDependency(self, *_):
     """A synthetic poll update leaves all tasks in the 'pending' state."""
     expected = {
         'build_aaaaaaa': {
             'builder': 'Some Builder',
             'target': 'telemetry_perf_tests',
             'bucket': 'luci.bucket',
             'change': {
                 'commits': [{
                     'repository': 'chromium',
                     'git_hash': 'aaaaaaa',
                 }]
             },
             'status': 'pending',
         }
     }
     for attempt in range(11):
         expected['run_test_aaaaaaa_%s' % (attempt,)] = {
             'status': 'pending',
             'dimensions': DIMENSIONS,
             'extra_args': [],
             'swarming_server': 'some_server',
         }
     self.assertEqual(
         expected,
         task_module.Evaluate(
             self.job,
             event_module.Event(type='update',
                                target_task=None,
                                payload={
                                    'kind': 'synthetic',
                                    'action': 'poll'
                                }), run_test.Evaluator(self.job)))
예제 #24
0
  def testInitiate_ScheduleBuild(self, put, _):
    """An 'initiate' event schedules a buildbucket build for the task."""
    put.return_value = {'build': {'id': '345982437987234'}}

    # No isolate was seeded for this change, so the evaluator must fall
    # back to scheduling a build via buildbucket.
    expected_payload = {
        'isolate_server': None,
        'isolate_hash': None,
        'buildbucket_result': {
            'build': {
                'id': '345982437987234'
            },
        },
        'buildbucket_job_status': None,
        'status': 'ongoing',
    }
    self.assertDictEqual(
        {'build_7c7e90be': expected_payload},
        task_module.Evaluate(
            self.job,
            find_isolate.BuildEvent(
                type='initiate', target_task='build_7c7e90be', payload={}),
            find_isolate.Evaluator(self.job)))
    self.assertEqual(1, put.call_count)
예제 #25
0
  def setUp(self):
    super(FindIsolateEvaluatorUpdateTests, self).setUp()

    # Seed the polling precondition: a build must already have been
    # scheduled successfully through buildbucket.
    with mock.patch('dashboard.services.buildbucket_service.Put') as put:
      put.return_value = {'build': {'id': '345982437987234'}}
      expected_payload = {
          'isolate_server': None,
          'isolate_hash': None,
          'buildbucket_result': {
              'build': {
                  'id': '345982437987234'
              }
          },
          'buildbucket_job_status': None,
          'status': 'ongoing',
      }
      self.assertDictEqual(
          {'build_7c7e90be': expected_payload},
          task_module.Evaluate(
              self.job,
              find_isolate.BuildEvent(
                  type='initiate', target_task='build_7c7e90be', payload={}),
              find_isolate.Evaluator(self.job)))
      self.assertEqual(1, put.call_count)
예제 #26
0
 def testUpdate_BuildFailed_Cancelled(self, get_build_status):
   """A cancelled buildbucket build transitions the task to 'cancelled'."""
   get_build_status.return_value = {
       'build': {
           'status': 'COMPLETED',
           'result': 'CANCELLED',
           'result_details_json': '{}',
       }
   }
   expected_payload = {
       'isolate_server': None,
       'isolate_hash': None,
       'buildbucket_result': {
           'build': {
               'id': '345982437987234'
           }
       },
       'buildbucket_job_status': {
           'status': 'COMPLETED',
           'result': 'CANCELLED',
           'result_details_json': '{}',
       },
       'status': 'cancelled',
   }
   self.assertDictEqual(
       {'build_7c7e90be': expected_payload},
       task_module.Evaluate(
           self.job,
           find_isolate.BuildEvent(
               type='update',
               target_task='build_7c7e90be',
               payload={'status': 'build_completed'}),
           find_isolate.Evaluator(self.job)))
   self.assertEqual(1, get_build_status.call_count)
예제 #27
0
    def setUp(self):
        super(FindIsolateEvaluatorUpdateTests, self).setUp()

        # Seed the polling precondition: a build must already have been
        # scheduled successfully through buildbucket.
        with mock.patch('dashboard.services.buildbucket_service.Put') as put:
            put.return_value = {'build': {'id': '345982437987234'}}
            expected_payload = {
                'buildbucket_result': {
                    'build': {
                        'id': '345982437987234'
                    }
                },
                'status': 'ongoing',
                'builder': 'Mac Builder',
                'bucket': 'luci.bucket',
                'change': mock.ANY,
                'target': 'telemetry_perf_tests',
                'tries': 1,
            }
            self.assertDictEqual(
                {'find_isolate_chromium@7c7e90be': expected_payload},
                task_module.Evaluate(
                    self.job,
                    event_module.Event(
                        type='initiate',
                        target_task='find_isolate_chromium@7c7e90be',
                        payload={}), find_isolate.Evaluator(self.job)))
            self.assertEqual(1, put.call_count)
예제 #28
0
 def testUpdate_MissingIsolates_InvalidJson(self, get_build_status):
   """Unparseable result_details_json marks the build task as 'failed'."""
   # BUG FIX: renamed from `json` — the local variable shadowed the stdlib
   # json module, which this test file imports at module level.
   invalid_json = '{ invalid }'
   get_build_status.return_value = {
       'build': {
           'status': 'COMPLETED',
           'result': 'SUCCESS',
           'result_details_json': invalid_json,
       }
   }
   self.assertDictEqual(
       {
           'build_7c7e90be': {
               'isolate_server': None,
               'isolate_hash': None,
               'buildbucket_result': {
                   'build': {
                       'id': '345982437987234'
                   }
               },
               'buildbucket_job_status': mock.ANY,
               'status': 'failed',
               'errors': mock.ANY,
           },
       },
       task_module.Evaluate(
           self.job,
           find_isolate.BuildEvent(
               type='update',
               target_task='build_7c7e90be',
               payload={'status': 'build_completed'}),
           find_isolate.Evaluator(self.job)))
   self.assertEqual(1, get_build_status.call_count)
예제 #29
0
 def testUpdate_MissingIsolates_InvalidJson(self, get_build_status):
     """Unparseable result_details_json marks the find_isolate task 'failed'."""
     # BUG FIX: renamed from `json` — the local variable shadowed the stdlib
     # json module, which this test file imports at module level.
     invalid_json = '{ invalid }'
     get_build_status.return_value = {
         'build': {
             'status': 'COMPLETED',
             'result': 'SUCCESS',
             'result_details_json': invalid_json,
         }
     }
     self.assertDictEqual(
         {
             'find_isolate_chromium@7c7e90be': {
                 'bucket': 'luci.bucket',
                 'buildbucket_result': {
                     'build': {
                         'id': '345982437987234'
                     }
                 },
                 'buildbucket_job_status': mock.ANY,
                 'builder': 'Mac Builder',
                 'change': mock.ANY,
                 'status': 'failed',
                 'errors': mock.ANY,
                 'target': 'telemetry_perf_tests',
                 'tries': 1,
             },
         },
         task_module.Evaluate(
             self.job,
             event_module.Event(
                 type='update',
                 target_task='find_isolate_chromium@7c7e90be',
                 payload={'status': 'build_completed'}),
             find_isolate.Evaluator(self.job)))
     self.assertEqual(1, get_build_status.call_count)
예제 #30
0
    def testInitiate_ScheduleBuild(self, put, _):
        # We then need to make sure that the buildbucket put was called.
        put.return_value = {'build': {'id': '345982437987234'}}

        # This time we don't seed the isolate for the change to force the build.
        self.assertDictEqual(
            {
                'find_isolate_chromium@7c7e90be': {
                    'bucket': 'luci.bucket',
                    'buildbucket_result': {
                        'build': {
                            'id': '345982437987234'
                        },
                    },
                    'builder': 'Mac Builder',
                    'change': mock.ANY,
                    'status': 'ongoing',
                    'target': 'telemetry_perf_tests',
                    'tries': 1,
                },
            },
            task_module.Evaluate(
                self.job,
                event_module.Event(
                    type='initiate',
                    target_task='find_isolate_chromium@7c7e90be',
                    payload={}), find_isolate.Evaluator(self.job)))
        self.assertEqual(1, put.call_count)