def testNotUpdateBug(self, issue_tracker):
        """No bug update should happen for analyses that do not qualify."""
        # Still running, so no update yet.
        running = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
        running.status = analysis_status.RUNNING

        # Completed, but never associated with a bug.
        no_bug = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
        no_bug.status = analysis_status.COMPLETED

        # Bug updates explicitly disabled by configuration.
        config_disabled = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
        config_disabled.status = analysis_status.COMPLETED
        config_disabled.bug_id = 123
        config_disabled.data_points = [DataPoint(), DataPoint(), DataPoint()]
        config_disabled.algorithm_parameters = {
            'update_monorail_bug': False,
        }

        # Not enough data points gathered to justify an update.
        too_few_points = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
        too_few_points.status = analysis_status.COMPLETED
        too_few_points.bug_id = 123
        too_few_points.algorithm_parameters = {
            'update_monorail_bug': True,
        }
        too_few_points.data_points = [DataPoint()]

        for analysis in (running, no_bug, config_disabled, too_few_points):
            analysis.put()
            pipeline = update_flake_bug_pipeline.UpdateFlakeBugPipeline()
            self.assertFalse(pipeline.run(analysis.key.urlsafe()))
        issue_tracker.assert_not_called()
 def testSplitIndexNotExists(self, mocked_steppiness):
     """_Steppiness asserts when the split point matches no data point."""
     points = [
         DataPoint(build_number=99, pass_rate=0.95),
         DataPoint(build_number=100, pass_rate=0.9),
     ]
     # Build 90 is not among the data points, so the assertion fires.
     with self.assertRaises(AssertionError):
         confidence._Steppiness(points, lambda p: p.build_number, 90)
     mocked_steppiness.assert_not_called()
 def testShouldUpdateBugForAnalysisFalse(self):
     """Bug update is skipped when the configuration disables it."""
     analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
     analysis.status = analysis_status.COMPLETED
     analysis.bug_id = 123
     analysis.suspected_flake_build_number = 1
     analysis.data_points = [DataPoint() for _ in range(3)]
     # The config flag alone should veto the update.
     analysis.algorithm_parameters = {'update_monorail_bug': False}
     result = update_flake_bug_pipeline._ShouldUpdateBugForAnalysis(analysis)
     self.assertFalse(result)
 def testNotEnoughData(self, mocked_steppiness):
     """Steppiness is 0 when there are too few data points to score."""
     points = [
         DataPoint(build_number=99, pass_rate=0.95),
         DataPoint(build_number=100, pass_rate=0.9),
     ]
     result = confidence._Steppiness(points, lambda p: p.build_number, 100)
     self.assertEqual(0, result)
     # The underlying scorer must never be consulted with so little data.
     mocked_steppiness.assert_not_called()
Example #5
0
    def testGetDictOfCommitPositionAndRevision(self):
        """Blame-list revisions map onto descending commit positions."""
        point = DataPoint()
        point.blame_list = ['r1', 'r2', 'r3']
        point.commit_position = 100

        # The last revision in the blame list lands on the data point's own
        # commit position; earlier revisions count down from there.
        self.assertEqual({98: 'r1', 99: 'r2', 100: 'r3'},
                         point.GetDictOfCommitPositionAndRevision())
 def testPaddingDataPoints(self, mocked_steppiness):
     """A nonexistent-test point (pass_rate -1) is padded as fully passing."""
     points = [
         DataPoint(build_number=99, pass_rate=-1),
         DataPoint(build_number=100, pass_rate=0.5),
     ]
     mocked_steppiness.side_effect = [1]
     result = confidence._Steppiness(points, lambda p: p.build_number, 100)
     self.assertEqual(1, result)
     # The -1 sentinel is expanded into passing (1.0) pass rates.
     mocked_steppiness.assert_called_once_with([1, 1, 1, 1, 0.5], 4)
Example #7
0
    def testGetDataPointOfSuspectedBuild(self):
        """The data point matching the suspected build number is returned."""
        suspected_build = 123
        point = DataPoint()
        point.build_number = suspected_build

        analysis = MasterFlakeAnalysis.Create('m', 'b', 125, 's', 't')
        analysis.suspected_flake_build_number = suspected_build
        analysis.data_points.append(point)

        found = analysis.GetDataPointOfSuspectedBuild()
        self.assertEqual(suspected_build, found.build_number)
Example #8
0
    def testGetDataPointOfSuspectedBuildNoDataPoint(self):
        """None is returned when no data point matches the suspected build."""
        # This scenario should not happen in practice.
        recorded_build = 123
        suspected_build = 124  # Deliberately different from the data point.
        point = DataPoint()
        point.build_number = recorded_build

        analysis = MasterFlakeAnalysis.Create('m', 'b', 125, 's', 't')
        analysis.suspected_flake_build_number = suspected_build
        analysis.data_points.append(point)

        self.assertIsNone(analysis.GetDataPointOfSuspectedBuild())
 def testCommitPosition(self, mocked_steppiness):
     """SteppinessForCommitPosition forwards pass rates and split index."""
     points = [
         DataPoint(commit_position=position, pass_rate=1)
         for position in (90, 94, 97, 99, 100)
     ]
     mocked_steppiness.side_effect = [0]
     result = confidence.SteppinessForCommitPosition(points, 99)
     self.assertEqual(0, result)
     # Commit position 99 is the fourth point, hence split index 3.
     mocked_steppiness.assert_called_once_with([1, 1, 1, 1, 1], 3)
 def testShouldUpdateBugForAnalysis(self):
     """Bug update proceeds for a completed, sufficiently confident analysis."""
     analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
     analysis.status = analysis_status.COMPLETED
     analysis.bug_id = 123
     analysis.data_points = [DataPoint() for _ in range(3)]
     analysis.suspected_flake_build_number = 1
     analysis.algorithm_parameters = {
         'update_monorail_bug': True,
         'minimum_confidence_score_to_run_tryjobs': 0.6
     }
     analysis.confidence_in_suspected_build = 0.7  # Above the 0.6 threshold.
     self.assertTrue(
         update_flake_bug_pipeline._ShouldUpdateBugForAnalysis(analysis))
  def testGetSuspectedFlakeInfo(self):
    """_GetSuspectedFlakeInfo reports details of the suspected build."""
    analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
    analysis.suspected_flake_build_number = 123
    point = DataPoint()
    point.build_number = 123
    point.pass_rate = 0.9
    point.commit_position = 2
    point.git_hash = 'git_hash_2'
    point.previous_build_commit_position = 1
    point.previous_build_git_hash = 'git_hash_1'
    analysis.data_points.append(point)
    analysis.confidence_in_suspected_build = 0
    analysis.Save()

    self.assertEqual(
        {
            'confidence': 0,
            'build_number': analysis.suspected_flake_build_number,
            'commit_position': 2,
            'git_hash': 'git_hash_2',
            # The previous build's position/hash bound the regression range.
            'lower_bound_commit_position': 1,
            'lower_bound_git_hash': 'git_hash_1',
            'triage_result': 0
        },
        check_flake._GetSuspectedFlakeInfo(analysis))
    def testNoUpdateIfBugDeleted(self, issue_tracker):
        """No comment is posted when the monorail bug no longer exists."""
        analysis = MasterFlakeAnalysis.Create('m', 'b', 1, 's', 't')
        analysis.status = analysis_status.COMPLETED
        analysis.suspected_flake_build_number = 1
        analysis.confidence_in_suspected_build = 0.7
        analysis.bug_id = 123
        analysis.algorithm_parameters = {'update_monorail_bug': True}
        analysis.data_points = [DataPoint(), DataPoint(), DataPoint()]
        analysis.put()

        # Simulate the tracker reporting the bug as deleted.
        tracker_instance = mock.Mock()
        tracker_instance.getIssue.return_value = None
        issue_tracker.return_value = tracker_instance

        pipeline = update_flake_bug_pipeline.UpdateFlakeBugPipeline()
        self.assertFalse(pipeline.run(analysis.key.urlsafe()))
        # The bug was looked up but never updated.
        issue_tracker.assert_has_calls(
            [mock.call('chromium'),
             mock.call().getIssue(123)])
Example #13
0
    def testGetCommitPosition(self):
        """Each blame-list revision maps to a sequential commit position."""
        point = DataPoint()
        point.blame_list = ['r1', 'r2', 'r3']
        point.commit_position = 100
        point.previous_build_commit_position = 97

        # Positions count up from just after the previous build's position.
        for expected, revision in zip((98, 99, 100), ('r1', 'r2', 'r3')):
            self.assertEqual(expected, point.GetCommitPosition(revision))
Example #14
0
    def testGetRevisionAtCommitPosition(self):
        """Commit positions map back onto blame-list revisions in order."""
        point = DataPoint()
        point.blame_list = ['r1', 'r2', 'r3']
        point.commit_position = 100

        # The final blame-list entry sits at the data point's own position.
        for position, expected in ((98, 'r1'), (99, 'r2'), (100, 'r3')):
            self.assertEqual(expected,
                             point.GetRevisionAtCommitPosition(position))
  def testGetNumbersOfDataPointGroups(self):
    """Data points split into try-job-based and build-based groups."""
    try_job_point = DataPoint()
    try_job_point.try_job_url = 'try_job_url'

    build_point = DataPoint()
    build_point.build_number = 1

    # One point of each kind yields a (1, 1) split.
    self.assertEqual(
        (1, 1),
        check_flake._GetNumbersOfDataPointGroups(
            [try_job_point, build_point]))
 def testBuildNumber(self, mocked_steppiness):
     """SteppinessForBuild excludes try-job data points from the series."""
     points = [
         DataPoint(build_number=90, pass_rate=1),
         DataPoint(build_number=94, pass_rate=1),
         # A try-job point for the same build must be filtered out.
         DataPoint(build_number=94, pass_rate=0.5, try_job_url='http://'),
         DataPoint(build_number=97, pass_rate=1),
         DataPoint(build_number=99, pass_rate=1),
         DataPoint(build_number=100, pass_rate=1),
     ]
     mocked_steppiness.side_effect = [0]
     result = confidence.SteppinessForBuild(points, 99)
     self.assertEqual(0, result)
     mocked_steppiness.assert_called_once_with([1, 1, 1, 1, 1], 3)
Example #17
0
    def testReset(self):
        """Reset() clears all analysis progress back to a pristine state.

        Every field is preset to a non-default value so each assertion below
        actually verifies that Reset() cleared it.
        """
        analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 't')
        analysis.swarming_rerun_results = [{}]
        analysis.status = analysis_status.RUNNING
        analysis.correct_regression_range = True
        # NOTE: the original test immediately overwrote this with None,
        # which made the assertIsNone below pass without exercising Reset().
        analysis.correct_culprit = False
        analysis.data_points = [DataPoint()]
        analysis.suspected_flake_build_number = 123
        analysis.culprit = FlakeCulprit.Create('r', 'a1b2c3d4', 12345, 'url')
        analysis.try_job_status = analysis_status.COMPLETED

        analysis.Reset()

        self.assertEqual([], analysis.swarming_rerun_results)
        self.assertEqual(analysis_status.PENDING, analysis.status)
        self.assertIsNone(analysis.correct_regression_range)
        self.assertIsNone(analysis.correct_culprit)
        self.assertIsNone(analysis.suspected_flake_build_number)
        self.assertEqual([], analysis.data_points)
        self.assertIsNone(analysis.culprit)
        self.assertIsNone(analysis.try_job_status)
  def run(self, revision, commit_position, try_job_result, urlsafe_try_job_key,
          urlsafe_flake_analysis_key):
    """Extracts pass rate information and updates flake analysis.

    Marks the try job entity completed, computes the pass rate for the test
    at |revision| from the try job's report, and appends a new DataPoint to
    the flake analysis.

    Args:
      revision (str): The git hash the try job was run against.
      commit_position (int): The commit position corresponding to |revision|.
      try_job_result (dict): The result dict reported by buildbucket. The
          per-step results are read from try_job_result['report']['result'].
          Example:
          {
              'metadata': {},
              'report': {
                  'result': {
                      'cafed52c5f3313646b8e04e05601b5cb98f305b3': {
                          'browser_tests': {
                              'status': 'failed',
                              'failures': ['TabCaptureApiTest.FullscreenEvents'],
                              'valid': True,
                              'pass_fail_counts': {
                                  'TabCaptureApiTest.FullscreenEvents': {
                                      'pass_count': 28,
                                      'fail_count': 72
                                  }
                              },
                              'step_metadata': {
                                  'task_ids': [],
                                  ...
                              }
                          }
                      }
                  }
              }
          }
      urlsafe_try_job_key (str): The urlsafe key to the corresponding try job
          entity.
      urlsafe_flake_analysis_key (str): The urlsafe key for the master flake
          analysis entity to be updated.
    """
    # Both entities must already exist; this pipeline only updates them.
    flake_analysis = ndb.Key(urlsafe=urlsafe_flake_analysis_key).get()
    try_job = ndb.Key(urlsafe=urlsafe_try_job_key).get()
    assert flake_analysis
    assert try_job

    try_job.status = analysis_status.COMPLETED
    try_job.put()

    step_name = flake_analysis.canonical_step_name
    test_name = flake_analysis.test_name
    result = try_job_result['report']['result']
    pass_fail_counts = result[revision][step_name].get('pass_fail_counts', {})

    if pass_fail_counts:
      test_results = pass_fail_counts[test_name]
      pass_count = test_results['pass_count']
      fail_count = test_results['fail_count']
      tries = pass_count + fail_count
      pass_rate = float(pass_count) / tries
    else:  # Test does not exist.
      # -1 is the sentinel pass rate for a nonexistent test at this revision.
      pass_rate = -1

    # Record the try job outcome as a new data point on the analysis.
    data_point = DataPoint()
    data_point.commit_position = commit_position
    data_point.git_hash = revision
    data_point.pass_rate = pass_rate
    # flake_results[-1] is the most recent try job attempt's result entry.
    data_point.try_job_url = try_job.flake_results[-1].get('url')
    data_point.task_id = _GetSwarmingTaskIdForTryJob(
        try_job.flake_results[-1].get('report'), revision, step_name, test_name)
    flake_analysis.data_points.append(data_point)
    flake_analysis.put()
Example #19
0
    def _UpdateMasterFlakeAnalysis(self,
                                   master_name,
                                   builder_name,
                                   build_number,
                                   step_name,
                                   master_build_number,
                                   test_name,
                                   version_number,
                                   pass_rate,
                                   flake_swarming_task,
                                   has_valid_artifact=True):
        """Update MasterFlakeAnalysis to include result of the swarming task."""
        analysis = MasterFlakeAnalysis.GetVersion(
            master_name,
            builder_name,
            master_build_number,
            step_name,
            test_name,
            version=version_number)
        logging.info(
            'Updating MasterFlakeAnalysis swarming task data %s/%s/%s/%s/%s',
            master_name, builder_name, master_build_number, step_name,
            test_name)

        point = DataPoint()
        point.build_number = build_number
        point.pass_rate = pass_rate
        point.task_id = flake_swarming_task.task_id
        point.has_valid_artifact = has_valid_artifact

        # Include git information about each build that was run.
        current_build = build_util.GetBuildInfo(master_name, builder_name,
                                                build_number)
        point.commit_position = current_build.commit_position
        point.git_hash = current_build.chromium_revision

        if build_number > 0:
            # Derive the blame list from the span since the previous build.
            prior_build = build_util.GetBuildInfo(master_name, builder_name,
                                                  build_number - 1)
            point.previous_build_commit_position = prior_build.commit_position
            point.previous_build_git_hash = prior_build.chromium_revision
            point.blame_list = _GetCommitsBetweenRevisions(
                prior_build.chromium_revision, current_build.chromium_revision)
        else:
            # Build 0 has no predecessor; fall back to its own blame list.
            point.blame_list = current_build.blame_list

        analysis.data_points.append(point)

        results = flake_swarming_task.GetFlakeSwarmingTaskData()
        # TODO(lijeffrey): Determine whether or not this flake swarming task
        # was a cache hit (already ran results for more iterations than were
        # requested) and update results['cache_hit'] accordingly.
        analysis.swarming_rerun_results.append(results)
        analysis.put()
Example #20
0
def _GenerateDataPoint(pass_rate=None,
                       build_number=None,
                       task_id=None,
                       try_job_url=None,
                       commit_position=None,
                       git_hash=None,
                       previous_build_commit_position=None,
                       previous_build_git_hash=None,
                       blame_list=None):
    """Builds a DataPoint populated with the given attribute values."""
    point = DataPoint()
    # A fresh list is substituted for a missing/empty blame list so callers
    # never share a mutable default.
    point.blame_list = blame_list or []
    point.pass_rate = pass_rate
    point.build_number = build_number
    point.task_id = task_id
    point.try_job_url = try_job_url
    point.commit_position = commit_position
    point.git_hash = git_hash
    point.previous_build_commit_position = previous_build_commit_position
    point.previous_build_git_hash = previous_build_git_hash
    return point
  def testRequestExistingAnalysis(self, *_):
    """A request for a build covered by a prior analysis reuses its results."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 's'
    test_name = 't'
    success_rate = 0.9

    previous_analysis = MasterFlakeAnalysis.Create(
        master_name, builder_name, build_number - 1, step_name, test_name)
    data_point = DataPoint()
    data_point.build_number = build_number - 1
    data_point.pass_rate = success_rate
    previous_analysis.data_points.append(data_point)
    previous_analysis.status = analysis_status.COMPLETED
    previous_analysis.suspected_flake_build_number = 100
    # NOTE: leading-zero int literals (01, 05, 00) from the original are
    # Python-2-only syntax; plain 1/5/0 are the same values.
    previous_analysis.request_time = datetime.datetime(2016, 10, 1, 12, 10, 0)
    previous_analysis.start_time = datetime.datetime(2016, 10, 1, 12, 10, 5)
    previous_analysis.end_time = datetime.datetime(2016, 10, 1, 13, 10, 0)
    previous_analysis.algorithm_parameters = {'iterations_to_rerun': 100}
    previous_analysis.Save()

    previous_request = FlakeAnalysisRequest.Create(test_name, False, None)
    build_step = BuildStep.Create(
        master_name, builder_name, build_number, step_name, None)
    build_step.wf_master_name = build_step.master_name
    build_step.wf_builder_name = build_step.builder_name
    build_step.wf_build_number = build_step.build_number
    build_step.wf_step_name = build_step.step_name
    previous_request.build_steps.append(build_step)
    previous_request.analyses.append(previous_analysis.key)
    previous_request.Save()

    self.mock_current_user(user_email='*****@*****.**')

    response = self.test_app.get('/waterfall/flake', params={
        'url': buildbot.CreateBuildUrl(master_name, builder_name, build_number),
        'step_name': step_name,
        'test_name': test_name,
        'format': 'json'})

    expected_check_flake_result = {
        'key': previous_analysis.key.urlsafe(),
        'pass_rates': [[12345, 0.9, '1', 100, 'git_hash_2', 12344,
                        'git_hash_1']],
        'analysis_status': STATUS_TO_DESCRIPTION.get(previous_analysis.status),
        'master_name': master_name,
        'builder_name': builder_name,
        'build_number': build_number - 1,
        'step_name': step_name,
        'test_name': test_name,
        'request_time': '2016-10-01 12:10:00 UTC',
        'build_level_number': 1,
        'revision_level_number': 0,
        'error': None,
        'iterations_to_rerun': 100,
        'pending_time': '00:00:05',
        'duration': '00:59:55',
        'suspected_flake': {
            'build_number': 100,
            'commit_position': 12345,
            'git_hash': 'a_git_hash',
            'triage_result': 0
        },
        'version_number': 1,
        'show_input_ui': False,
        'culprit': {},
        'try_job_status': None,
        'last_attempted_swarming_task': {
            'task_id': None,
            'build_number': None
        },
        'last_attempted_try_job': {},
        'user_email': '*****@*****.**'
    }

    self.assertEqual(200, response.status_int)
    self.assertEqual(expected_check_flake_result, response.json_body)
  def testGetCoordinatesData(self):
    """Coordinates sort by commit position; bounds come from neighbors."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 's'
    test_name = 't'
    success_rate = .9
    try_job_url = 'try_job_url'
    analysis = MasterFlakeAnalysis.Create(
        master_name, builder_name, build_number, step_name, test_name)

    later_point = DataPoint()
    later_point.build_number = build_number
    later_point.pass_rate = success_rate
    later_point.commit_position = 5
    later_point.git_hash = 'git_hash_5'
    later_point.previous_build_commit_position = 4
    later_point.previous_build_git_hash = 'git_hash_4'
    later_point.try_job_url = try_job_url
    analysis.data_points.append(later_point)

    earlier_point = DataPoint()
    earlier_point.build_number = build_number - 3
    earlier_point.pass_rate = success_rate
    earlier_point.commit_position = 2
    earlier_point.git_hash = 'git_hash_2'
    earlier_point.previous_build_commit_position = 1
    earlier_point.previous_build_git_hash = 'git_hash_1'
    earlier_point.try_job_url = try_job_url
    analysis.data_points.append(earlier_point)
    analysis.Save()

    self.assertEqual(
        [
            {
                'commit_position': 2,
                'pass_rate': success_rate,
                'task_id': None,
                'build_number': build_number - 3,
                'git_hash': 'git_hash_2',
                'try_job_url': try_job_url
            },
            {
                'commit_position': 5,
                'pass_rate': success_rate,
                'task_id': None,
                'build_number': build_number,
                'git_hash': 'git_hash_5',
                # Bounds are taken from the preceding coordinate, not the
                # data point's own previous-build fields.
                'lower_bound_commit_position': 2,
                'lower_bound_git_hash': 'git_hash_2',
                'try_job_url': try_job_url
            }
        ],
        check_flake._GetCoordinatesData(analysis))
  def testAnyoneCanViewScheduledAnalysis(self, *_):
    """Any signed-in user can view an already-scheduled analysis by key."""
    master_name = 'm'
    builder_name = 'b'
    build_number = '123'
    step_name = 's'
    test_name = 't'
    success_rate = .9

    analysis = MasterFlakeAnalysis.Create(
        master_name, builder_name, build_number, step_name, test_name)
    data_point = DataPoint()
    data_point.build_number = int(build_number)
    data_point.pass_rate = success_rate
    data_point.task_id = '1'
    analysis.data_points.append(data_point)
    analysis.status = analysis_status.COMPLETED
    analysis.suspected_flake_build_number = 100
    # NOTE: leading-zero int literals (01, 05, 00) from the original are
    # Python-2-only syntax; plain 1/5/0 are the same values.
    analysis.request_time = datetime.datetime(2016, 10, 1, 12, 10, 0)
    analysis.start_time = datetime.datetime(2016, 10, 1, 12, 10, 5)
    analysis.end_time = datetime.datetime(2016, 10, 1, 13, 10, 0)
    analysis.algorithm_parameters = {'iterations_to_rerun': 100}
    analysis.Save()

    self.mock_current_user(user_email='*****@*****.**')

    response = self.test_app.get('/waterfall/flake', params={
        'key': analysis.key.urlsafe(),
        'format': 'json'})

    expected_check_flake_result = {
        'key': analysis.key.urlsafe(),
        'pass_rates': [[12345, 0.9, '1', 100, 'git_hash_2', 12344,
                        'git_hash_1']],
        'analysis_status': STATUS_TO_DESCRIPTION.get(analysis.status),
        'master_name': master_name,
        'builder_name': builder_name,
        'build_number': int(build_number),
        'step_name': step_name,
        'test_name': test_name,
        'request_time': '2016-10-01 12:10:00 UTC',
        'build_level_number': 1,
        'revision_level_number': 0,
        'error': None,
        'iterations_to_rerun': 100,
        'pending_time': '00:00:05',
        'duration': '00:59:55',
        'suspected_flake': {
            'build_number': 100,
            'commit_position': 12345,
            'git_hash': 'git_hash_1',
            'triage_result': 0
        },
        'version_number': 1,
        'show_input_ui': False,
        'culprit': {},
        'try_job_status': None,
        'last_attempted_swarming_task': {
            'task_id': None,
            'build_number': None
        },
        'last_attempted_try_job': {},
        'user_email': '*****@*****.**'
    }

    # assertEquals is a deprecated alias; use assertEqual.
    self.assertEqual(200, response.status_int)
    self.assertEqual(expected_check_flake_result, response.json_body)