def testCopyFrom(self):
        # Destination request with no extra data populated.
        dst = FlakeAnalysisRequest.Create('flaky_test', False, 123)

        # Source request carrying a build step, a user email and an analysis key.
        src = FlakeAnalysisRequest.Create('flaky_test', True, 456)
        src.AddBuildStep('m', 'b1', 1, 's', datetime(2016, 10, 1))
        src.user_emails = ['email']
        analysis = MasterFlakeAnalysis.Create('m', 'b', 100, 's', 't')
        analysis.Save()
        src.analyses.append(analysis.key)

        dst.CopyFrom(src)

        # Every copied attribute must now match the source.
        for attr in ('is_step', 'bug_id', 'user_emails', 'build_steps',
                     'analyses'):
            self.assertEqual(getattr(src, attr), getattr(dst, attr))
Exemple #2
0
 def CreateFlakeAnalysisRequest(flake):
   """Builds a FlakeAnalysisRequest from the given flake record.

   Every build step of the flake is copied onto the request, stamped with the
   UTC time at which it is added.
   """
   request = FlakeAnalysisRequest.Create(flake.name, flake.is_step,
                                         flake.bug_id)
   for build_step in flake.build_steps:
     request.AddBuildStep(build_step.master_name, build_step.builder_name,
                          build_step.build_number, build_step.step_name,
                          time_util.GetUTCNow())
   return request
    def testFindMatchingAnalysisForConfiguration(self, _):
        # Save an analysis and attach its key to a request; the request must
        # then resolve the ('m', 'b') configuration back to that analysis.
        flake_request = FlakeAnalysisRequest.Create('test', False, 123)
        saved_analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 'test')
        saved_analysis.Save()
        flake_request.analyses.append(saved_analysis.key)
        flake_request.Save()

        found = flake_request.FindMatchingAnalysisForConfiguration('m', 'b')
        self.assertEqual(saved_analysis, found)
Exemple #4
0
def _CheckForNewAnalysis(request):
    """Checks if a new analysis is needed for the requested flake.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.

  Returns:
    (version_number, build_step)
    version_number (int): The version of the FlakeAnalysisRequest if a new
        analysis is needed; otherwise 0.
    build_step (BuildStep): a BuildStep instance if a new analysis is needed;
        otherwise None.
  """
    # FlakeAnalysisRequest appears to be a versioned entity keyed by the
    # flake's name; GetVersion without an explicit version returns the latest
    # stored one, or None if the flake was never requested before.
    existing_request = FlakeAnalysisRequest.GetVersion(key=request.name)
    if not existing_request or (existing_request.bug_id and request.bug_id
                                and existing_request.bug_id != request.bug_id):
        # If no existing analysis or last analysis was for a different bug, randomly
        # pick one configuration for a new analysis.
        if existing_request:
            # Make a copy to preserve the version number of existing analysis and
            # prevent concurrent analyses of the same flake.
            # Previously stored requester emails are obscured before being
            # merged with the (de-duplicated) new requesters' emails.
            user_emails = (email_util.ObscureEmails(
                existing_request.user_emails, ['google.com']) +
                           list(set(request.user_emails)))
            existing_request.CopyFrom(request)
            request = existing_request
            request.user_emails = user_emails
        # The email list was just (re)written, so reset the obscuring flag and
        # record the edit time for the retention-based obscuring cron.
        request.user_emails_obscured = False
        request.user_emails_last_edit = time_util.GetUTCNow()

        swarmed, supported, supported_build_step = _CheckFlakeSwarmedAndSupported(
            request)
        request.swarmed = swarmed
        request.supported = supported

        if supported_build_step and not request.is_step:
            supported_build_step.scheduled = True  # This step will be analyzed.

        # For unsupported or step-level flakes, still save them for monitoring.
        _, saved = request.Save(
            retry_on_conflict=False)  # Create a new version.

        if not saved or not supported_build_step or request.is_step:
            # No new analysis if:
            # 1. Another analysis was just triggered.
            # 2. No representative step is Swarmed Gtest.
            # 3. The flake is a step-level one.
            return 0, None

        return request.version_number, supported_build_step
    else:
        # If no bug is attached to the existing analysis or the new request, or both
        # are attached to the same bug, start a new analysis with a different
        # configuration. For a configuration that was analyzed 7 days ago, reset it
        # to use the new reported step of the same configuration.
        # TODO: move this setting to config.
        return _MergeNewRequestIntoExistingOne(request, existing_request)
    def testObscureFlakeAnalysisRequest(self):
        """Only requests older than the retention window get obscured."""
        self.mock_current_user(user_email='*****@*****.**', is_admin=True)

        # Freeze "now" so the retention-window boundaries are deterministic.
        frozen_now = datetime(2017, 5, 5, 22, 50, 10)
        self.MockUTCNow(frozen_now)
        within_retention = obscure_emails._TimeBeforeNow(days=5)
        beyond_retention = obscure_emails._TimeBeforeNow(
            days=obscure_emails._REQUEST_RECORD_RENTENSION_DAYS + 10)

        # Edited before the retention cutoff: emails should be obscured.
        old_request = FlakeAnalysisRequest.Create('flake1', False, 123)
        old_request.user_emails.append('*****@*****.**')
        old_request.user_emails_obscured = False
        old_request.user_emails_last_edit = beyond_retention
        old_request.Save()

        # Edited recently: emails should be left untouched.
        recent_request = FlakeAnalysisRequest.Create('flake2', False, 321)
        recent_request.user_emails.append('*****@*****.**')
        recent_request.user_emails_obscured = False
        recent_request.user_emails_last_edit = valid_request_time = within_retention
        recent_request.Save()

        response = self.test_app.get('/obscure-emails',
                                     params={'format': 'json'})
        self.assertEqual(
            {
                'failure_triage_count': 0,
                'flake_triage_count': 0,
                'flake_request_aggregated_count': 1,
                'flake_request_count': 0,
            }, response.json_body)

        old_request = FlakeAnalysisRequest.GetVersion(key='flake1', version=1)
        self.assertTrue(old_request.user_emails_obscured)
        self.assertEqual(['*****@*****.**'], old_request.user_emails)

        recent_request = FlakeAnalysisRequest.GetVersion(key='flake2',
                                                         version=1)
        self.assertFalse(recent_request.user_emails_obscured)
        self.assertEqual(['*****@*****.**'], recent_request.user_emails)
 def testAddBuildStep(self):
     """AddBuildStep keeps only the earliest-reported step per configuration."""
     early = datetime(2016, 10, 1, 0, 0, 0)
     later = datetime(2016, 10, 2, 0, 0, 0)
     latest = datetime(2016, 10, 2, 1, 0, 0)
     middle = datetime(2016, 10, 2, 0, 30, 0)

     request = FlakeAnalysisRequest.Create('flaky_test', False, 123)
     # First report on each configuration is accepted.
     self.assertTrue(request.AddBuildStep('m', 'b1', 1, 's', early))
     self.assertTrue(request.AddBuildStep('m', 'b2', 10, 's', later))
     # A later report on an existing configuration is rejected...
     self.assertFalse(request.AddBuildStep('m', 'b2', 11, 's', latest))
     # ...but an earlier one replaces the stored step.
     self.assertTrue(request.AddBuildStep('m', 'b2', 9, 's', middle))

     self.assertEqual(2, len(request.build_steps), request.build_steps)
     self.assertEqual(BuildStep.Create('m', 'b1', 1, 's', early),
                      request.build_steps[0])
     self.assertEqual(BuildStep.Create('m', 'b2', 9, 's', middle),
                      request.build_steps[1])
  def run(self, master_name, builder_name, build_number):
    """Triggers flake analyses for flaky tests found by build failure analysis.

    Args:
      master_name (str): The master name.
      builder_name (str): The builder name.
      build_number (str): The build number.
    """

    analysis = WfAnalysis.Get(master_name, builder_name, build_number)

    if not analysis or not analysis.failure_result_map:  # pragma: no cover
      return

    # Iterating a dict yields its keys on both Python 2 and 3, unlike the
    # Python-2-only dict.iterkeys() this used before.
    for step in analysis.failure_result_map:
      task = WfSwarmingTask.Get(
          master_name, builder_name, build_number, step)

      if not task:  # pragma: no cover
        continue

      flaky_tests = task.classified_tests.get('flaky_tests', [])

      if not flaky_tests:  # pragma: no cover
        continue

      # Trigger a master flake analysis on each detected flaky test.
      # TODO(lijeffrey): rerun all tests once typical load is determined to be
      # within reasonable limits. For experimentation with automatic flakiness
      # checking, only run 1 test per analysis to avoid excessive load on the
      # swarming server in case there are too many flaky tests per analysis
      # for now.
      test_name = flaky_tests[0]
      request = FlakeAnalysisRequest.Create(test_name, False, None)
      request.AddBuildStep(
          master_name, builder_name, build_number, step,
          time_util.GetUTCNow())
      scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
          request, '*****@*****.**', False,
          triggering_sources.FINDIT_PIPELINE)

      if scheduled:  # pragma: no branch
        logging.info('%s/%s/%s has %s flaky tests.',
                     master_name, builder_name, build_number, len(flaky_tests))
        logging.info('A flake analysis has been triggered for %s', test_name)
Exemple #8
0
def _ObscureFlakeAnalysisRequest():
    """Obscures the user emails in FlakeAnalysisRequest.

    Pages through every request whose emails have not yet been obscured and
    whose last edit is older than the retention window, obscures the emails,
    and returns the number of entities updated.
    """
    cutoff = _TimeBeforeNow(days=_REQUEST_RECORD_RENTENSION_DAYS)
    query = FlakeAnalysisRequest.query(
        FlakeAnalysisRequest.user_emails_obscured == False,
        FlakeAnalysisRequest.user_emails_last_edit < cutoff)

    total = 0
    cursor = None
    has_more = True
    while has_more:
        page, cursor, has_more = query.fetch_page(_PAGE_SIZE,
                                                  start_cursor=cursor)
        for request in page:
            request.user_emails = email_util.ObscureEmails(
                request.user_emails, ['google.com'])
            request.user_emails_obscured = True
        # Persist the whole page in one batch write.
        ndb.put_multi(page)
        total += len(page)
    return total
 def testGetNormalizedConfigurationNames(self):
     """Normalization resolves names only for a matching configuration."""
     reported_time = datetime(2016, 11, 16)
     request = FlakeAnalysisRequest.Create('t', False, 123)

     step = BuildStep.Create('m', 'b', 123, 's', reported_time)
     step.wf_master_name = 'm'
     step.wf_builder_name = 'b'
     step.wf_build_number = 123
     step.wf_step_name = 's'
     request.build_steps.append(step)

     # Unknown configuration: nothing to resolve.
     self.assertEqual((None, None),
                      request._GetNormalizedConfigurationNames('m2', 'b2'))
     # Known configuration: resolves to the waterfall master/builder names.
     self.assertEqual(('m', 'b'),
                      request._GetNormalizedConfigurationNames('m', 'b'))
  def testRequestUnsupportedAnalysis(self, _):
    # Requesting an analysis for a flake whose build step is neither swarmed
    # nor supported should surface a "not supported" error page to the caller.
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 's'
    test_name = 't'

    previous_request = FlakeAnalysisRequest.Create(test_name, False, None)
    previous_request.AddBuildStep(
        master_name, builder_name, build_number, step_name, None)
    previous_request.swarmed = False
    previous_request.supported = False
    # NOTE(review): previous_request is never Save()d here, unlike the sibling
    # testRequestExistingAnalysis — presumably the decorator-injected mock `_`
    # supplies the unsupported result; confirm this is intentional.

    self.assertRaisesRegexp(
        webtest.app.AppError,
        re.compile('.*not supported.*', re.MULTILINE | re.DOTALL),
        self.test_app.get,
        '/waterfall/flake',
        params={
            'url': buildbot.CreateBuildUrl(
                master_name, builder_name, build_number),
            'step_name': step_name,
            'test_name': test_name,
            'format': 'json'})
 def testWaterfallFlake(self):
     """A step reported from a waterfall master is not flagged as CQ."""
     flake_request = FlakeAnalysisRequest.Create('flaky_test', False, 123)
     flake_request.AddBuildStep('chromium.linux', 'b1', 1, 's',
                                datetime(2016, 11, 14))
     self.assertFalse(flake_request.on_cq)
Exemple #12
0
def ScheduleAnalysisForFlake(request, user_email, is_admin, triggering_source):
    """Schedules an analysis on the flake in the given request if needed.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.
    user_email (str): The email of the requester.
    is_admin (bool): Whether the requester is an admin.
    triggering_source (int): Where the request is coming from, either Findit
      UI (check flake page), pipeline (from analysis) or Findit API.

  Returns:
    True if an analysis was scheduled; False if a new analysis is not needed;
    None if the user has no permission to.
  """
    # NOTE(review): assert is stripped under `python -O`; consider raising an
    # explicit exception for this input validation instead.
    assert len(request.build_steps), 'At least 1 build step is needed!'

    if not IsAuthorizedUser(user_email, is_admin):
        logging.info('user:%s, admin:%s', user_email, is_admin)
        return None
    request.user_emails = [user_email]

    # Google-account requesters are treated as manual triggers; everything
    # else counts as automatic (used for monitoring labels below).
    manually_triggered = user_email.endswith('@google.com')

    trigger_action = 'manual' if manually_triggered else 'auto'
    flake_source = 'cq' if request.on_cq else 'waterfall'

    # Map each reported build step onto its matching waterfall step so the
    # normalized (wf_*) configuration names are populated.
    for build_step in request.build_steps:
        step_mapper.FindMatchingWaterfallStep(build_step, request.name)

    version_number, build_step = _CheckForNewAnalysis(request)
    if version_number and build_step:
        # A new analysis is needed.
        # TODO(lijeffrey): Add support for the force flag to trigger a rerun.
        logging.info('A new analysis is needed for: %s', build_step)
        normalized_test = TestInfo(build_step.wf_master_name,
                                   build_step.wf_builder_name,
                                   build_step.wf_build_number,
                                   build_step.wf_step_name, request.name)
        original_test = TestInfo(build_step.master_name,
                                 build_step.builder_name,
                                 build_step.build_number, build_step.step_name,
                                 request.name)
        analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
            normalized_test,
            original_test,
            bug_id=request.bug_id,
            allow_new_analysis=True,
            manually_triggered=manually_triggered,
            user_email=user_email,
            triggering_source=triggering_source,
            queue_name=constants.WATERFALL_ANALYSIS_QUEUE)
        if analysis:
            # TODO: put this in a transaction.
            # Re-fetch the exact request version saved by _CheckForNewAnalysis
            # and attach the newly scheduled analysis to it.
            request = FlakeAnalysisRequest.GetVersion(key=request.name,
                                                      version=version_number)
            request.analyses.append(analysis.key)
            request.put()
            logging.info('A new analysis was triggered successfully: %s',
                         analysis.key)
            monitoring.flakes.increment({
                'operation': 'analyze',
                'trigger': trigger_action,
                'source': flake_source,
            })
            return True
        else:
            logging.error('But new analysis was not triggered!')
    else:
        logging.info('No new analysis is needed: %s', request)

    # Reached when no analysis was needed or scheduling failed.
    monitoring.flakes.increment({
        'operation': 'skip',
        'trigger': trigger_action,
        'source': flake_source,
    })
    return False
  def testRequestExistingAnalysis(self, *_):
    """A request matching a previous analysis reuses that analysis' results."""
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    step_name = 's'
    test_name = 't'

    # A completed analysis on the previous build with a single data point.
    previous_analysis = MasterFlakeAnalysis.Create(
        master_name, builder_name, build_number - 1, step_name, test_name)
    point = DataPoint()
    point.build_number = build_number - 1
    point.pass_rate = 0.9
    previous_analysis.data_points.append(point)
    previous_analysis.status = analysis_status.COMPLETED
    previous_analysis.suspected_flake_build_number = 100
    previous_analysis.request_time = datetime.datetime(2016, 10, 1, 12, 10, 0)
    previous_analysis.start_time = datetime.datetime(2016, 10, 1, 12, 10, 5)
    previous_analysis.end_time = datetime.datetime(2016, 10, 1, 13, 10, 0)
    previous_analysis.algorithm_parameters = {'iterations_to_rerun': 100}
    previous_analysis.Save()

    # A saved request that already points at that analysis for the same
    # (normalized) configuration.
    previous_request = FlakeAnalysisRequest.Create(test_name, False, None)
    step = BuildStep.Create(
        master_name, builder_name, build_number, step_name, None)
    step.wf_master_name = step.master_name
    step.wf_builder_name = step.builder_name
    step.wf_build_number = step.build_number
    step.wf_step_name = step.step_name
    previous_request.build_steps.append(step)
    previous_request.analyses.append(previous_analysis.key)
    previous_request.Save()

    self.mock_current_user(user_email='*****@*****.**')

    response = self.test_app.get('/waterfall/flake', params={
        'url': buildbot.CreateBuildUrl(master_name, builder_name, build_number),
        'step_name': step_name,
        'test_name': test_name,
        'format': 'json'})

    expected_check_flake_result = {
        'key': previous_analysis.key.urlsafe(),
        'pass_rates': [[12345, 0.9, '1', 100, 'git_hash_2', 12344,
                        'git_hash_1']],
        'analysis_status': STATUS_TO_DESCRIPTION.get(previous_analysis.status),
        'master_name': master_name,
        'builder_name': builder_name,
        'build_number': build_number - 1,
        'step_name': step_name,
        'test_name': test_name,
        'request_time': '2016-10-01 12:10:00 UTC',
        'build_level_number': 1,
        'revision_level_number': 0,
        'error': None,
        'iterations_to_rerun': 100,
        'pending_time': '00:00:05',
        'duration': '00:59:55',
        'suspected_flake': {
            'build_number': 100,
            'commit_position': 12345,
            'git_hash': 'a_git_hash',
            'triage_result': 0
        },
        'version_number': 1,
        'show_input_ui': False,
        'culprit': {},
        'try_job_status': None,
        'last_attempted_swarming_task': {
            'task_id': None,
            'build_number': None
        },
        'last_attempted_try_job': {},
        'user_email': '*****@*****.**'
    }

    self.assertEqual(200, response.status_int)
    self.assertEqual(expected_check_flake_result, response.json_body)
    def HandleGet(self):
        """Serves the result page/JSON for a flake analysis.

        The analysis is located either directly via the urlsafe datastore
        'key' parameter, or by parsing a build URL plus step/test names — in
        which case a new analysis may be scheduled before rendering.
        """
        key = self.request.get('key')
        if key:
            # Direct lookup by the analysis entity's urlsafe key.
            analysis = ndb.Key(urlsafe=key).get()
            if not analysis:  # pragma: no cover
                return self.CreateError('Analysis of flake is not found', 404)
        else:
            # Locate (or schedule) the analysis from build URL + step + test.
            build_url = self.request.get('url', '').strip()
            build_info = buildbot.ParseBuildUrl(build_url)
            if not build_info:  # pragma: no cover
                return self.CreateError('Unknown build info!', 400)
            master_name, builder_name, build_number = build_info

            step_name = self.request.get('step_name', '').strip()
            test_name = self.request.get('test_name', '').strip()
            bug_id = self.request.get('bug_id', '').strip()
            # TODO(lijeffrey): Add support for force flag to trigger a rerun.

            error = self._ValidateInput(step_name, test_name, bug_id)

            if error:  # pragma: no cover
                return error

            build_number = int(build_number)
            bug_id = int(bug_id) if bug_id else None
            user_email = auth_util.GetUserEmail()
            is_admin = auth_util.IsCurrentUserAdmin()

            # Ask the service to schedule an analysis if one is needed.
            # scheduled is True/False/None (None means no permission).
            request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
            request.AddBuildStep(master_name, builder_name, build_number,
                                 step_name, time_util.GetUTCNow())
            scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
                request, user_email, is_admin, triggering_sources.FINDIT_UI)

            analysis = MasterFlakeAnalysis.GetVersion(master_name,
                                                      builder_name,
                                                      build_number, step_name,
                                                      test_name)

            if not analysis:
                if scheduled is None:
                    # User does not have permission to trigger, nor was any previous
                    # analysis triggered to view.
                    return {
                        'template': 'error.html',
                        'data': {
                            'error_message':
                            ('You could schedule an analysis for flaky test only '
                             'after you login with google.com account.'),
                            'login_url':
                            self.GetLoginUrl(),
                        },
                        'return_code': 401,
                    }

                # Check if a previous request has already covered this analysis so use
                # the results from that analysis.
                request = FlakeAnalysisRequest.GetVersion(key=test_name)

                if not (request and request.analyses):
                    return {
                        'template': 'error.html',
                        'data': {
                            'error_message':
                            ('Flake analysis is not supported for this request. Either'
                             ' the build step may not be supported or the test is not '
                             'swarmed.'),
                        },
                        'return_code': 400,
                    }

                analysis = request.FindMatchingAnalysisForConfiguration(
                    master_name, builder_name)

                if not analysis:  # pragma: no cover
                    logging.error('Flake analysis was deleted unexpectedly!')
                    return {
                        'template': 'error.html',
                        'data': {
                            'error_message':
                            'Flake analysis was deleted unexpectedly!',
                        },
                        'return_code': 400
                    }

        # From here on `analysis` is guaranteed to exist; assemble the data
        # the result template / JSON response needs.
        suspected_flake = _GetSuspectedFlakeInfo(analysis)
        culprit = _GetCulpritInfo(analysis)
        build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
            analysis.data_points)

        data = {
            'key':
            analysis.key.urlsafe(),
            'master_name':
            analysis.master_name,
            'builder_name':
            analysis.builder_name,
            'build_number':
            analysis.build_number,
            'step_name':
            analysis.step_name,
            'test_name':
            analysis.test_name,
            'pass_rates': [],
            'analysis_status':
            analysis.status_description,
            'try_job_status':
            analysis_status.STATUS_TO_DESCRIPTION.get(analysis.try_job_status),
            'last_attempted_swarming_task':
            _GetLastAttemptedSwarmingTaskDetails(analysis),
            'last_attempted_try_job':
            _GetLastAttemptedTryJobDetails(analysis),
            'version_number':
            analysis.version_number,
            'suspected_flake':
            suspected_flake,
            'culprit':
            culprit,
            'request_time':
            time_util.FormatDatetime(analysis.request_time),
            'build_level_number':
            build_level_number,
            'revision_level_number':
            revision_level_number,
            'error':
            analysis.error_message,
            'iterations_to_rerun':
            analysis.iterations_to_rerun,
            'show_input_ui':
            self._ShowInputUI(analysis)
        }

        # Triage history is visible to admins only, and only once the
        # analysis completed.
        if (users.is_current_user_admin() and analysis.completed
                and analysis.triage_history):
            data['triage_history'] = analysis.GetTriageHistory()

        # Pending time runs until the analysis started (or now, if it hasn't).
        data['pending_time'] = time_util.FormatDuration(
            analysis.request_time, analysis.start_time
            or time_util.GetUTCNow())
        if analysis.status != analysis_status.PENDING:
            data['duration'] = time_util.FormatDuration(
                analysis.start_time, analysis.end_time
                or time_util.GetUTCNow())

        data['pass_rates'] = _GetCoordinatesData(analysis)

        return {'template': 'flake/result.html', 'data': data}