Code example #1
  def testNeedNewAnalysisWhenPreviousOneWasForAnotherBug(self):
    existing_request = FlakeAnalysisRequest.Create('flake', False, 123)
    existing_request.user_emails = ['*****@*****.**']
    existing_request.Save()

    request = FlakeAnalysisRequest.Create('flake', False, 456)
    step1 = BuildStep.Create('m', 'b1', 10, 's', datetime(2016, 10, 1))
    step1.swarmed = False
    step1.supported = False
    step2 = BuildStep.Create('m', 'b2', 10, 's', datetime(2016, 10, 1))
    step2.swarmed = True
    step2.supported = True
    request.build_steps = [step1, step2]
    request.user_emails = ['*****@*****.**']

    mocked_now = datetime(2017, 5, 1, 10, 10, 10)
    self.MockUTCNow(mocked_now)

    version, step = flake_analysis_service._CheckForNewAnalysis(request)

    self.assertEqual(2, version)
    new_request = FlakeAnalysisRequest.GetVersion(key='flake', version=version)
    self.assertEqual(['*****@*****.**', '*****@*****.**'],
                     new_request.user_emails)
    self.assertFalse(new_request.user_emails_obscured)
    self.assertEqual(mocked_now, new_request.user_emails_last_edit)

    self.assertIsNotNone(step)
    self.assertTrue(step.scheduled)
    self.assertTrue(step.swarmed)
    self.assertTrue(step.supported)
Code example #2
  def testNeedNewAnalysisWithTooOldPreviousAnalysis(self):
    existing_request = FlakeAnalysisRequest.Create('flake', False, None)
    step1 = BuildStep.Create('m', 'b1', 11, 's', datetime(2016, 10, 1))
    step1.swarmed = True
    step1.supported = True
    step1.scheduled = True
    step2 = BuildStep.Create('m', 'b2', 12, 's', datetime(2016, 10, 1))
    step2.swarmed = True
    step2.supported = True
    step2.scheduled = True
    existing_request.supported = True
    existing_request.swarmed = True
    existing_request.user_emails = ['*****@*****.**']
    existing_request.build_steps = [step1, step2]
    existing_request.Save()

    request = FlakeAnalysisRequest.Create('flake', False, 123)
    step3 = BuildStep.Create('m', 'b2', 80, 's', datetime(2016, 10, 20))
    step3.swarmed = True
    step3.supported = True
    request.build_steps = [step3]
    request.user_emails = ['*****@*****.**']

    version, step = flake_analysis_service._CheckForNewAnalysis(request)

    self.assertEqual(1, version)
    self.assertIsNotNone(step)
    self.assertEqual(80, step.build_number)

    request = FlakeAnalysisRequest.GetVersion(key='flake')
    self.assertEqual(['*****@*****.**', '*****@*****.**'],
                     request.user_emails)
Code example #3
  def testNeedNewAnalysisWithFreshEnoughPreviousAnalysisWithRerunFlag(self):
    existing_request = FlakeAnalysisRequest.Create('flake', False, 123)
    step1 = BuildStep.Create('m', 'b1', 11, 's', datetime(2016, 10, 1))
    step1.swarmed = True
    step1.supported = True
    step1.scheduled = True
    step2 = BuildStep.Create('m', 'b2', 12, 's', datetime(2016, 10, 1))
    step2.swarmed = True
    step2.supported = True
    step2.scheduled = True
    existing_request.supported = True
    existing_request.swarmed = True
    existing_request.build_steps = [step1, step2]
    existing_request.Save()

    request = FlakeAnalysisRequest.Create('flake', False, 123)
    step3 = BuildStep.Create('m', 'b2', 20, 's', datetime(2016, 10, 1))
    step3.swarmed = True
    step3.supported = True
    request.build_steps = [step3]
    request.user_emails = ['*****@*****.**']

    mocked_now = datetime(2016, 10, 1)
    self.MockUTCNow(mocked_now)

    version, step = flake_analysis_service._CheckForNewAnalysis(request, True)

    self.assertEqual(1, version)
    new_request = FlakeAnalysisRequest.GetVersion(key='flake', version=version)
    self.assertEqual(['*****@*****.**'], new_request.user_emails)
    self.assertFalse(new_request.user_emails_obscured)
    self.assertEqual(datetime(2016, 10, 1), new_request.user_emails_last_edit)

    self.assertIsNotNone(step)
    self.assertTrue(step.scheduled)
Code example #4
def _CheckForNewAnalysis(request, rerun=False):
    """Checks if a new analysis is needed for the requested flake.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.
    rerun (bool): Indicates a forced rerun by admin.

  Returns:
    (version_number, build_step)
    version_number (int): The version of the FlakeAnalysisRequest if a new
        analysis is needed; otherwise 0.
    build_step (BuildStep): a BuildStep instance if a new analysis is needed;
        otherwise None.
  """
    existing_request = FlakeAnalysisRequest.GetVersion(key=request.name)
    if not existing_request or (existing_request.bug_id and request.bug_id
                                and existing_request.bug_id != request.bug_id):
        # If there is no existing analysis, or the last analysis was for a
        # different bug, randomly pick one configuration for a new analysis.
        if existing_request:
            # Make a copy to preserve the version number of existing analysis and
            # prevent concurrent analyses of the same flake.
            user_emails = (email_util.ObscureEmails(
                existing_request.user_emails, ['google.com']) +
                           list(set(request.user_emails)))
            existing_request.CopyFrom(request)
            request = existing_request
            request.user_emails = user_emails
        request.user_emails_obscured = False
        request.user_emails_last_edit = time_util.GetUTCNow()

        swarmed, supported, supported_build_step = _CheckFlakeSwarmedAndSupported(
            request)
        request.swarmed = swarmed
        request.supported = supported

        if supported_build_step and not request.is_step:
            supported_build_step.scheduled = True  # This step will be analyzed.

        # For unsupported or step-level flakes, still save them for monitoring.
        _, saved = request.Save(
            retry_on_conflict=False)  # Create a new version.

        if not saved or not supported_build_step or request.is_step:
            # No new analysis if:
            # 1. Another analysis was just triggered.
            # 2. No representative step is a swarmed gtest.
            # 3. The flake is a step-level one.
            return 0, None

        return request.version_number, supported_build_step
    else:
        # If no bug is attached to the existing analysis or the new request, or
        # both are attached to the same bug, start a new analysis with a
        # different configuration. For a configuration whose last analysis is
        # more than 7 days old, reset it to use the newly reported step of the
        # same configuration.
        # TODO: move this setting to config.
        return _MergeNewRequestIntoExistingOne(request, existing_request,
                                               rerun)
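
The docstring above defines the contract that callers rely on: a truthy version number together with a BuildStep means a new analysis should be scheduled, while (0, None) means there is nothing to do. Below is a minimal caller sketch under that assumption; handle_new_analysis is a hypothetical helper written only for illustration, and flake_analysis_service / FlakeAnalysisRequest are assumed to be importable exactly as in the tests above.

# Hypothetical illustration only: how a caller might interpret the
# (version_number, build_step) tuple returned by _CheckForNewAnalysis.
def handle_new_analysis(request, rerun=False):
  version_number, build_step = flake_analysis_service._CheckForNewAnalysis(
      request, rerun)
  if not version_number or not build_step:
    # Another analysis was just triggered, no supported step exists, or the
    # flake is step-level: nothing to schedule.
    return False
  # The request saved by _CheckForNewAnalysis can be re-read at the exact
  # version created for this analysis, e.g. to attach an analysis key later.
  saved_request = FlakeAnalysisRequest.GetVersion(
      key=request.name, version=version_number)
  assert saved_request is not None
  return True
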
Code example #5
  def testObscureFlakeAnalysisRequest(self):
    mocked_utcnow = datetime(2017, 5, 5, 22, 50, 10)
    self.MockUTCNow(mocked_utcnow)
    valid_request_time = obscure_emails._TimeBeforeNow(days=5)
    invalid_request_time = obscure_emails._TimeBeforeNow(
        days=obscure_emails._REQUEST_RECORD_RENTENSION_DAYS + 10)

    old_request = FlakeAnalysisRequest.Create('flake1', False, 123)
    old_request.user_emails.append('*****@*****.**')
    old_request.user_emails_obscured = False
    old_request.user_emails_last_edit = invalid_request_time
    old_request.Save()

    recent_request = FlakeAnalysisRequest.Create('flake2', False, 321)
    recent_request.user_emails.append('*****@*****.**')
    recent_request.user_emails_obscured = False
    recent_request.user_emails_last_edit = valid_request_time
    recent_request.Save()

    response = self.test_app.get(
        '/obscure-emails',
        params={'format': 'json'},
        headers={'X-AppEngine-Cron': 'true'},
    )
    expected_response = {
        'failure_triage_count': 0,
        'flake_triage_count': 0,
        'flake_request_aggregated_count': 1,
        'flake_request_count': 0,
    }
    self.assertEqual(expected_response, response.json_body)

    old_request = FlakeAnalysisRequest.GetVersion(key='flake1', version=1)
    self.assertTrue(old_request.user_emails_obscured)
    self.assertEqual(['*****@*****.**'], old_request.user_emails)

    recent_request = FlakeAnalysisRequest.GetVersion(key='flake2', version=1)
    self.assertFalse(recent_request.user_emails_obscured)
    self.assertEqual(['*****@*****.**'], recent_request.user_emails)
Code example #6
def ScheduleAnalysisForFlake(request,
                             user_email,
                             is_admin,
                             triggering_source,
                             rerun=False):
    """Schedules an analysis on the flake in the given request if needed.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.
    user_email (str): The email of the requester.
    is_admin (bool): Whether the requester is an admin.
    triggering_source (int): Where the request is coming from, either Findit
      UI (check flake page), pipeline (from analysis) or Findit API.
    rerun (bool): This is a rerun, so force it to be run.

  Returns:
    True if an analysis was scheduled; False if a new analysis is not needed;
    None if the user has no permission to.
  """
    # TODO(crbug.com/853325): Refactor rerun logic.

    assert len(request.build_steps), 'At least 1 build step is needed!'

    if not IsAuthorizedUser(user_email, is_admin):
        logging.info(
            'Schedule failed because user is not authorized. user:%s, admin:%s',
            user_email, is_admin)
        return None
    request.bug_reported_by = triggering_source
    request.user_emails = [user_email]

    manually_triggered = user_email.endswith('@google.com')

    trigger_action = 'manual' if manually_triggered else 'auto'
    flake_source = 'cq' if request.on_cq else 'waterfall'

    build_steps = []
    for build_step in request.build_steps:
        step_mapper.FindMatchingWaterfallStep(build_step, request.name)
        # crbug.com/844516: temporarily bail out for fuchsia and
        # 'Win10 Tests x64 (dbg)'.
        if build_step.has_matching_waterfall_step and (
                build_step.wf_builder_name == 'Win10 Tests x64 (dbg)'
                or 'fuchsia' in build_step.wf_builder_name.lower()):
            continue
        build_steps.append(build_step)
    request.build_steps = build_steps
    if not request.build_steps:
        logging.info('Flake %s on android/fuchsia temporarily unsupported.',
                     request.name)
        return None

    canonical_step_name = 'unknown'
    isolate_target_name = 'unknown'
    if request.build_steps[0].step_metadata:
        # Tries to use step_metadata from request.build_steps as default.
        canonical_step_name = request.build_steps[0].step_metadata.get(
            'canonical_step_name') or canonical_step_name
        isolate_target_name = request.build_steps[0].step_metadata.get(
            'isolate_target_name') or isolate_target_name

    version_number, build_step = _CheckForNewAnalysis(request, rerun)

    if build_step and build_step.step_metadata:
        # Uses the step_metadata from the step that the analysis will actually
        # run for.
        canonical_step_name = (
            build_step.step_metadata.get('canonical_step_name')
            or canonical_step_name)
        isolate_target_name = (
            build_step.step_metadata.get('isolate_target_name')
            or isolate_target_name)

    if version_number and build_step:
        # A new analysis is needed.
        logging.info('A new analysis is needed for: %s', build_step)
        normalized_test = TestInfo(build_step.wf_master_name,
                                   build_step.wf_builder_name,
                                   build_step.wf_build_number,
                                   build_step.wf_step_name, request.name)
        original_test = TestInfo(build_step.master_name,
                                 build_step.builder_name,
                                 build_step.build_number, build_step.step_name,
                                 request.name)
        analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
            normalized_test,
            original_test,
            request.flake_key,
            bug_id=request.bug_id,
            allow_new_analysis=True,
            manually_triggered=manually_triggered,
            user_email=user_email,
            triggering_source=triggering_source,
            queue_name=constants.WATERFALL_ANALYSIS_QUEUE,
            force=rerun)
        if analysis:
            # TODO: put this in a transaction.
            request = FlakeAnalysisRequest.GetVersion(key=request.name,
                                                      version=version_number)
            request.analyses.append(analysis.key)
            request.put()
            logging.info(
                'A new analysis was triggered successfully with key: %s',
                analysis.key)

            monitoring.OnFlakeAnalysisTriggered(
                source=flake_source,
                operation='analyze',
                trigger=trigger_action,
                canonical_step_name=canonical_step_name,
                isolate_target_name=isolate_target_name)
            return True
        else:
            logging.error('But new analysis was not triggered!')
            monitoring.OnFlakeAnalysisTriggered(
                source=flake_source,
                operation='error',
                trigger=trigger_action,
                canonical_step_name=canonical_step_name,
                isolate_target_name=isolate_target_name)
    else:
        logging.info('No new analysis is needed: %s', request)
        monitoring.OnFlakeAnalysisTriggered(
            source=flake_source,
            operation='skip',
            trigger=trigger_action,
            canonical_step_name=canonical_step_name,
            isolate_target_name=isolate_target_name)
    return False
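
ScheduleAnalysisForFlake deliberately returns three distinct values: None means the caller should show a permission error, False means no new analysis was triggered (an existing request may already cover the flake), and True means a new analysis was scheduled; the handler in code example #7 below checks the None case to render a permission error. A minimal sketch of that interpretation follows, assuming flake_analysis_service is importable; trigger_for_ui and TRIGGERING_SOURCE_UI are hypothetical names used only for illustration.

# Hypothetical illustration only: interpreting the tri-state return value of
# ScheduleAnalysisForFlake as documented in its docstring.
TRIGGERING_SOURCE_UI = 0  # Placeholder for the real triggering-source constant.

def trigger_for_ui(request, user_email, is_admin):
  scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
      request, user_email, is_admin, TRIGGERING_SOURCE_UI)
  if scheduled is None:
    # The user is not authorized; surface a permission error.
    return 'error: %s is not authorized to trigger an analysis' % user_email
  if scheduled:
    return 'a new analysis was scheduled for %s' % request.name
  # False: no new analysis was needed; an existing FlakeAnalysisRequest may
  # already cover this flake and can be looked up by request.name.
  return 'no new analysis needed for %s' % request.name
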
Code example #7
File: check_flake.py  Project: xinghun61/infra
  def HandlePost(self):
    # Information needed to execute this endpoint will be populated by the
    # branches below.
    rerun = self.request.get('rerun', '0').strip() == '1'
    cancel = self.request.get('cancel', '0').strip() == '1'
    analyze_recent_commit = (
        self.request.get('analyze_recent_commit', '0').strip() == '1')
    if rerun:  # Rerun an analysis.
      return self._HandleRerunAnalysis()
    elif cancel:  # Force an analysis to be cancelled.
      return self._HandleCancelAnalysis()
    elif analyze_recent_commit:
      return self._HandleAnalyzeRecentCommit()
    else:  # Regular POST requests to start an analysis.
      # If the key hasn't been specified, then we get the information from
      # other URL parameters.
      build_url = self.request.get('url', '').strip()
      build_info = buildbot.ParseBuildUrl(build_url)
      if not build_info:
        return self.CreateError('Unknown build info!', 400)
      master_name, builder_name, build_number = build_info

      step_name = self.request.get('step_name', '').strip()
      test_name = self.request.get('test_name', '').strip()
      bug_id = self.request.get('bug_id', '').strip()

      error = self._ValidateInput(step_name, test_name, bug_id)
      if error:
        return error

      build_number = int(build_number)
      bug_id = int(bug_id) if bug_id else None

      request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
      request.AddBuildStep(master_name, builder_name, build_number, step_name,
                           time_util.GetUTCNow())
      analysis, scheduled = self._CreateAndScheduleFlakeAnalysis(
          request, master_name, builder_name, build_number, step_name,
          test_name, bug_id, False)

      if not analysis:
        if scheduled is None:
          # The user has no permission to trigger an analysis, and there is
          # no previous analysis to view.
          return {
              'template': 'error.html',
              'data': {
                  'error_message': (
                      'No permission to schedule an analysis for flaky test. '
                      'Please log in with your @google.com account first.'),
              },
              'return_code': 403,
          }

        # Check whether a previous request has already covered this analysis;
        # if so, use the results from that analysis.
        request = FlakeAnalysisRequest.GetVersion(key=test_name)

        if not (request and request.analyses):
          return {
              'template': 'error.html',
              'data': {
                  'error_message': (
                      'Flake analysis is not supported for "%s/%s". Either '
                      'the test type is not supported or the test is not '
                      'swarmed yet.' % (step_name, test_name)),
              },
              'return_code': 400,
          }

        analysis = request.FindMatchingAnalysisForConfiguration(
            master_name, builder_name)

        if not analysis:
          logging.error('Flake analysis was deleted unexpectedly!')
          return {
              'template': 'error.html',
              'data': {
                  'error_message': 'Flake analysis was deleted unexpectedly!',
              },
              'return_code': 404,
          }

      logging.info('Analysis: %s has a scheduled status of: %r', analysis.key,
                   scheduled)
      return self.CreateRedirect(
          '/p/chromium/flake-portal/analysis/analyze?redirect=1&key=%s' %
          analysis.key.urlsafe())