def testNeedNewAnalysisWithFreshEnoughPreviousAnalysisWithRerunFlag(self):
  """A rerun forces a new version even when the previous analysis is fresh."""
  old_request = FlakeAnalysisRequest.Create('flake', False, 123)
  old_steps = []
  for builder, build in (('b1', 11), ('b2', 12)):
    analyzed_step = BuildStep.Create('m', builder, build, 's',
                                     datetime(2016, 10, 1))
    analyzed_step.swarmed = True
    analyzed_step.supported = True
    analyzed_step.scheduled = True
    old_steps.append(analyzed_step)
  old_request.supported = True
  old_request.swarmed = True
  old_request.build_steps = old_steps
  old_request.Save()

  incoming = FlakeAnalysisRequest.Create('flake', False, 123)
  reported_step = BuildStep.Create('m', 'b2', 20, 's', datetime(2016, 10, 1))
  reported_step.swarmed = True
  reported_step.supported = True
  incoming.build_steps = [reported_step]
  incoming.user_emails = ['*****@*****.**']

  mocked_now = datetime(2016, 10, 1)
  self.MockUTCNow(mocked_now)

  # rerun=True must bypass the freshness check.
  version, step = flake_analysis_service._CheckForNewAnalysis(incoming, True)
  self.assertEqual(1, version)

  saved = FlakeAnalysisRequest.GetVersion(key='flake', version=version)
  self.assertEqual(['*****@*****.**'], saved.user_emails)
  self.assertFalse(saved.user_emails_obscured)
  self.assertEqual(datetime(2016, 10, 1), saved.user_emails_last_edit)
  self.assertIsNotNone(step)
  self.assertTrue(step.scheduled)
def testNeedNewAnalysisWithTooOldPreviousAnalysis(self):
  """A report far newer than the last analysis forces a new version."""
  previous = FlakeAnalysisRequest.Create('flake', False, None)
  old_steps = []
  for builder, build in (('b1', 11), ('b2', 12)):
    old_step = BuildStep.Create('m', builder, build, 's',
                                datetime(2016, 10, 1))
    old_step.swarmed = True
    old_step.supported = True
    old_step.scheduled = True
    old_steps.append(old_step)
  previous.supported = True
  previous.swarmed = True
  previous.user_emails = ['*****@*****.**']
  previous.build_steps = old_steps
  previous.Save()

  incoming = FlakeAnalysisRequest.Create('flake', False, 123)
  new_step = BuildStep.Create('m', 'b2', 80, 's', datetime(2016, 10, 20))
  new_step.swarmed = True
  new_step.supported = True
  incoming.build_steps = [new_step]
  incoming.user_emails = ['*****@*****.**']

  version, step = flake_analysis_service._CheckForNewAnalysis(incoming)
  self.assertEqual(1, version)
  self.assertIsNotNone(step)
  self.assertEqual(80, step.build_number)

  # Emails from the previous request are merged into the saved one.
  merged = FlakeAnalysisRequest.GetVersion(key='flake')
  self.assertEqual(['*****@*****.**', '*****@*****.**'], merged.user_emails)
def testNotNeedNewAnalysisWithFreshEnoughPreviousAnalysis(self):
  """No new analysis when the previous one is recent enough."""
  previous = FlakeAnalysisRequest.Create('flake', False, 123)
  prior_steps = []
  for builder, build in (('b1', 11), ('b2', 12)):
    prior_step = BuildStep.Create('m', builder, build, 's',
                                  datetime(2016, 10, 1))
    prior_step.swarmed = True
    prior_step.supported = True
    prior_step.scheduled = True
    prior_steps.append(prior_step)
  previous.supported = True
  previous.swarmed = True
  previous.build_steps = prior_steps
  previous.Save()

  incoming = FlakeAnalysisRequest.Create('flake', False, 123)
  reported = BuildStep.Create('m', 'b2', 20, 's', datetime(2016, 10, 3))
  reported.swarmed = True
  reported.supported = True
  incoming.build_steps = [reported]

  version, step = flake_analysis_service._CheckForNewAnalysis(incoming)
  self.assertEqual(0, version)
  self.assertIsNone(step)
def testNeedNewAnalysisWithADifferentFormerReportedStep(self):
  """An unsupported new config falls back to a former unscheduled one."""
  previous = FlakeAnalysisRequest.Create('flake', False, 123)
  analyzed_step = BuildStep.Create('m', 'b1', 11, 's', datetime(2016, 10, 1))
  analyzed_step.swarmed = True
  analyzed_step.supported = True
  analyzed_step.scheduled = True
  pending_step = BuildStep.Create('m', 'b2', 12, 's', datetime(2016, 10, 1))
  pending_step.swarmed = True
  pending_step.supported = True
  pending_step.scheduled = False  # Supported, but never analyzed before.
  previous.supported = True
  previous.swarmed = True
  previous.build_steps = [analyzed_step, pending_step]
  previous.Save()

  incoming = FlakeAnalysisRequest.Create('flake', False, 123)
  unsupported_step = BuildStep.Create('m', 'b3', 13, 's',
                                      datetime(2016, 10, 1))
  unsupported_step.swarmed = False
  unsupported_step.supported = False
  incoming.build_steps = [unsupported_step]

  version, step = flake_analysis_service._CheckForNewAnalysis(incoming)
  self.assertEqual(1, version)
  self.assertIsNotNone(step)
  self.assertTrue(step.scheduled)
  # The previously-unscheduled supported config is the one picked.
  self.assertEqual('b2', step.builder_name)
def testNeedNewAnalysisWhenPreviousOneWasForAnotherBug(self):
  """A request for a different bug starts a new version and merges emails."""
  previous = FlakeAnalysisRequest.Create('flake', False, 123)
  previous.user_emails = ['*****@*****.**']
  previous.Save()

  incoming = FlakeAnalysisRequest.Create('flake', False, 456)
  unsupported = BuildStep.Create('m', 'b1', 10, 's', datetime(2016, 10, 1))
  unsupported.swarmed = False
  unsupported.supported = False
  supported = BuildStep.Create('m', 'b2', 10, 's', datetime(2016, 10, 1))
  supported.swarmed = True
  supported.supported = True
  incoming.build_steps = [unsupported, supported]
  incoming.user_emails = ['*****@*****.**']

  mocked_now = datetime(2017, 5, 1, 10, 10, 10)
  self.MockUTCNow(mocked_now)

  version, step = flake_analysis_service._CheckForNewAnalysis(incoming)
  self.assertEqual(2, version)

  saved = FlakeAnalysisRequest.GetVersion(key='flake', version=version)
  self.assertEqual(['*****@*****.**', '*****@*****.**'], saved.user_emails)
  self.assertFalse(saved.user_emails_obscured)
  self.assertEqual(mocked_now, saved.user_emails_last_edit)
  self.assertIsNotNone(step)
  self.assertTrue(step.scheduled)
  self.assertTrue(step.swarmed)
  self.assertTrue(step.supported)
def AnalyzeDetectedFlakeOccurrence(flake, flake_occurrence, bug_id):
  """Analyze detected flake occurrence by Flake Detection.

  Builds a FlakeAnalysisRequest from the occurrence's build configuration and
  hands it off asynchronously under the default service account.

  Args:
    flake (Flake): The Flake triggering this analysis.
    flake_occurrence (FlakeOccurrence): A FlakeOccurrence model entity.
    bug_id (int): Id of the bug to update after the analysis finishes.
  """
  test_name = flake_occurrence.test_name
  analysis_request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
  analysis_request.flake_key = flake.key
  # Map the LUCI build configuration back to the legacy master/builder/build
  # identifiers that FlakeAnalysisRequest build steps use.
  master_name = flake_occurrence.build_configuration.legacy_master_name
  builder_name = flake_occurrence.build_configuration.luci_builder
  build_number = flake_occurrence.build_configuration.legacy_build_number
  step_ui_name = flake_occurrence.step_ui_name
  analysis_request.AddBuildStep(master_name, builder_name, build_number,
                                step_ui_name, time_util.GetUTCNow())
  analysis_request.Save()
  logging.info('flake report for detected flake occurrence: %r',
               analysis_request)
  # Processed out-of-band; not attributed to a human user.
  AsyncProcessFlakeReport(
      analysis_request,
      user_email=constants.DEFAULT_SERVICE_ACCOUNT,
      is_admin=False)
def _CheckForNewAnalysis(request, rerun=False):
  """Checks if a new analysis is needed for the requested flake.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.
    rerun (bool): Indicates a forced rerun by admin.

  Returns:
    (version_number, build_step)
    version_number (int): The version of the FlakeAnalysisRequest if a new
        analysis is needed; otherwise 0.
    build_step (BuildStep): a BuildStep instance if a new analysis is needed;
        otherwise None.
  """
  existing_request = FlakeAnalysisRequest.GetVersion(key=request.name)
  if not existing_request or (existing_request.bug_id and request.bug_id and
                              existing_request.bug_id != request.bug_id):
    # If no existing analysis or last analysis was for a different bug, randomly
    # pick one configuration for a new analysis.
    if existing_request:
      # Make a copy to preserve the version number of existing analysis and
      # prevent concurrent analyses of the same flake.
      # Old emails are obscured (privacy retention) before merging with the
      # deduplicated new requesters.
      user_emails = (
          email_util.ObscureEmails(existing_request.user_emails,
                                   ['google.com']) +
          list(set(request.user_emails)))
      existing_request.CopyFrom(request)
      request = existing_request
      request.user_emails = user_emails
    request.user_emails_obscured = False
    request.user_emails_last_edit = time_util.GetUTCNow()

    swarmed, supported, supported_build_step = _CheckFlakeSwarmedAndSupported(
        request)
    request.swarmed = swarmed
    request.supported = supported

    if supported_build_step and not request.is_step:
      supported_build_step.scheduled = True  # This step will be analyzed.

    # For unsupported or step-level flakes, still save them for monitoring.
    _, saved = request.Save(
        retry_on_conflict=False)  # Create a new version.

    if not saved or not supported_build_step or request.is_step:
      # No new analysis if:
      # 1. Another analysis was just triggered.
      # 2. No representative step is Swarmed Gtest.
      # 3. The flake is a step-level one.
      return 0, None

    return request.version_number, supported_build_step
  else:
    # If no bug is attached to the existing analysis or the new request, or both
    # are attached to the same bug, start a new analysis with a different
    # configuration. For a configuration that was analyzed 7 days ago, reset it
    # to use the new reported step of the same configuration.
    # TODO: move this setting to config.
    return _MergeNewRequestIntoExistingOne(request, existing_request, rerun)
def testCopyFrom(self):
  """CopyFrom transfers every tracked field from the source request."""
  target = FlakeAnalysisRequest.Create('flaky_test', False, 123)
  source = FlakeAnalysisRequest.Create('flaky_test', True, 456)
  source.AddBuildStep('m', 'b1', 1, 's', datetime(2016, 10, 1))
  source.user_emails = ['email']
  analysis = MasterFlakeAnalysis.Create('m', 'b', 100, 's', 't')
  analysis.Save()
  source.analyses.append(analysis.key)

  target.CopyFrom(source)

  for attr in ('is_step', 'bug_id', 'user_emails', 'build_steps', 'analyses'):
    self.assertEqual(getattr(source, attr), getattr(target, attr))
def testUnauthorizedAccess(self):
  """Scheduling returns None for a non-authorized, non-admin user."""
  unauthorized_request = FlakeAnalysisRequest.Create('flake', False, 123)
  unauthorized_request.build_steps = [
      BuildStep.Create('m', 'b2', 80, 's', datetime(2016, 10, 20))
  ]
  result = flake_analysis_service.ScheduleAnalysisForFlake(
      unauthorized_request, '*****@*****.**', False,
      triggering_sources.FINDIT_UI)
  self.assertIsNone(result)
def RunImpl(self, build_key):
  """Triggers flake analyses for flaky tests found by CI failure analysis.

  Walks every flaky test recorded on the WfAnalysis for the build, requests
  an analysis for each, and reports per-step analyzed/throttled/error counts
  to monitoring.
  """
  master_name, builder_name, build_number = build_key.GetParts()
  flake_settings = waterfall_config.GetCheckFlakeSettings()
  throttled = flake_settings.get('throttle_flake_analyses', True)
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  if not analysis or not analysis.flaky_tests:
    # Nothing to do without a prior analysis listing flaky tests.
    return
  analysis_counts = defaultdict(lambda: defaultdict(int))
  for step, flaky_tests in analysis.flaky_tests.iteritems():
    logging.info('%s/%s/%s/%s has %s flaky tests.', master_name, builder_name,
                 build_number, step, len(flaky_tests))
    for test_name in flaky_tests:
      # TODO(crbug.com/904050): Deprecate FlakeAnalysisRequest in favor of
      # Flake.
      flake = flake_util.GetFlake(_LUCI_PROJECT, step, test_name, master_name,
                                  builder_name, build_number)
      request = FlakeAnalysisRequest.Create(test_name, False, None)
      request.AddBuildStep(master_name, builder_name, build_number, step,
                           time_util.GetUTCNow())
      request.flake_key = flake.key
      scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
          request, '*****@*****.**', False, triggering_sources.FINDIT_PIPELINE)
      if scheduled:  # pragma: no branch
        analysis_counts[step]['analyzed'] += 1
        logging.info('A flake analysis has been triggered for %s/%s', step,
                     test_name)
        if throttled and len(flaky_tests) > 1:
          # Only the first flaky test per step is analyzed when throttled;
          # the remainder are counted, not scheduled.
          logging.info('Throttling is enabled, skipping %d tests.',
                       len(flaky_tests) - 1)
          analysis_counts[step]['throttled'] = len(flaky_tests) - 1
          break  # If we're throttled, stop after the first.
      else:
        analysis_counts[step]['error'] += 1
  for step, step_counts in analysis_counts.iteritems():
    # Collects metrics.
    step_metadata = step_util.GetStepMetadata(master_name, builder_name,
                                              build_number, step)
    canonical_step_name = step_metadata.get('canonical_step_name') or 'Unknown'
    isolate_target_name = step_metadata.get('isolate_target_name') or 'Unknown'
    for operation, count in step_counts.iteritems():
      monitoring.OnFlakeIdentified(canonical_step_name, isolate_target_name,
                                   operation, count)
def testAuthorizedAccessAndNewAnalysisNeededButNotTriggered(
    self, mock_mon, _):
  """When the pipeline declines to schedule, an 'error' metric is reported.

  _CheckForNewAnalysis says a new analysis is needed (version 1), but
  ScheduleAnalysisIfNeeded returns None, so ScheduleAnalysisForFlake must
  return False without touching GetVersion.
  """
  step = BuildStep.Create('m', 'b', 80, 's', datetime(2016, 10, 20))
  step.step_metadata = {
      'isolate_target_name': 'wf_s',
      'canonical_step_name': 'wf_s'
  }
  flake = Flake.Create('chromium', 's', 't', 'l')
  request = FlakeAnalysisRequest.Create('flake', False, 123)
  request.flake_key = flake.key
  request.build_steps = [step]
  user_email = '*****@*****.**'
  triggering_source = triggering_sources.FINDIT_UI

  def CheckForNewAnalysis(*_):
    # Simulate the matched-waterfall-step side effect of the real function.
    step.wf_master_name = 'wf_m'
    step.wf_builder_name = 'wf_b'
    step.wf_build_number = 100
    step.wf_step_name = 'wf_s'
    return 1, step

  normalized_test = TestInfo('wf_m', 'wf_b', 100, 'wf_s', 'flake')
  original_test = TestInfo('m', 'b', 80, 's', 'flake')
  with mock.patch.object(
      flake_analysis_service, '_CheckForNewAnalysis',
      side_effect=CheckForNewAnalysis) as (
          mocked_CheckForNewAnalysis), mock.patch.object(
              flake_analysis_service.initialize_flake_pipeline,
              'ScheduleAnalysisIfNeeded',
              return_value=None) as (
                  mocked_ScheduleAnalysisIfNeeded), mock.patch.object(
                      flake_analysis_service.FlakeAnalysisRequest,
                      'GetVersion',
                      return_value=None) as mocked_GetVersion:
    self.assertFalse(
        flake_analysis_service.ScheduleAnalysisForFlake(
            request, user_email, True, triggering_sources.FINDIT_UI))
    mocked_CheckForNewAnalysis.assert_called_once_with(request, False)
    mocked_ScheduleAnalysisIfNeeded.assert_called_once_with(
        normalized_test,
        original_test,
        flake.key,
        bug_id=123,
        allow_new_analysis=True,
        manually_triggered=False,
        user_email=user_email,
        triggering_source=triggering_source,
        queue_name=constants.WATERFALL_ANALYSIS_QUEUE,
        force=False)
    # The request version is never looked up when scheduling failed.
    self.assertFalse(mocked_GetVersion.called)
    mock_mon.assert_called_once_with(
        source='waterfall',
        operation='error',
        trigger='auto',
        canonical_step_name='wf_s',
        isolate_target_name='wf_s')
def testFindMatchingAnalysisForConfiguration(self, _):
  """Looks up the saved analysis matching a master/builder pair."""
  saved_analysis = MasterFlakeAnalysis.Create('m', 'b', 123, 's', 'test')
  saved_analysis.Save()
  request = FlakeAnalysisRequest.Create('test', False, 123)
  request.analyses.append(saved_analysis.key)
  request.Save()
  found = request.FindMatchingAnalysisForConfiguration('m', 'b')
  self.assertEqual(saved_analysis, found)
def CreateFlakeAnalysisRequest(flake):
  """Builds a FlakeAnalysisRequest mirroring the given flake's build steps."""
  request = FlakeAnalysisRequest.Create(flake.name, flake.is_step,
                                        flake.bug_id)
  for build_step in flake.build_steps:
    # Each step is stamped at the time it is added, matching how reports
    # are recorded elsewhere in this module.
    request.AddBuildStep(build_step.master_name, build_step.builder_name,
                         build_step.build_number, build_step.step_name,
                         time_util.GetUTCNow())
  return request
def testNotNeedNewAnalysisForStepLevelFlake(self):
  """Step-level flakes never get a new analysis scheduled."""
  step_level_request = FlakeAnalysisRequest.Create('flake', True, 123)
  reported = BuildStep.Create('m', 'b1', 10, 's', datetime(2016, 10, 1))
  reported.swarmed = True
  reported.supported = True
  step_level_request.build_steps = [reported]

  version, step = flake_analysis_service._CheckForNewAnalysis(
      step_level_request)
  self.assertEqual(0, version)
  self.assertIsNone(step)
def testObscureFlakeAnalysisRequest(self):
  """The cron endpoint obscures only requests past the retention window.

  One request is backdated beyond the retention period (should be obscured),
  one is recent (should be left intact); the JSON response reports the
  aggregate count.
  """
  mocked_utcnow = datetime(2017, 05, 05, 22, 50, 10)
  self.MockUTCNow(mocked_utcnow)
  # Inside the retention window -> must NOT be obscured.
  valid_request_time = obscure_emails._TimeBeforeNow(days=5)
  # Past the retention window -> must be obscured.
  invalid_request_time = obscure_emails._TimeBeforeNow(
      days=obscure_emails._REQUEST_RECORD_RENTENSION_DAYS + 10)
  old_request = FlakeAnalysisRequest.Create('flake1', False, 123)
  old_request.user_emails.append('*****@*****.**')
  old_request.user_emails_obscured = False
  old_request.user_emails_last_edit = invalid_request_time
  old_request.Save()
  recent_request = FlakeAnalysisRequest.Create('flake2', False, 321)
  recent_request.user_emails.append('*****@*****.**')
  recent_request.user_emails_obscured = False
  recent_request.user_emails_last_edit = valid_request_time
  recent_request.Save()
  response = self.test_app.get(
      '/obscure-emails',
      params={'format': 'json'},
      headers={'X-AppEngine-Cron': 'true'},
  )
  expected_response = {
      'failure_triage_count': 0,
      'flake_triage_count': 0,
      'flake_request_aggregated_count': 1,
      'flake_request_count': 0,
  }
  self.assertEqual(expected_response, response.json_body)
  old_request = FlakeAnalysisRequest.GetVersion(key='flake1', version=1)
  self.assertTrue(old_request.user_emails_obscured)
  self.assertEqual(['*****@*****.**'], old_request.user_emails)
  recent_request = FlakeAnalysisRequest.GetVersion(key='flake2', version=1)
  self.assertFalse(recent_request.user_emails_obscured)
  self.assertEqual(['*****@*****.**'], recent_request.user_emails)
def testCheckFlakeSwarmedAndSupportedWhenNotSupported(self):
  """Reports (False, False, None) when no step is swarmed or supported."""
  request = FlakeAnalysisRequest.Create('flake', False, 123)
  unsupported_steps = []
  for builder in ('b1', 'b2'):
    unsupported = BuildStep.Create('m', builder, 10, 's',
                                   datetime(2016, 10, 1))
    unsupported.swarmed = False
    unsupported.supported = False
    unsupported_steps.append(unsupported)
  request.build_steps = unsupported_steps
  self.assertEqual(
      (False, False, None),
      flake_analysis_service._CheckFlakeSwarmedAndSupported(request))
def testAuthorizedAccessButNoNewAnalysisNeeded(self, mock_mon, *_):
  """A 'skip' metric is reported when no new analysis is required."""
  request = FlakeAnalysisRequest.Create('flake', False, 123)
  request.build_steps = [
      BuildStep.Create('m', 'b2', 80, 's', datetime(2016, 10, 20))
  ]
  self.assertFalse(
      flake_analysis_service.ScheduleAnalysisForFlake(
          request, '*****@*****.**', True, triggering_sources.FINDIT_UI))
  mock_mon.assert_called_once_with(
      source='waterfall',
      operation='skip',
      trigger='auto',
      canonical_step_name='unknown',
      isolate_target_name='unknown')
def testAddBuildStep(self):
  """AddBuildStep keeps one step per configuration, preferring earlier ones."""
  early = datetime(2016, 10, 1, 0, 0, 0)
  later = datetime(2016, 10, 2, 0, 0, 0)
  latest = datetime(2016, 10, 2, 1, 0, 0)
  middle = datetime(2016, 10, 2, 0, 30, 0)
  request = FlakeAnalysisRequest.Create('flaky_test', False, 123)
  # First report for each configuration is accepted.
  self.assertTrue(request.AddBuildStep('m', 'b1', 1, 's', early))
  self.assertTrue(request.AddBuildStep('m', 'b2', 10, 's', later))
  # A later report for an existing configuration is rejected...
  self.assertFalse(request.AddBuildStep('m', 'b2', 11, 's', latest))
  # ...but an earlier one replaces it.
  self.assertTrue(request.AddBuildStep('m', 'b2', 9, 's', middle))
  self.assertEqual(2, len(request.build_steps), request.build_steps)
  self.assertEqual(
      BuildStep.Create('m', 'b1', 1, 's', early), request.build_steps[0])
  self.assertEqual(
      BuildStep.Create('m', 'b2', 9, 's', middle), request.build_steps[1])
def _ObscureFlakeAnalysisRequest():
  """Obscures the user emails in FlakeAnalysisRequest.

  Pages through all requests whose emails are not yet obscured and whose last
  edit is older than the retention window, obscures their emails, and marks
  them done.

  Returns:
    The number of entities updated.
  """
  count = 0
  time_limit = _TimeBeforeNow(days=_REQUEST_RECORD_RENTENSION_DAYS)
  query = FlakeAnalysisRequest.query(
      FlakeAnalysisRequest.user_emails_obscured == False,
      FlakeAnalysisRequest.user_emails_last_edit < time_limit)
  more = True
  cursor = None
  while more:
    # Cursor-based paging; `more` goes False on the last page.
    entities, cursor, more = query.fetch_page(_PAGE_SIZE, start_cursor=cursor)
    for entity in entities:
      entity.user_emails = email_util.ObscureEmails(entity.user_emails,
                                                    ['google.com'])
      entity.user_emails_obscured = True
    # Persist the whole page in one batch write.
    ndb.put_multi(entities)
    count += len(entities)
  return count
def testGetNormalizedConfigurationNames(self):
  """Normalized names resolve only for a known configuration."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 123
  step_name = 's'
  test_name = 't'
  reported_time = datetime(2016, 11, 16)
  request = FlakeAnalysisRequest.Create(test_name, False, 123)
  build_step = BuildStep.Create(master_name, builder_name, build_number,
                                step_name, reported_time)
  build_step.wf_master_name = master_name
  build_step.wf_builder_name = builder_name
  build_step.wf_build_number = build_number
  build_step.wf_step_name = step_name
  request.build_steps.append(build_step)
  # An unmatched configuration yields no normalized names.
  self.assertEqual((None, None),
                   request._GetNormalizedConfigurationNames('m2', 'b2'))
  # A matched configuration yields the waterfall master/builder.
  self.assertEqual(
      (master_name, builder_name),
      request._GetNormalizedConfigurationNames(master_name, builder_name))
def testBailoutForAndroidAndFuchsia(self, *_):
  """Scheduling bails out when every step maps to a bailed-out builder."""
  win_step = BuildStep.Create('m', 'Win10 Tests x64 (dbg)', 80, 's',
                              datetime(2016, 10, 20))
  fuchsia_step = BuildStep.Create('m', 'Fuchsia t', 80, 's',
                                  datetime(2016, 10, 20))
  request = FlakeAnalysisRequest.Create('flake', False, 123)
  request.build_steps = [win_step, fuchsia_step]

  def FindMatchingWaterfallStep(step, _):
    # Fake a successful waterfall match keeping the original builder name.
    step.wf_master_name = step.master_name
    step.wf_builder_name = step.builder_name
    step.wf_build_number = 100
    step.wf_step_name = 'step'

  with mock.patch.object(
      flake_analysis_service.step_mapper,
      'FindMatchingWaterfallStep',
      side_effect=FindMatchingWaterfallStep):
    self.assertIsNone(
        flake_analysis_service.ScheduleAnalysisForFlake(
            request, '*****@*****.**', True, triggering_sources.FINDIT_API))
def _HandleRerunAnalysis(self): """Rerun an analysis as a response to a user request.""" # If the key has been specified, we can derive the above information # from the analysis itself. if not auth_util.IsCurrentUserAdmin(): return self.CreateError('Only admin is allowed to rerun.', 403) key = self.request.get('key') if not key: return self.CreateError('No key was provided.', 404) analysis = ndb.Key(urlsafe=key).get() if not analysis: return self.CreateError('Analysis of flake is not found.', 404) if not self._AnalysisCompleted(analysis): return self.CreateError( 'Cannot rerun analysis if one is currently running or pending.', 400) logging.info( 'Rerun button pushed, analysis will be reset and triggered.\n' 'Analysis key: %s', key) request = FlakeAnalysisRequest.Create(analysis.original_test_name, False, analysis.bug_id) request.AddBuildStep( analysis.original_master_name, analysis.original_builder_name, analysis.original_build_number, analysis.original_step_name, time_util.GetUTCNow()) analysis, _ = self._CreateAndScheduleFlakeAnalysis( request, analysis.master_name, analysis.builder_name, analysis.build_number, analysis.step_name, analysis.test_name, analysis.bug_id, True) return self.CreateRedirect( '/p/chromium/flake-portal/analysis/analyze?redirect=1&key=%s' % analysis.key.urlsafe())
def ScheduleAnalysisForFlake(request,
                             user_email,
                             is_admin,
                             triggering_source,
                             rerun=False):
  """Schedules an analysis on the flake in the given request if needed.

  Args:
    request (FlakeAnalysisRequest): The request to analyze a flake.
    user_email (str): The email of the requester.
    is_admin (bool): Whether the requester is an admin.
    triggering_source (int): Where the request is coming from, either Findit
      UI (check flake page), pipeline (from analysis) or Findit API.
    rerun (bool): This is a rerun, so force it to be run.

  Returns:
    True if an analysis was scheduled; False if a new analysis is not needed;
    None if the user has no permission to.
  """
  # TODO(crbug.com/853325): Refactor rerun logic.
  assert len(request.build_steps), 'At least 1 build step is needed!'
  if not IsAuthorizedUser(user_email, is_admin):
    logging.info(
        'Schedule failed because user is not authorized. user:%s, admin:%s',
        user_email, is_admin)
    return None
  request.bug_reported_by = triggering_source
  request.user_emails = [user_email]

  manually_triggered = user_email.endswith('@google.com')
  trigger_action = 'manual' if manually_triggered else 'auto'
  flake_source = 'cq' if request.on_cq else 'waterfall'

  # Drop build steps whose matched waterfall builder is temporarily
  # unsupported; keep the rest.
  build_steps = []
  for build_step in request.build_steps:
    step_mapper.FindMatchingWaterfallStep(build_step, request.name)
    # crbug.com/844516: temporarily bail out for fuchsia and Win7.
    if build_step.has_matching_waterfall_step and (
        build_step.wf_builder_name == 'Win10 Tests x64 (dbg)' or
        'fuchsia' in build_step.wf_builder_name.lower()):
      continue
    build_steps.append(build_step)
  request.build_steps = build_steps
  if not request.build_steps:
    logging.info('Flake %s on android/fuchsia temporarily unsupported.',
                 request.name)
    return None

  canonical_step_name = 'unknown'
  isolate_target_name = 'unknown'
  if request.build_steps[0].step_metadata:
    # Tries to use step_metadata from request.build_steps as default.
    canonical_step_name = request.build_steps[0].step_metadata.get(
        'canonical_step_name') or canonical_step_name
    isolate_target_name = request.build_steps[0].step_metadata.get(
        'isolate_target_name') or isolate_target_name

  version_number, build_step = _CheckForNewAnalysis(request, rerun)

  if build_step and build_step.step_metadata:
    # Uses the step_metadata from the step that the analysis will actually
    # run for.
    canonical_step_name = (
        build_step.step_metadata.get('canonical_step_name') or
        canonical_step_name)
    isolate_target_name = (
        build_step.step_metadata.get('isolate_target_name') or
        isolate_target_name)

  if version_number and build_step:
    # A new analysis is needed.
    logging.info('A new analysis is needed for: %s', build_step)
    normalized_test = TestInfo(build_step.wf_master_name,
                               build_step.wf_builder_name,
                               build_step.wf_build_number,
                               build_step.wf_step_name, request.name)
    original_test = TestInfo(build_step.master_name, build_step.builder_name,
                             build_step.build_number, build_step.step_name,
                             request.name)
    analysis = initialize_flake_pipeline.ScheduleAnalysisIfNeeded(
        normalized_test,
        original_test,
        request.flake_key,
        bug_id=request.bug_id,
        allow_new_analysis=True,
        manually_triggered=manually_triggered,
        user_email=user_email,
        triggering_source=triggering_source,
        queue_name=constants.WATERFALL_ANALYSIS_QUEUE,
        force=rerun)
    if analysis:
      # TODO: put this in a transaction.
      # Attach the new analysis to the just-saved request version.
      request = FlakeAnalysisRequest.GetVersion(
          key=request.name, version=version_number)
      request.analyses.append(analysis.key)
      request.put()
      logging.info('A new analysis was triggered successfully with key: %s',
                   analysis.key)
      monitoring.OnFlakeAnalysisTriggered(
          source=flake_source,
          operation='analyze',
          trigger=trigger_action,
          canonical_step_name=canonical_step_name,
          isolate_target_name=isolate_target_name)
      return True
    else:
      logging.error('But new analysis was not triggered!')
      monitoring.OnFlakeAnalysisTriggered(
          source=flake_source,
          operation='error',
          trigger=trigger_action,
          canonical_step_name=canonical_step_name,
          isolate_target_name=isolate_target_name)
  else:
    logging.info('No new analysis is needed: %s', request)
    monitoring.OnFlakeAnalysisTriggered(
        source=flake_source,
        operation='skip',
        trigger=trigger_action,
        canonical_step_name=canonical_step_name,
        isolate_target_name=isolate_target_name)
  return False
def HandlePost(self):
  """Dispatches flake-analysis POST requests.

  Routes to rerun/cancel/analyze-recent-commit handlers, or starts (or looks
  up) an analysis for the build/step/test given in the URL parameters and
  redirects to its page.
  """
  # Information needed to execute this endpoint, will be populated
  # by the branches below.
  rerun = self.request.get('rerun', '0').strip() == '1'
  cancel = self.request.get('cancel', '0').strip() == '1'
  analyze_recent_commit = (
      self.request.get('analyze_recent_commit', '0').strip() == '1')
  if rerun:
    # Rerun an analysis.
    return self._HandleRerunAnalysis()
  elif cancel:
    # Force an analysis to be cancelled.
    return self._HandleCancelAnalysis()
  elif analyze_recent_commit:
    return self._HandleAnalyzeRecentCommit()
  else:
    # Regular POST requests to start an analysis.
    # If the key hasn't been specified, then we get the information from
    # other URL parameters.
    build_url = self.request.get('url', '').strip()
    build_info = buildbot.ParseBuildUrl(build_url)
    if not build_info:
      return self.CreateError('Unknown build info!', 400)
    master_name, builder_name, build_number = build_info
    step_name = self.request.get('step_name', '').strip()
    test_name = self.request.get('test_name', '').strip()
    bug_id = self.request.get('bug_id', '').strip()
    error = self._ValidateInput(step_name, test_name, bug_id)
    if error:
      return error
    build_number = int(build_number)
    bug_id = int(bug_id) if bug_id else None
    request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
    request.AddBuildStep(master_name, builder_name, build_number, step_name,
                         time_util.GetUTCNow())
    analysis, scheduled = self._CreateAndScheduleFlakeAnalysis(
        request, master_name, builder_name, build_number, step_name,
        test_name, bug_id, False)
    if not analysis:
      if scheduled is None:
        # User does not have permission to trigger, nor was any previous
        # analysis triggered to view.
        return {
            'template': 'error.html',
            'data': {
                'error_message': (
                    'No permission to schedule an analysis for flaky test. '
                    'Please log in with your @google.com account first.'),
            },
            'return_code': 403,
        }
      # Check if a previous request has already covered this analysis so use
      # the results from that analysis.
      request = FlakeAnalysisRequest.GetVersion(key=test_name)
      if not (request and request.analyses):
        return {
            'template': 'error.html',
            'data': {
                'error_message': (
                    'Flake analysis is not supported for "%s/%s". Either '
                    'the test type is not supported or the test is not '
                    'swarmed yet.' % (step_name, test_name)),
            },
            'return_code': 400,
        }
      analysis = request.FindMatchingAnalysisForConfiguration(
          master_name, builder_name)
      if not analysis:
        logging.error('Flake analysis was deleted unexpectedly!')
        return {
            'template': 'error.html',
            'data': {
                'error_message': 'Flake analysis was deleted unexpectedly!',
            },
            'return_code': 404,
        }
    logging.info('Analysis: %s has a scheduled status of: %r', analysis.key,
                 scheduled)
    return self.CreateRedirect(
        '/p/chromium/flake-portal/analysis/analyze?redirect=1&key=%s' %
        analysis.key.urlsafe())
def testWaterfallFlake(self):
  """A waterfall (chromium.linux) build step is not treated as a CQ flake."""
  waterfall_request = FlakeAnalysisRequest.Create('flaky_test', False, 123)
  waterfall_request.AddBuildStep('chromium.linux', 'b1', 1, 's',
                                 datetime(2016, 11, 14))
  self.assertFalse(waterfall_request.on_cq)
def testUpdateNoChanges(self):
  """Update() with an identical name leaves the request unchanged."""
  unchanged = FlakeAnalysisRequest.Create('test', False, 123)
  unchanged.Update(name='test')
  self.assertEqual('test', unchanged.name)
def testAsyncProcessFlakeReportOnStaging(self, _):
  """No task is enqueued when running on the staging environment."""
  report = FlakeAnalysisRequest.Create('t', False, 12345)
  apis.AsyncProcessFlakeReport(report, '*****@*****.**', False)
  self.assertEqual(0, len(self.taskqueue_requests))