def _TriggerNewAnalysesOnDemand(builds):
  """Schedules a failure analysis for each of the given builds.

  Args:
    builds (list of dict): Each dict identifies one failed build with keys
        'master_name', 'builder_name', 'build_number' and, optionally,
        'failed_steps'.

  Builds whose data cannot be fetched are skipped; a later request will
  recheck them.
  """
  for build in builds:
    master_name = build['master_name']
    builder_name = build['builder_name']
    build_number = build['build_number']
    failed_steps = build.get('failed_steps')

    # TODO(stgao): make alerts-dispatcher send information of whether a build
    # is completed.
    build_info = build_util.GetBuildInfo(
        master_name, builder_name, build_number)
    if not build_info:
      # Skip the build and wait for the next request to recheck it.
      logging.error(
          'Failed to retrieve build data for %s/%s/%s, steps=%s',
          master_name, builder_name, build_number, repr(failed_steps))
      continue

    build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
        master_name,
        builder_name,
        build_number,
        failed_steps=failed_steps,
        build_completed=build_info.completed,
        force=False,
        queue_name=constants.WATERFALL_ANALYSIS_QUEUE)
def HandleGet(self):
  """Fetches the latest build failures and schedules an analysis for each."""
  for failure in _GetLatestBuildFailures(self.HTTP_CLIENT):
    build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
        failure['master_name'],
        failure['builder_name'],
        failure['build_number'],
        failed_steps=failure['failed_steps'],
        force=False,
        queue_name=_BUILD_FAILURE_ANALYSIS_TASKQUEUE)
def HandlePost(self):
  """Triggers an analysis on demand and redirects to the result page.

  Permission is checked against the ACL first; users without permission are
  redirected straight to the (possibly empty) result page. Only admins may
  force a re-run, and only for completed builds.

  Returns:
    A redirect to the failure-result page on success, or an error response
    (404 for a malformed url, 501 for unsupported/unavailable builds).
  """
  url = self.request.get('url').strip()
  is_admin = auth_util.IsCurrentUserAdmin()
  user_email = auth_util.GetUserEmail()
  if not acl.CanTriggerNewAnalysis(user_email, is_admin):
    # No permission to schedule a new analysis.
    return self.CreateRedirect('/waterfall/failure?redirect=1&url=%s' % url)

  build_info = buildbot.ParseBuildUrl(url)
  if not build_info:
    return BaseHandler.CreateError(
        'Url "%s" is not pointing to a build.' % url, 404)
  master_name, builder_name, build_number = build_info

  analysis = None
  # Fix: reuse the cached |is_admin| value instead of redundantly calling
  # auth_util.IsCurrentUserAdmin() a second time within the same request.
  if not (waterfall_config.MasterIsSupported(master_name) or is_admin):
    # If the build failure was already analyzed, just show it to the user.
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    if not analysis:
      return BaseHandler.CreateError(
          'Master "%s" is not supported yet.' % master_name, 501)

  if not analysis:
    # Only allow admin to force a re-run and set the build_completed.
    force = is_admin and self.request.get('force') == '1'
    build = build_util.GetBuildInfo(master_name, builder_name, build_number)
    if not build:
      return BaseHandler.CreateError(
          'Can\'t get information about build "%s/%s/%s".' % (
              master_name, builder_name, build_number), 501)
    if not build.completed and force:
      return BaseHandler.CreateError(
          'Can\'t force a rerun for an incomplete build "%s/%s/%s".' % (
              master_name, builder_name, build_number), 501)
    build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
        master_name,
        builder_name,
        build_number,
        build_completed=build.completed,
        force=force,
        queue_name=constants.WATERFALL_ANALYSIS_QUEUE)

  return self.CreateRedirect('/waterfall/failure?redirect=1&url=%s' % url)
def testNotStartPipelineForAnalysisWithNoFailure(self, mocked_pipeline, _):
  """The analysis pipeline must not start for a build with no failure."""
  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      'm', 'b', 124,
      failed_steps=['a'],
      build_completed=False,
      force=False,
      queue_name=constants.DEFAULT_QUEUE)

  self.assertFalse(mocked_pipeline.called)
def HandleGet(self):
  """Triggers analysis of a build failure on demand and return current result.

  If the final analysis result is available, set cache-control to 1 day to
  avoid overload by unnecessary and frequent query from clients; otherwise
  set cache-control to 5 seconds to allow repeated query.

  Serve HTML page or JSON result as requested.
  """
  url = self.request.get('url').strip()

  build_keys = buildbot.ParseBuildUrl(url)
  if not build_keys:
    return BaseHandler.CreateError(
        'Url "%s" is not pointing to a build.' % url, 501)
  master_name, builder_name, build_number = build_keys

  if not masters.MasterIsSupported(master_name):
    return BaseHandler.CreateError(
        'Master "%s" is not supported yet.' % master_name, 501)

  # Schedules a new analysis if there is none yet, or reuses the existing one.
  analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name, builder_name, build_number,
      force=self.request.get('force') == '1',
      queue_name=BUILD_FAILURE_ANALYSIS_TASKQUEUE)

  return {
      'template': 'build_failure.html',
      'data': {
          'master_name': analysis.master_name,
          'builder_name': analysis.builder_name,
          'build_number': analysis.build_number,
          'pipeline_status_path': analysis.pipeline_status_path,
          'show_debug_info': self._ShowDebugInfo(),
          'analysis_request_time': _FormatDatetime(analysis.request_time),
          'analysis_start_time': _FormatDatetime(analysis.start_time),
          'analysis_end_time': _FormatDatetime(analysis.end_time),
          'analysis_duration': analysis.duration,
          'analysis_update_time': _FormatDatetime(analysis.updated_time),
          'analysis_completed': analysis.completed,
          'analysis_failed': analysis.failed,
          'analysis_result': analysis.result,
          'analysis_correct': analysis.correct,
          'triage_history': _GetTriageHistory(analysis),
      },
  }
def testStartPipelineForNewAnalysis(self, mocked_pipeline):
  """A new failure should record a WfAnalysis and start the pipeline."""
  master_name, builder_name, build_number = 'm', 'b', 124

  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name, builder_name, build_number,
      failed_steps=['a'],
      build_completed=False,
      force=False,
      queue_name=constants.DEFAULT_QUEUE)

  self.assertIsNotNone(
      WfAnalysis.Get(master_name, builder_name, build_number))
  mocked_pipeline.assert_has_calls(
      [mock.call().start(queue_name=constants.DEFAULT_QUEUE)])
def testNotStartPipelineForRunningAnalysis(self, mocked_logging, _):
  """No new analysis is scheduled while one is already running."""
  master_name, builder_name, build_number = 'm', 'b', 123
  self._CreateAndSaveWfAnalysis(master_name, builder_name, build_number,
                                ['a'], analysis_status.RUNNING)

  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name, builder_name, build_number,
      failed_steps=['a'],
      build_completed=True,
      force=False,
      queue_name=constants.DEFAULT_QUEUE)

  mocked_logging.assert_called_once_with(
      'An analysis is not needed for build %s, %s, %s', 'm', 'b', 123)
def testNotStartPipelineForNewAnalysis(self, mocked_pipeline):
  """The pipeline should not start when an analysis is already running."""
  master_name, builder_name, build_number = 'm', 'b', 123
  self._CreateAndSaveWfAnalysis(master_name, builder_name, build_number,
                                ['a'], analysis_status.RUNNING)

  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name, builder_name, build_number,
      failed_steps=['a'],
      build_completed=True,
      force=False,
      queue_name=constants.DEFAULT_QUEUE)

  self.assertFalse(mocked_pipeline.called)
def testStartPipelineForNewAnalysis(self):
  """A new failure should start the (mocked) root pipeline and save a record."""
  master_name, builder_name, build_number = 'm', 'b', 124
  self.mock(
      build_failure_analysis_pipelines.analyze_build_failure_pipeline,
      'AnalyzeBuildFailurePipeline', _MockRootPipeline)
  _MockRootPipeline.STARTED = False

  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name, builder_name, build_number,
      failed_steps=['a'],
      force=False,
      queue_name='default')

  self.assertTrue(_MockRootPipeline.STARTED)
  self.assertIsNotNone(
      WfAnalysis.Get(master_name, builder_name, build_number))
def testStartCompilePipelineForNewAnalysis(self, mock_info):
  """Tests that a new compile failure triggers the compile-analysis pipeline."""
  master_name = 'm'
  builder_name = 'b'
  build_number = 124
  failure_info = {
      'failed': True,
      'chromium_revision': 'rev',
      'failure_type': failure_type.COMPILE
  }
  # NOTE(review): the mocked helper returns a (failure_info, flag) pair;
  # the second element presumably means "should analyze" — confirm against
  # the patched function's signature.
  mock_info.return_value = failure_info, True

  # The exact input the compile pipeline is expected to be started with.
  compile_pipeline_input = (
      build_failure_analysis_pipelines.AnalyzeCompileFailureInput(
          build_key=BuildKey(
              master_name=master_name,
              builder_name=builder_name,
              build_number=build_number),
          current_failure_info=CompileFailureInfo.FromSerializable(
              failure_info),
          build_completed=False,
          force=False))
  self.MockGeneratorPipeline(
      build_failure_analysis_pipelines.AnalyzeCompileFailurePipeline,
      compile_pipeline_input, None)

  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name,
      builder_name,
      build_number,
      failed_steps=['a'],
      build_completed=False,
      force=False,
      queue_name=constants.DEFAULT_QUEUE)

  # Scheduling must have created a WfAnalysis record for this build.
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  self.assertIsNotNone(analysis)
def testNotStartPipelineForNewAnalysis(self):
  """No pipeline should start while an analysis is already in progress."""
  master_name, builder_name, build_number = 'm', 'b', 123
  self._CreateAndSaveWfAnalysis(master_name, builder_name, build_number,
                                ['a'], wf_analysis_status.ANALYZING)
  self.mock(
      build_failure_analysis_pipelines.analyze_build_failure_pipeline,
      'AnalyzeBuildFailurePipeline', _MockRootPipeline)
  _MockRootPipeline.STARTED = False

  build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
      master_name, builder_name, build_number,
      failed_steps=['a'],
      force=False,
      queue_name='default')

  self.assertFalse(_MockRootPipeline.STARTED)
def HandleGet(self):
  """Triggers analysis of a build failure on demand and return current result.

  If the final analysis result is available, set cache-control to 1 day to
  avoid overload by unnecessary and frequent query from clients; otherwise
  set cache-control to 5 seconds to allow repeated query.

  Serve HTML page or JSON result as requested.
  """
  url = self.request.get('url').strip()
  build_info = buildbot.ParseBuildUrl(url)
  if not build_info:
    return BaseHandler.CreateError(
        'Url "%s" is not pointing to a build.' % url, 501)
  master_name, builder_name, build_number = build_info

  analysis = None
  if not (waterfall_config.MasterIsSupported(master_name) or
          users.is_current_user_admin()):
    # If the build failure was already analyzed, just show it to the user.
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    if not analysis:
      return BaseHandler.CreateError(
          'Master "%s" is not supported yet.' % master_name, 501)

  if not analysis:
    # Only allow admin to force a re-run and set the build_completed.
    force = (users.is_current_user_admin() and
             self.request.get('force') == '1')
    build = build_util.GetBuildInfo(master_name, builder_name, build_number)
    if not build:
      return BaseHandler.CreateError(
          'Can\'t get information about build "%s/%s/%s".' % (
              master_name, builder_name, build_number), 501)
    build_completed = build.completed
    # A forced re-run only makes sense once the build has finished.
    if not build_completed and force:
      return BaseHandler.CreateError(
          'Can\'t rerun an incomplete build "%s/%s/%s".' % (
              master_name, builder_name, build_number), 501)

    analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
        master_name, builder_name, build_number,
        build_completed=build_completed,
        force=force,
        queue_name=constants.WATERFALL_ANALYSIS_QUEUE)

  data = self._PrepareCommonDataForFailure(analysis)
  data['suspected_cls'] = _GetAllSuspectedCLsAndCheckStatus(
      master_name, builder_name, build_number, analysis)
  # TODO(crbug.com/702444): Do not assume failure_type.INFRA implies a
  # compile failure. Either use a special template, or choose the appropriate
  # one based on the type of job (compile/test).
  if analysis.failure_type == failure_type.COMPILE or (
      analysis.failure_type == failure_type.INFRA):
    self._PrepareDataForCompileFailure(analysis, data)
    return {'template': 'waterfall/compile_failure.html', 'data': data}
  else:
    self._PrepareDataForTestFailures(analysis, build_info, data,
                                     self._ShowDebugInfo())
    return {'template': 'waterfall/test_failure.html', 'data': data}
def AnalyzeBuildFailures(self, request):
  """Returns analysis results for the given build failures in the request.

  Analysis of build failures will be triggered automatically on demand.

  Args:
    request (_BuildFailureCollection): A list of build failures.

  Returns:
    _BuildFailureAnalysisResultCollection
    A list of analysis results for the given build failures.
  """
  results = []
  logging.info('%d build failure(s).', len(request.builds))
  for build in request.builds:
    master_name = buildbot.GetMasterNameFromUrl(build.master_url)
    if not (master_name and masters.MasterIsSupported(master_name)):
      # Unknown or unsupported master; nothing to analyze.
      continue

    # If the build failure was already analyzed and a new analysis is
    # scheduled to analyze new failed steps, the returned WfAnalysis will
    # still have the result from last completed analysis.
    analysis = build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
        master_name,
        build.builder_name,
        build.build_number,
        failed_steps=build.failed_steps,
        force=False,
        queue_name=_BUILD_FAILURE_ANALYSIS_TASKQUEUE)

    if analysis.failed or not analysis.result:
      # Bail out if the analysis failed or there is no result yet.
      continue

    for failure in analysis.result['failures']:
      if not failure['suspected_cls']:
        continue
      suspected_cls = [
          _SuspectedCL(
              repo_name=cl['repo_name'],
              revision=cl['revision'],
              commit_position=cl['commit_position'])
          for cl in failure['suspected_cls']
      ]
      results.append(
          _BuildFailureAnalysisResult(
              master_url=build.master_url,
              builder_name=build.builder_name,
              build_number=build.build_number,
              step_name=failure['step_name'],
              is_sub_test=False,
              test_name=None,
              first_known_failed_build_number=failure['first_failure'],
              suspected_cls=suspected_cls))

  return _BuildFailureAnalysisResultCollection(results=results)