def HandleGet(self):
  """Responds with the currently deployed version of this app."""
  return {
      'data': {
          'version': appengine_util.GetCurrentVersion(),
      }
  }
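# For context: appengine_util.GetCurrentVersion() is used throughout these
# snippets to stamp entities with the deployed app version. A minimal sketch
# of such a helper, assuming it wraps the App Engine modules API; the real
# implementation in appengine_util may differ.
from google.appengine.api import modules


def GetCurrentVersion():
  # Name of the app version serving this request, e.g. 'findit-prod-123'.
  return modules.get_current_version_name()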
def _ResetAnalysis(self, master_name, builder_name, build_number):
  """Resets the WfAnalysis entity so a fresh analysis run can start."""
  analysis = WfAnalysis.Get(master_name, builder_name, build_number)
  analysis.pipeline_status_path = self.pipeline_status_path()
  analysis.status = analysis_status.RUNNING
  analysis.result_status = None
  analysis.start_time = time_util.GetUTCNow()
  analysis.version = appengine_util.GetCurrentVersion()
  analysis.end_time = None
  analysis.put()
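# The analysis_status module referenced above and below is an enum-style
# namespace of lifecycle states. A sketch for orientation only; the numeric
# values here are assumptions, not the codebase's actual constants.
PENDING = 0
RUNNING = 10
COMPLETED = 70
ERROR = 80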
def UpdateTriageResult(self,
                       triage_result,
                       suspect_info,
                       user_name,
                       version_number=None):
  """Appends a triage record to this analysis' triage history."""
  result = TriageResult()
  result.user_name = user_name
  result.triage_result = triage_result
  result.findit_version = appengine_util.GetCurrentVersion()
  result.version_number = version_number
  result.suspect_info = suspect_info
  self.triage_history.append(result)
  self.triage_email_obscured = False
  self.triage_record_last_add = time_util.GetUTCNow()
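# A sketch of the TriageResult model populated above, assuming a plain ndb
# model with unindexed properties; the actual property types may differ.
from google.appengine.ext import ndb


class TriageResult(ndb.Model):
  user_name = ndb.StringProperty(indexed=False)
  triage_result = ndb.IntegerProperty(indexed=False)
  findit_version = ndb.StringProperty(indexed=False)
  version_number = ndb.IntegerProperty(indexed=False)
  suspect_info = ndb.JsonProperty(indexed=False)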
def run(self, *_args, **_kwargs):
  """Call predator to do the analysis of the given crash.

  N.B., due to the structure of AppEngine pipelines, this method must
  accept the same arguments as are passed to ``__init__``; however, because
  they were already passed to ``__init__`` there's no use in receiving them
  here. Thus, we discard all the arguments to this method (except for
  ``self``, naturally).
  """
  # TODO(wrengr): shouldn't this method somehow call _NeedsNewAnalysis
  # to guard against race conditions?
  analysis = self._findit.GetAnalysis(self._crash_identifiers)

  # Update the model's status to say we're in the process of doing analysis.
  analysis.pipeline_status_path = self.pipeline_status_path()
  analysis.status = analysis_status.RUNNING
  analysis.started_time = time_util.GetUTCNow()
  analysis.findit_version = appengine_util.GetCurrentVersion()
  analysis.put()

  # Actually do the analysis.
  culprit = self._findit.FindCulprit(analysis.ToCrashReport())
  if culprit is not None:
    result, tags = culprit.ToDicts()
  else:
    result = {'found': False}
    tags = {
        'found_suspects': False,
        'found_project': False,
        'found_components': False,
        'has_regression_range': False,
        'solution': None,
    }

  # Update the model's status to say we're done, and save the results.
  analysis.completed_time = time_util.GetUTCNow()
  analysis.result = result
  for tag_name, tag_value in tags.iteritems():
    # TODO(http://crbug.com/602702): make it possible to add arbitrary tags.
    # TODO(http://crbug.com/659346): we misplaced the coverage test; find it!
    if hasattr(analysis, tag_name):  # pragma: no cover
      setattr(analysis, tag_name, tag_value)
    if hasattr(monitoring, tag_name):
      metric = getattr(monitoring, tag_name)
      metric.increment({tag_name: tag_value, 'client_id': self.client_id})
  analysis.status = analysis_status.COMPLETED
  analysis.put()
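# The getattr(monitoring, tag_name) lookup above resolves module-level metric
# objects whose names mirror the tag keys. A hypothetical sketch assuming
# ts_mon-style counters; the metric name, description, and fields below are
# illustrative, not the project's actual definitions.
from infra_libs import ts_mon

found_suspects = ts_mon.CounterMetric(
    'predator/found_suspects',
    'Analyses bucketed by whether suspects were found.',
    [ts_mon.BooleanField('found_suspects'),
     ts_mon.StringField('client_id')])

# Usage mirrors the loop above, e.g.:
# found_suspects.increment({'found_suspects': True, 'client_id': client_id})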
def PopulateAnalysisInfo(analysis):
  """Resets and populates the flake analysis entity for a new request.

  Note: this is a nested function; user_email, triggering_source,
  original_test, bug_id and flake_key are bound in the enclosing scope.
  """
  analysis.Reset()
  analysis.request_time = time_util.GetUTCNow()
  analysis.status = analysis_status.PENDING
  analysis.version = appengine_util.GetCurrentVersion()
  analysis.triggering_user_email = user_email
  analysis.triggering_user_email_obscured = False
  analysis.triggering_source = triggering_source
  analysis.original_master_name = original_test.master_name
  analysis.original_builder_name = original_test.builder_name
  analysis.original_build_number = original_test.build_number
  analysis.original_step_name = original_test.step_name
  analysis.original_test_name = original_test.test_name
  analysis.bug_id = bug_id
  analysis.flake_key = flake_key
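# A hypothetical sketch of the enclosing shape that binds the closure's free
# variables; the function and model names here are illustrative assumptions,
# not the codebase's actual entry point.
def _RequestAnalysis(original_test, user_email, triggering_source, bug_id,
                     flake_key):
  analysis = MasterFlakeAnalysis()  # hypothetical lookup/creation

  def PopulateAnalysisInfo(analysis):
    pass  # body as in the snippet above

  PopulateAnalysisInfo(analysis)
  analysis.put()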
def RunImpl(self, pipeline_input):
  master_name, builder_name, build_number = (
      pipeline_input.build_key.GetParts())
  build_failure_analysis.ResetAnalysisForANewAnalysis(
      master_name,
      builder_name,
      build_number,
      build_completed=pipeline_input.build_completed,
      pipeline_status_path=self.pipeline_status_path,
      current_version=appengine_util.GetCurrentVersion())

  # TODO(crbug/869684): Use a gauge metric to track intermittent statuses.

  # The yield statements below return PipelineFutures, which allow subsequent
  # pipelines to refer to previous output values.
  # https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python

  # Heuristic approach.
  heuristic_params = CompileHeuristicAnalysisParameters(
      failure_info=pipeline_input.current_failure_info,
      build_completed=pipeline_input.build_completed)
  heuristic_result = yield HeuristicAnalysisForCompilePipeline(
      heuristic_params)

  # Try job approach.
  # Checks for first-time failures and starts a try job if any are found.
  with pipelines.pipeline.InOrder():
    start_compile_try_job_input = self.CreateInputObjectInstance(
        StartCompileTryJobInput,
        build_key=BuildKey(
            master_name=master_name,
            builder_name=builder_name,
            build_number=build_number),
        heuristic_result=heuristic_result,
        build_completed=pipeline_input.build_completed,
        force=pipeline_input.force)
    yield StartCompileTryJobPipeline(start_compile_try_job_input)

    # Report event to BQ.
    report_event_input = self.CreateInputObjectInstance(
        report_event_pipeline.ReportEventInput,
        analysis_urlsafe_key=WfAnalysis.Get(
            master_name, builder_name, build_number).key.urlsafe())
    if not pipeline_input.force:
      yield report_event_pipeline.ReportAnalysisEventPipeline(
          report_event_input)
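# A hypothetical sketch of how a root pipeline like the one above gets kicked
# off; appengine-pipelines jobs are started with .start() on a task queue.
# The input construction, queue name, and backend module name below are
# illustrative assumptions, not the project's actual wiring.
analyze_input = AnalyzeCompileFailureInput(
    build_key=BuildKey(
        master_name='chromium.linux',
        builder_name='Linux Builder',
        build_number=12345),
    current_failure_info=failure_info,  # assumed to be collected upstream
    build_completed=True,
    force=False)
root_pipeline = AnalyzeCompileFailurePipeline(analyze_input)
root_pipeline.target = appengine_util.GetTargetNameForModule(
    'waterfall-backend')  # assumed helper and module name
root_pipeline.start(queue_name='build-failure-analysis-queue')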
def run(self, *_args, **_kwargs):
  """Call predator to do the analysis of the given crash.

  N.B., due to the structure of AppEngine pipelines, this method must
  accept the same arguments as are passed to ``__init__``; however, because
  they were already passed to ``__init__`` there's no use in receiving them
  here. Thus, we discard all the arguments to this method (except for
  ``self``, naturally).
  """
  logging.info('Start analysis of crash_pipeline. %s',
               json.dumps(self._crash_identifiers))
  # TODO(wrengr): shouldn't this method somehow call _NeedsNewAnalysis
  # to guard against race conditions?
  analysis = self._predator.GetAnalysis(self._crash_identifiers)

  # Update the model's status to say we're in the process of doing analysis.
  analysis.pipeline_status_path = self.pipeline_status_path()
  analysis.status = analysis_status.RUNNING
  analysis.started_time = time_util.GetUTCNow()
  analysis.predator_version = appengine_util.GetCurrentVersion()
  analysis.put()

  # Actually do the analysis.
  culprit = self._predator.FindCulprit(analysis)
  result, tags = culprit.ToDicts()
  analysis.status = (
      analysis_status.COMPLETED if tags['success'] else analysis_status.ERROR)
  analysis.completed_time = time_util.GetUTCNow()

  # Update the model's status to say we're done, and save the results.
  analysis.result = result
  for tag_name, tag_value in tags.iteritems():
    # TODO(http://crbug.com/602702): make it possible to add arbitrary tags.
    # TODO(http://crbug.com/659346): we misplaced the coverage test;
    # find it!
    if hasattr(analysis, tag_name):  # pragma: no cover
      setattr(analysis, tag_name, tag_value)
  analysis.put()

  self._predator.UpdateMetrics(analysis)

  logging.info('Found %s analysis result for %s: \n%s', self.client_id,
               repr(self._crash_identifiers),
               json.dumps(analysis.result, indent=2, sort_keys=True))
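# Illustrative shape of the (result, tags) pair returned by culprit.ToDicts(),
# inferred from the fallback dict in the earlier Findit snippet plus the
# 'success' tag checked above; the real Culprit model may emit more fields,
# and the values marked hypothetical are guesses.
result = {
    'found': True,
    'suspected_cls': [],  # hypothetical field name
}
tags = {
    'found_suspects': True,
    'found_project': True,
    'found_components': True,
    'has_regression_range': True,
    'solution': 'core',  # hypothetical value
    'success': True,
}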
def RunImpl(self, pipeline_input):
  master_name, builder_name, build_number = (
      pipeline_input.build_key.GetParts())
  build_failure_analysis.ResetAnalysisForANewAnalysis(
      master_name,
      builder_name,
      build_number,
      build_completed=pipeline_input.build_completed,
      pipeline_status_path=self.pipeline_status_path,
      current_version=appengine_util.GetCurrentVersion())

  # TODO(crbug/869684): Use a gauge metric to track intermittent statuses.

  # The yield statements below return PipelineFutures, which allow subsequent
  # pipelines to refer to previous output values.
  # https://github.com/GoogleCloudPlatform/appengine-pipelines/wiki/Python

  # Heuristic approach.
  heuristic_params = TestHeuristicAnalysisParameters(
      failure_info=pipeline_input.current_failure_info,
      build_completed=pipeline_input.build_completed)
  heuristic_result = yield HeuristicAnalysisForTestPipeline(heuristic_params)

  # Try job approach.
  with pipeline.InOrder():
    run_tasks_inputs = self.CreateInputObjectInstance(
        RunSwarmingTasksInput,
        build_key=pipeline_input.build_key,
        heuristic_result=heuristic_result,
        force=pipeline_input.force)
    # Swarming rerun.
    # Triggers swarming tasks when a first-time test failure happens.
    # This pipeline will run before the build completes.
    yield RunSwarmingTasksPipeline(run_tasks_inputs)

    collect_task_results_inputs = self.CreateInputObjectInstance(
        CollectSwarmingTaskResultsInputs,
        build_key=pipeline_input.build_key,
        build_completed=pipeline_input.build_completed)
    # An async pipeline that queries swarming tasks periodically until all
    # swarming tasks complete, then returns the consistent failures.
    consistent_failures = yield CollectSwarmingTaskResultsPipeline(
        collect_task_results_inputs)

    start_waterfall_try_job_inputs = self.CreateInputObjectInstance(
        StartTestTryJobInputs,
        build_key=pipeline_input.build_key,
        build_completed=pipeline_input.build_completed,
        force=pipeline_input.force,
        heuristic_result=heuristic_result,
        consistent_failures=consistent_failures)
    yield StartTestTryJobPipeline(start_waterfall_try_job_inputs)

    if not pipeline_input.force:
      # Report event to BQ.
      report_event_input = self.CreateInputObjectInstance(
          report_event_pipeline.ReportEventInput,
          analysis_urlsafe_key=WfAnalysis.Get(
              master_name, builder_name, build_number).key.urlsafe())
      yield report_event_pipeline.ReportAnalysisEventPipeline(
          report_event_input)

  # Trigger flake analysis on flaky tests, if any.
  yield TriggerFlakeAnalysesPipeline(pipeline_input.build_key)
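# The yield statements in both RunImpl methods return PipelineFutures rather
# than concrete values. A minimal sketch of the pattern, assuming standard
# appengine-pipelines semantics; the two child pipelines are hypothetical.
import pipeline


class ExampleParentPipeline(pipeline.Pipeline):

  def run(self):
    # 'first' is a PipelineFuture; it resolves when the child finishes.
    first = yield ProduceValuePipeline()
    # Passing the future as an argument creates the dependency edge: this
    # child only runs once ProduceValuePipeline has produced its output.
    yield ConsumeValuePipeline(first)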