def HandlePost(self):
  """Triggers an analysis on demand and redirects to the result page.

  Request parameters:
    url: The url of the failed build to analyze.
    force: '1' to force a re-run of a completed analysis (admin only).

  Returns:
    A redirect to the waterfall failure page, or an error dict when the url
    does not point to a build, the master is unsupported, or a forced rerun
    is requested for an incomplete build.
  """
  url = self.request.get('url').strip()
  is_admin = auth_util.IsCurrentUserAdmin()
  user_email = auth_util.GetUserEmail()
  if not acl.CanTriggerNewAnalysis(user_email, is_admin):
    # No permission to schedule a new analysis.
    return self.CreateRedirect('/waterfall/failure?redirect=1&url=%s' % url)

  build_info = buildbot.ParseBuildUrl(url)
  if not build_info:
    return BaseHandler.CreateError(
        'Url "%s" is not pointing to a build.' % url, 404)
  master_name, builder_name, build_number = build_info

  analysis = None
  # Fix: reuse the already-computed is_admin instead of querying the user
  # service a second time via auth_util.IsCurrentUserAdmin().
  if not (waterfall_config.MasterIsSupported(master_name) or is_admin):
    # If the build failure was already analyzed, just show it to the user.
    analysis = WfAnalysis.Get(master_name, builder_name, build_number)
    if not analysis:
      return BaseHandler.CreateError(
          'Master "%s" is not supported yet.' % master_name, 501)

  if not analysis:
    # Only allow admin to force a re-run and set the build_completed.
    force = is_admin and self.request.get('force') == '1'
    build = build_util.GetBuildInfo(master_name, builder_name, build_number)
    if not build:
      return BaseHandler.CreateError(
          'Can\'t get information about build "%s/%s/%s".' % (
              master_name, builder_name, build_number), 501)
    if not build.completed and force:
      return BaseHandler.CreateError(
          'Can\'t force a rerun for an incomplete build "%s/%s/%s".' % (
              master_name, builder_name, build_number), 501)

    build_failure_analysis_pipelines.ScheduleAnalysisIfNeeded(
        master_name, builder_name, build_number,
        build_completed=build.completed, force=force,
        queue_name=constants.WATERFALL_ANALYSIS_QUEUE)

  return self.CreateRedirect('/waterfall/failure?redirect=1&url=%s' % url)
def AnalyzeFlake(self, request):
  """Analyze a flake on Commit Queue. Currently only supports flaky tests."""
  is_admin = auth_util.IsCurrentUserAdmin()
  user_email = auth_util.GetUserEmail()
  if not flake_analysis_service.IsAuthorizedUser(user_email, is_admin):
    raise endpoints.UnauthorizedException(
        'No permission to run a new analysis! User is %s' % user_email)

  def _ToAnalysisRequest(flake):
    # Translate the endpoint payload into a FlakeAnalysisRequest entity,
    # timestamping each reported build step as it is added.
    analysis_request = FlakeAnalysisRequest.Create(
        flake.name, flake.is_step, flake.bug_id)
    for build_step in flake.build_steps:
      analysis_request.AddBuildStep(
          build_step.master_name, build_step.builder_name,
          build_step.build_number, build_step.step_name,
          time_util.GetUTCNow())
    return analysis_request

  flake_analysis_request = _ToAnalysisRequest(request)
  logging.info('Flake report: %s', flake_analysis_request)

  try:
    _AsyncProcessFlakeReport(flake_analysis_request, user_email, is_admin)
    queued = True
  except Exception:
    # Ignore the report when fail to queue it for async processing.
    queued = False
    logging.exception('Failed to queue flake report for async processing')

  return _FlakeAnalysis(queued=queued)
def _HasPermission(self):
  """Returns True if the current request may access this handler.

  Task-queue and cron requests (identified by App Engine headers) bypass
  user-level checks; everything else is decided by PERMISSION_LEVEL.
  """
  if (self.request.headers.get('X-AppEngine-QueueName') or
      self.request.headers.get('X-AppEngine-Cron')):
    # Requests from task queues or cron jobs could access all HTTP endpoints.
    return True
  elif self.PERMISSION_LEVEL == Permission.ANYONE:
    return True
  elif self.PERMISSION_LEVEL == Permission.CORP_USER:
    # Only give access to google accounts or admins.
    return self.IsCorpUserOrAdmin()
  elif self.PERMISSION_LEVEL == Permission.ADMIN:
    return auth_util.IsCurrentUserAdmin()
  else:
    # Fix: pass the value as a lazy logging argument instead of eagerly
    # interpolating with '%'.
    logging.error('Unknown permission level: %s', self.PERMISSION_LEVEL)
    return False
def _HasPermission(self):
  """Returns True if the current request may access this handler.

  Access is decided solely by the handler's PERMISSION_LEVEL; APP_SELF
  endpoints are reachable only by the app's own task queues and cron jobs.
  """
  if self.PERMISSION_LEVEL == Permission.ANYONE:
    # For public info, it is readable to the world.
    return True
  elif self.PERMISSION_LEVEL == Permission.CORP_USER:
    # Only give access to google accounts or admins.
    return self.IsCorpUserOrAdmin()
  elif self.PERMISSION_LEVEL == Permission.ADMIN:
    return auth_util.IsCurrentUserAdmin()
  elif self.PERMISSION_LEVEL == Permission.APP_SELF:
    # For internal endpoints for task queues and cron jobs, they are
    # accessible to the app itself only.
    return self.IsRequestFromAppSelf()
  else:
    # Fix: pass the value as a lazy logging argument instead of eagerly
    # interpolating with '%'.
    logging.error('Unknown permission level: %s', self.PERMISSION_LEVEL)
    return False
def _CreateAndScheduleFlakeAnalysis(
    request, master_name, builder_name, build_number, step_name, test_name,
    bug_id, rerun=False):  # pylint: disable=unused-argument
  """Creates and schedules a flake analysis.

  Args:
    request (FlakeAnalysisRequest): The requested step to analyze, containing
      all original fields used to create the request, such as the master,
      builder, etc. on which the flaky test was originally detected.
    master_name (string): The master name used to reference the analysis.
    builder_name (string): The builder name used to reference the analysis.
    build_number (int): The build number used to reference the analysis.
    step_name (string): The step name used to reference the analysis.
    test_name (string): The test name used to reference the analysis.
    bug_id (int): The bug id.
    rerun (boolean): Is this analysis a rerun.

  Returns:
    (analysis, scheduled): the newly created MasterFlakeAnalysis version and
    the scheduling result from the flake analysis service.
  """
  requester_email = auth_util.GetUserEmail()
  requester_is_admin = auth_util.IsCurrentUserAdmin()

  # Schedule first: the service creates the analysis we look up below.
  scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
      request, requester_email, requester_is_admin,
      triggering_sources.FINDIT_UI, rerun=rerun)

  analysis = MasterFlakeAnalysis.GetVersion(
      master_name, builder_name, build_number, step_name, test_name)
  return analysis, scheduled
def _PrepareCommonDataForFailure(self, analysis):
  """Collects the template fields shared by all failure-result pages."""
  # Identity of the analyzed build and pipeline links.
  data = {
      'master_name': analysis.master_name,
      'builder_name': analysis.builder_name,
      'build_number': analysis.build_number,
      'pipeline_status_path': analysis.pipeline_status_path,
      'show_debug_info': self._ShowDebugInfo(),
  }
  # Timing of the analysis run.
  data.update({
      'analysis_request_time': _FormatDatetime(analysis.request_time),
      'analysis_start_time': _FormatDatetime(analysis.start_time),
      'analysis_end_time': _FormatDatetime(analysis.end_time),
      'analysis_duration': analysis.duration,
      'analysis_update_time': _FormatDatetime(analysis.updated_time),
  })
  # Outcome flags and triage-related fields.
  data.update({
      'analysis_completed': analysis.completed,
      'analysis_failed': analysis.failed,
      'analysis_correct': analysis.correct,
      'analysis_is_duplicate': analysis.is_duplicate,
      'triage_history': _GetTriageHistory(analysis),
      'show_admin_controls': auth_util.IsCurrentUserAdmin(),
      'triage_reference_analysis_master_name':
          analysis.triage_reference_analysis_master_name,
      'triage_reference_analysis_builder_name':
          analysis.triage_reference_analysis_builder_name,
      'triage_reference_analysis_build_number':
          analysis.triage_reference_analysis_build_number,
  })
  return data
def _HandleCancelAnalysis(self):
  """Cancel analysis as a response to a user request."""
  # Guard clauses: permission, then request validity, then analysis state.
  if not auth_util.IsCurrentUserAdmin():
    return self.CreateError('Only admin is allowed to cancel.', 403)

  urlsafe_key = self.request.get('key')
  if not urlsafe_key:
    return self.CreateError('No key was provided.', 404)

  analysis = ndb.Key(urlsafe=urlsafe_key).get()
  if not analysis:
    return self.CreateError('Analysis of flake is not found.', 404)
  if analysis.status != analysis_status.RUNNING:
    return self.CreateError('Can\'t cancel an analysis that\'s complete', 400)
  if not analysis.root_pipeline_id:
    return self.CreateError('No root pipeline found for analysis.', 404)

  pipeline = AnalyzeFlakePipeline.from_id(analysis.root_pipeline_id)
  if not pipeline:
    return self.CreateError('Root pipeline couldn\'t be found.', 404)

  # If we can find the pipeline, cancel it.
  pipeline.abort('Pipeline was cancelled manually.')
  abort_message = 'The pipeline was aborted manually.'
  error = {
      'error': abort_message,
      'message': abort_message,
  }
  # Record the manual abort on the analysis entity itself.
  analysis.Update(
      status=analysis_status.ERROR, error=error,
      end_time=time_util.GetUTCNow())

  return self.CreateRedirect(
      '/p/chromium/flake-portal/analysis/analyze?redirect=1&key=%s' %
      analysis.key.urlsafe())
def _HandleRerunAnalysis(self):
  """Rerun an analysis as a response to a user request."""
  # Guard clauses: permission, then request validity, then analysis state.
  if not auth_util.IsCurrentUserAdmin():
    return self.CreateError('Only admin is allowed to rerun.', 403)

  urlsafe_key = self.request.get('key')
  if not urlsafe_key:
    return self.CreateError('No key was provided.', 404)

  # If the key has been specified, we can derive the above information
  # from the analysis itself.
  analysis = ndb.Key(urlsafe=urlsafe_key).get()
  if not analysis:
    return self.CreateError('Analysis of flake is not found.', 404)
  if not self._AnalysisCompleted(analysis):
    return self.CreateError(
        'Cannot rerun analysis if one is currently running or pending.', 400)

  logging.info(
      'Rerun button pushed, analysis will be reset and triggered.\n'
      'Analysis key: %s', urlsafe_key)

  # Rebuild the original request from the analysis' recorded origin fields.
  rerun_request = FlakeAnalysisRequest.Create(
      analysis.original_test_name, False, analysis.bug_id)
  rerun_request.AddBuildStep(
      analysis.original_master_name, analysis.original_builder_name,
      analysis.original_build_number, analysis.original_step_name,
      time_util.GetUTCNow())

  analysis, _ = self._CreateAndScheduleFlakeAnalysis(
      rerun_request, analysis.master_name, analysis.builder_name,
      analysis.build_number, analysis.step_name, analysis.test_name,
      analysis.bug_id, True)

  return self.CreateRedirect(
      '/p/chromium/flake-portal/analysis/analyze?redirect=1&key=%s' %
      analysis.key.urlsafe())
def _GetTriageHistory(analysis):
  """Returns the analysis' triage records formatted for display, or None.

  History is only exposed to admins, and only for completed analyses that
  actually have triage records.
  """
  if (not auth_util.IsCurrentUserAdmin() or not analysis.completed or
      not analysis.triage_history):
    return None

  def _ToDisplayRecord(record):
    # A record's result_status description takes precedence over cl_status.
    status_description = (
        RESULT_STATUS_TO_DESCRIPTION.get(record.get('result_status')) or
        CL_STATUS_TO_DESCRIPTION.get(record.get('cl_status')))
    return {
        'triage_time': _FormatDatetime(
            datetime.utcfromtimestamp(record['triage_timestamp'])),
        'user_name': record['user_name'],
        'triaged_cl': _GetCLDict(analysis, record.get('triaged_cl')),
        'result_status': status_description,
        'version': record.get('version'),
    }

  return [_ToDisplayRecord(record) for record in analysis.triage_history]
def HandleGet(self):
  """Renders the result page for a single flake analysis.

  Looks up the analysis by the urlsafe ndb key in the 'key' request
  parameter and assembles the template data for flake/result.html.
  """
  key = self.request.get('key')
  if not key:
    return self.CreateError('No key was provided.', 404)

  analysis = ndb.Key(urlsafe=key).get()
  if not analysis:
    return self.CreateError('Analysis of flake is not found.', 404)

  suspected_flake = _GetSuspectedFlakeInfo(analysis)
  culprit = _GetCulpritInfo(analysis)
  build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
      analysis.data_points)
  regression_range = analysis.GetLatestRegressionRange()
  culprit_confidence = culprit.get('confidence', 0)

  def AsPercentString(val):
    """0-1 as a percent, rounded and returned as a string"""
    # Falsy confidence (0 or None) renders as an empty string, not '0'.
    return "{0:d}".format(int(round(val * 100.0))) if val else ''

  culprit_confidence = AsPercentString(culprit_confidence)

  # A heuristic-analysis error downgrades the overall displayed status.
  status = analysis.status
  if analysis.heuristic_analysis_status == analysis_status.ERROR:
    status = analysis_status.ERROR

  # Just use utc now when request_time is missing, but don't save it.
  if not analysis.request_time:
    analysis.request_time = time_util.GetUTCNow()
  # Just use utc now when end_time is missing, but don't save it.
  if not analysis.end_time:
    analysis.end_time = time_util.GetUTCNow()

  analysis_complete = self._AnalysisCompleted(analysis)
  data = {
      'key': analysis.key.urlsafe(),
      'pass_rates': [],  # Filled in below via _GetCoordinatesData.
      'last_attempted_swarming_task':
          _GetLastAttemptedSwarmingTaskDetails(analysis),
      'last_attempted_try_job': _GetLastAttemptedTryJobDetails(analysis),
      'version_number': analysis.version_number,
      'suspected_flake': suspected_flake,
      'suspected_culprits': _GetSuspectsInfoForAnalysis(analysis),
      'culprit': culprit,
      'request_time': time_util.FormatDatetime(analysis.request_time),
      # str(timedelta) is 'H:MM:SS.ffffff'; split('.') drops microseconds.
      'ended_days_ago':
          str(time_util.GetUTCNow() - analysis.end_time).split('.')[0],
      'duration':
          str(analysis.end_time - analysis.request_time).split('.')[0],
      'last_updated':
          str(time_util.GetUTCNow() - analysis.updated_time).split('.')[0],
      'analysis_complete': analysis_complete,
      'build_level_number': build_level_number,
      'revision_level_number': revision_level_number,
      'error': analysis.error_message,
      'show_admin_options': self._ShowCustomRunOptions(analysis),
      'show_debug_options': self._ShowDebugInfo(),
      'pipeline_status_path': analysis.pipeline_status_path,
      # new ui stuff
      # Prefer the originally reported build coordinates when present.
      'master_name': analysis.original_master_name or analysis.master_name,
      'builder_name': analysis.original_builder_name or analysis.builder_name,
      'build_number': analysis.original_build_number or analysis.build_number,
      'step_name': analysis.original_step_name or analysis.step_name,
      'test_name': analysis.original_test_name or analysis.test_name,
      'regression_range_upper': (regression_range.upper.commit_position
                                 if regression_range.upper else None),
      'regression_range_lower': (regression_range.lower.commit_position
                                 if regression_range.lower else None),
      'culprit_url': culprit.get('url', ''),
      'culprit_revision': (culprit.get('commit_position', 0) or
                           culprit.get('git_hash', '')),
      'culprit_confidence': culprit_confidence,
      'bug_id': str(analysis.bug_id) if analysis.bug_id else '',
      # NOTE(review): .get(status) returns None for an unknown status and
      # .lower() would then raise AttributeError — confirm every possible
      # status value has an entry in STATUS_TO_DESCRIPTION.
      'status': analysis_status.STATUS_TO_DESCRIPTION.get(status).lower(),
  }
  # Triage history is admin-only and only meaningful once completed.
  if (auth_util.IsCurrentUserAdmin() and analysis.completed and
      analysis.triage_history):
    data['triage_history'] = analysis.GetTriageHistory()

  data['pending_time'] = time_util.FormatDuration(
      analysis.request_time, analysis.start_time or time_util.GetUTCNow())
  data['duration'] = _GetDurationForAnalysis(analysis)
  data['pass_rates'] = _GetCoordinatesData(analysis)
  # Show the most up-to-date flakiness.
  data['most_recent_flakiness'] = _GetRecentFlakinessInfo(analysis)
  return {'template': 'flake/result.html', 'data': data}
def _ShowCustomRunOptions(self, analysis):
  """Whether to show admin-only custom run options for this analysis."""
  # TODO(lijeffrey): Remove checks for admin and debug flag once analyze
  # manual input for a regression range is implemented.
  if not auth_util.IsCurrentUserAdmin():
    return False
  if self.request.get('debug') != '1':
    return False
  return analysis.status != analysis_status.RUNNING
def IsCorpUserOrAdmin(self):
  """Returns True if the user logged in with corp account or as admin."""
  email = auth_util.GetUserEmail()
  is_google_account = bool(email) and email.endswith('@google.com')
  return is_google_account or auth_util.IsCurrentUserAdmin()
def testAdminFromCookie(self, mocked_users):
  # The mocked users service treats the cookie-authenticated user as admin,
  # so IsCurrentUserAdmin() must return True and must have consulted it
  # exactly once with no arguments.
  self.assertTrue(auth_util.IsCurrentUserAdmin())
  mocked_users.assert_called_once_with()
def _ShowDebugInfo(self):
  """Whether to render debug info: admins, or explicit 'debug=1' requests."""
  if auth_util.IsCurrentUserAdmin():
    return True
  return self.request.get('debug') == '1'
def HandleGet(self):
  """Renders the flake analysis result page.

  Two entry modes:
    - 'key' provided: look up an existing analysis directly by urlsafe key.
    - otherwise: parse build url / step / test / bug parameters, schedule an
      analysis if permitted, and fall back to a matching previous analysis.
  """
  key = self.request.get('key')
  if key:
    analysis = ndb.Key(urlsafe=key).get()
    if not analysis:  # pragma: no cover
      return self.CreateError('Analysis of flake is not found', 404)
  else:
    build_url = self.request.get('url', '').strip()
    build_info = buildbot.ParseBuildUrl(build_url)
    if not build_info:  # pragma: no cover
      return self.CreateError('Unknown build info!', 400)
    master_name, builder_name, build_number = build_info

    step_name = self.request.get('step_name', '').strip()
    test_name = self.request.get('test_name', '').strip()
    bug_id = self.request.get('bug_id', '').strip()
    # TODO(lijeffrey): Add support for force flag to trigger a rerun.

    error = self._ValidateInput(step_name, test_name, bug_id)
    if error:  # pragma: no cover
      return error

    build_number = int(build_number)
    bug_id = int(bug_id) if bug_id else None
    user_email = auth_util.GetUserEmail()
    is_admin = auth_util.IsCurrentUserAdmin()

    request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
    request.AddBuildStep(master_name, builder_name, build_number, step_name,
                         time_util.GetUTCNow())
    # scheduled is None when the user lacks permission to trigger.
    scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
        request, user_email, is_admin, triggering_sources.FINDIT_UI)

    analysis = MasterFlakeAnalysis.GetVersion(
        master_name, builder_name, build_number, step_name, test_name)
    if not analysis:
      if scheduled is None:
        # User does not have permission to trigger, nor was any previous
        # analysis triggered to view.
        return {
            'template': 'error.html',
            'data': {
                'error_message':
                    ('You could schedule an analysis for flaky test only '
                     'after you login with google.com account.'),
                'login_url': self.GetLoginUrl(),
            },
            'return_code': 401,
        }

      # Check if a previous request has already covered this analysis so use
      # the results from that analysis.
      request = FlakeAnalysisRequest.GetVersion(key=test_name)
      if not (request and request.analyses):
        return {
            'template': 'error.html',
            'data': {
                'error_message':
                    ('Flake analysis is not supported for this request. Either'
                     ' the build step may not be supported or the test is not '
                     'swarmed.'),
            },
            'return_code': 400,
        }

      analysis = request.FindMatchingAnalysisForConfiguration(
          master_name, builder_name)
      if not analysis:  # pragma: no cover
        logging.error('Flake analysis was deleted unexpectedly!')
        return {
            'template': 'error.html',
            'data': {
                'error_message': 'Flake analysis was deleted unexpectedly!',
            },
            'return_code': 400
        }

  suspected_flake = _GetSuspectedFlakeInfo(analysis)
  culprit = _GetCulpritInfo(analysis)
  build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
      analysis.data_points)

  data = {
      'key': analysis.key.urlsafe(),
      'master_name': analysis.master_name,
      'builder_name': analysis.builder_name,
      'build_number': analysis.build_number,
      'step_name': analysis.step_name,
      'test_name': analysis.test_name,
      'pass_rates': [],  # Filled in below via _GetCoordinatesData.
      'analysis_status': analysis.status_description,
      'try_job_status':
          analysis_status.STATUS_TO_DESCRIPTION.get(analysis.try_job_status),
      'last_attempted_swarming_task':
          _GetLastAttemptedSwarmingTaskDetails(analysis),
      'last_attempted_try_job': _GetLastAttemptedTryJobDetails(analysis),
      'version_number': analysis.version_number,
      'suspected_flake': suspected_flake,
      'culprit': culprit,
      'request_time': time_util.FormatDatetime(analysis.request_time),
      'build_level_number': build_level_number,
      'revision_level_number': revision_level_number,
      'error': analysis.error_message,
      'iterations_to_rerun': analysis.iterations_to_rerun,
      'show_input_ui': self._ShowInputUI(analysis)
  }
  # NOTE(review): this uses users.is_current_user_admin() while sibling
  # handlers use auth_util.IsCurrentUserAdmin() — confirm whether this
  # inconsistency is intentional.
  if (users.is_current_user_admin() and analysis.completed and
      analysis.triage_history):
    data['triage_history'] = analysis.GetTriageHistory()

  data['pending_time'] = time_util.FormatDuration(
      analysis.request_time, analysis.start_time or time_util.GetUTCNow())
  # Duration is only meaningful once the analysis has started.
  if analysis.status != analysis_status.PENDING:
    data['duration'] = time_util.FormatDuration(
        analysis.start_time, analysis.end_time or time_util.GetUTCNow())
  data['pass_rates'] = _GetCoordinatesData(analysis)
  return {'template': 'flake/result.html', 'data': data}