def HandleGet(self):
  """Renders the result page for a single flake analysis.

  Looks up a MasterFlakeAnalysis by the urlsafe ndb key passed in the
  request, then builds the template data dict (suspects, culprit info,
  timing/duration strings, regression range, pass rates) for
  flake/result.html.

  Returns:
    A dict with 'template' and 'data' keys on success, or an error
    response created via self.CreateError when the key is missing or no
    analysis exists for it.
  """
  key = self.request.get('key')
  if not key:
    return self.CreateError('No key was provided.', 404)

  analysis = ndb.Key(urlsafe=key).get()
  if not analysis:
    return self.CreateError('Analysis of flake is not found.', 404)

  suspected_flake = _GetSuspectedFlakeInfo(analysis)
  culprit = _GetCulpritInfo(analysis)
  build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
      analysis.data_points)
  regression_range = analysis.GetLatestRegressionRange()
  culprit_confidence = culprit.get('confidence', 0)

  def AsPercentString(val):
    """0-1 as a percent, rounded and returned as a string"""
    # NOTE(review): falsy values (including an exact 0 confidence) map to
    # the empty string rather than '0' — presumably intentional so the UI
    # shows nothing when there is no confidence; confirm before changing.
    return "{0:d}".format(int(round(val * 100.0))) if val else ''

  culprit_confidence = AsPercentString(culprit_confidence)

  # Surface a heuristic-analysis error as the overall displayed status.
  status = analysis.status
  if analysis.heuristic_analysis_status == analysis_status.ERROR:
    status = analysis_status.ERROR

  # Just use utc now when request_time is missing, but don't save it.
  if not analysis.request_time:
    analysis.request_time = time_util.GetUTCNow()
  # Just use utc now when end_time is missing, but don't save it.
  if not analysis.end_time:
    analysis.end_time = time_util.GetUTCNow()

  analysis_complete = self._AnalysisCompleted(analysis)

  data = {
      'key': analysis.key.urlsafe(),
      'pass_rates': [],
      'last_attempted_swarming_task':
          _GetLastAttemptedSwarmingTaskDetails(analysis),
      'last_attempted_try_job': _GetLastAttemptedTryJobDetails(analysis),
      'version_number': analysis.version_number,
      'suspected_flake': suspected_flake,
      'suspected_culprits': _GetSuspectsInfoForAnalysis(analysis),
      'culprit': culprit,
      'request_time': time_util.FormatDatetime(analysis.request_time),
      # str(timedelta) looks like 'H:MM:SS.ffffff'; split('.') drops the
      # fractional-seconds tail for display.
      'ended_days_ago':
          str(time_util.GetUTCNow() - analysis.end_time).split('.')[0],
      # NOTE(review): this 'duration' value is overwritten below by
      # _GetDurationForAnalysis(analysis) before the dict is returned.
      'duration':
          str(analysis.end_time - analysis.request_time).split('.')[0],
      'last_updated':
          str(time_util.GetUTCNow() - analysis.updated_time).split('.')[0],
      'analysis_complete': analysis_complete,
      'build_level_number': build_level_number,
      'revision_level_number': revision_level_number,
      'error': analysis.error_message,
      'show_admin_options': self._ShowCustomRunOptions(analysis),
      'show_debug_options': self._ShowDebugInfo(),
      'pipeline_status_path': analysis.pipeline_status_path,

      # new ui stuff
      # The original_* fields take precedence when set — presumably the
      # values as originally reported before any re-mapping; confirm.
      'master_name': analysis.original_master_name or analysis.master_name,
      'builder_name': analysis.original_builder_name or analysis.builder_name,
      'build_number': analysis.original_build_number or analysis.build_number,
      'step_name': analysis.original_step_name or analysis.step_name,
      'test_name': analysis.original_test_name or analysis.test_name,
      'regression_range_upper': (regression_range.upper.commit_position
                                 if regression_range.upper else None),
      'regression_range_lower': (regression_range.lower.commit_position
                                 if regression_range.lower else None),
      'culprit_url': culprit.get('url', ''),
      'culprit_revision': (culprit.get('commit_position', 0) or
                           culprit.get('git_hash', '')),
      'culprit_confidence': culprit_confidence,
      'bug_id': str(analysis.bug_id) if analysis.bug_id else '',
      # NOTE(review): .get(status) with no default raises AttributeError on
      # .lower() if status is ever missing from STATUS_TO_DESCRIPTION.
      'status': analysis_status.STATUS_TO_DESCRIPTION.get(status).lower(),
  }

  # Triage history is only exposed to admins, and only once the analysis
  # has completed.
  if (auth_util.IsCurrentUserAdmin() and analysis.completed and
      analysis.triage_history):
    data['triage_history'] = analysis.GetTriageHistory()

  data['pending_time'] = time_util.FormatDuration(
      analysis.request_time, analysis.start_time or time_util.GetUTCNow())
  data['duration'] = _GetDurationForAnalysis(analysis)

  data['pass_rates'] = _GetCoordinatesData(analysis)

  # Show the most up-to-date flakiness.
  data['most_recent_flakiness'] = _GetRecentFlakinessInfo(analysis)

  return {'template': 'flake/result.html', 'data': data}
def testFormatDuration(self):
  """FormatDuration returns None on a missing endpoint, else 'HH:MM:SS'."""
  start = datetime(2016, 5, 1, 1, 1, 1)
  end = datetime(2016, 5, 1, 1, 2, 1)
  # Either endpoint missing yields no duration at all.
  self.assertIsNone(time_util.FormatDuration(None, start))
  self.assertIsNone(time_util.FormatDuration(start, None))
  # A one-minute span is rendered zero-padded as HH:MM:SS.
  self.assertEqual('00:01:00', time_util.FormatDuration(start, end))
def _GetDurationForAnalysis(analysis):
  """Returns the formatted duration of the analysis, or None if pending."""
  if analysis.status != analysis_status.PENDING:
    # A still-running analysis has no end_time yet; measure up to now.
    end = analysis.end_time or time_util.GetUTCNow()
    return time_util.FormatDuration(analysis.start_time, end)
  return None
def HandleGet(self):
  """Shows an existing flake analysis, or triggers one from build info.

  Two entry modes:
    * 'key' query parameter present: look up the MasterFlakeAnalysis
      directly by its urlsafe ndb key.
    * otherwise: parse master/builder/build from 'url', read
      'step_name'/'test_name'/'bug_id', schedule an analysis (permission
      permitting) and locate the resulting or pre-existing analysis.

  Returns:
    A dict with 'template' and 'data' keys rendering flake/result.html,
    or an error response dict / CreateError result on failure.
  """
  key = self.request.get('key')
  if key:
    analysis = ndb.Key(urlsafe=key).get()
    if not analysis:  # pragma: no cover
      return self.CreateError('Analysis of flake is not found', 404)
  else:
    build_url = self.request.get('url', '').strip()
    build_info = buildbot.ParseBuildUrl(build_url)
    if not build_info:  # pragma: no cover
      return self.CreateError('Unknown build info!', 400)
    master_name, builder_name, build_number = build_info

    step_name = self.request.get('step_name', '').strip()
    test_name = self.request.get('test_name', '').strip()
    bug_id = self.request.get('bug_id', '').strip()
    # TODO(lijeffrey): Add support for force flag to trigger a rerun.

    error = self._ValidateInput(step_name, test_name, bug_id)
    if error:  # pragma: no cover
      return error

    build_number = int(build_number)
    bug_id = int(bug_id) if bug_id else None
    user_email = auth_util.GetUserEmail()
    is_admin = auth_util.IsCurrentUserAdmin()

    # Schedule (or attempt to schedule) an analysis for this flaky test.
    # scheduled is None when the user lacks permission to trigger one.
    request = FlakeAnalysisRequest.Create(test_name, False, bug_id)
    request.AddBuildStep(master_name, builder_name, build_number, step_name,
                         time_util.GetUTCNow())
    scheduled = flake_analysis_service.ScheduleAnalysisForFlake(
        request, user_email, is_admin, triggering_sources.FINDIT_UI)

    analysis = MasterFlakeAnalysis.GetVersion(master_name, builder_name,
                                              build_number, step_name,
                                              test_name)

    if not analysis:
      if scheduled is None:
        # User does not have permission to trigger, nor was any previous
        # analysis triggered to view.
        return {
            'template': 'error.html',
            'data': {
                'error_message':
                    ('You could schedule an analysis for flaky test only '
                     'after you login with google.com account.'),
                'login_url': self.GetLoginUrl(),
            },
            'return_code': 401,
        }

      # Check if a previous request has already covered this analysis so use
      # the results from that analysis.
      request = FlakeAnalysisRequest.GetVersion(key=test_name)

      if not (request and request.analyses):
        return {
            'template': 'error.html',
            'data': {
                'error_message':
                    ('Flake analysis is not supported for this request. Either'
                     ' the build step may not be supported or the test is not '
                     'swarmed.'),
            },
            'return_code': 400,
        }

      analysis = request.FindMatchingAnalysisForConfiguration(
          master_name, builder_name)

      if not analysis:  # pragma: no cover
        # The request says an analysis exists for this configuration, but
        # it could not be fetched back.
        logging.error('Flake analysis was deleted unexpectedly!')
        return {
            'template': 'error.html',
            'data': {
                'error_message': 'Flake analysis was deleted unexpectedly!',
            },
            'return_code': 400
        }

  suspected_flake = _GetSuspectedFlakeInfo(analysis)
  culprit = _GetCulpritInfo(analysis)
  build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
      analysis.data_points)

  data = {
      'key': analysis.key.urlsafe(),
      'master_name': analysis.master_name,
      'builder_name': analysis.builder_name,
      'build_number': analysis.build_number,
      'step_name': analysis.step_name,
      'test_name': analysis.test_name,
      'pass_rates': [],
      'analysis_status': analysis.status_description,
      'try_job_status':
          analysis_status.STATUS_TO_DESCRIPTION.get(analysis.try_job_status),
      'last_attempted_swarming_task':
          _GetLastAttemptedSwarmingTaskDetails(analysis),
      'last_attempted_try_job': _GetLastAttemptedTryJobDetails(analysis),
      'version_number': analysis.version_number,
      'suspected_flake': suspected_flake,
      'culprit': culprit,
      'request_time': time_util.FormatDatetime(analysis.request_time),
      'build_level_number': build_level_number,
      'revision_level_number': revision_level_number,
      'error': analysis.error_message,
      'iterations_to_rerun': analysis.iterations_to_rerun,
      'show_input_ui': self._ShowInputUI(analysis)
  }

  # Triage history is admin-only and shown only for completed analyses.
  if (users.is_current_user_admin() and analysis.completed and
      analysis.triage_history):
    data['triage_history'] = analysis.GetTriageHistory()

  data['pending_time'] = time_util.FormatDuration(
      analysis.request_time, analysis.start_time or time_util.GetUTCNow())
  # A pending analysis has not started, so it has no duration yet; a
  # running one is measured up to now.
  if analysis.status != analysis_status.PENDING:
    data['duration'] = time_util.FormatDuration(
        analysis.start_time, analysis.end_time or time_util.GetUTCNow())

  data['pass_rates'] = _GetCoordinatesData(analysis)

  return {'template': 'flake/result.html', 'data': data}