def _GetOccurrenceInformation(occurrence):
  """Gets information of one occurrence in a dict.

  Args:
    occurrence (FlakeOccurrence): One flake occurrence.

  Returns:
    (dict): Information of one occurrence in a dict.
  """
  occurrence_dict = occurrence.to_dict()

  # JavaScript numbers are always stored as double precision floating point
  # numbers, where the number (the fraction) is stored in bits 0 to 51, the
  # exponent in bits 52 to 62, and the sign in bit 63. So integers are
  # accurate up to 15 digits. To keep the precision of build ids (int 64),
  # convert them to string before rendering HTML pages.
  occurrence_dict['build_id'] = str(occurrence.build_id)

  # Formats the times in string representations with UTC.
  occurrence_dict['time_happened'] = time_util.FormatDatetime(
      occurrence_dict['time_happened'])
  occurrence_dict['time_detected'] = time_util.FormatDatetime(
      occurrence_dict['time_detected'])
  occurrence_dict['flake_type'] = FLAKE_TYPE_DESCRIPTIONS.get(
      occurrence_dict['flake_type'], 'Unknown')

  return occurrence_dict
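# A minimal sketch (hypothetical, not part of the module above) illustrating
# the precision issue the comment in _GetOccurrenceInformation describes:
# JavaScript numbers are IEEE-754 doubles, which represent integers exactly
# only up to 2**53, so a 64-bit build id can silently lose precision unless
# it is serialized as a string.
def _DemoBuildIdPrecisionLoss():
  build_id = 2**53 + 1  # 9007199254740993; fits in an int64, not in a double.
  assert float(build_id) == 2**53  # Rounds to the nearest representable double.
  assert str(build_id) == '9007199254740993'  # The string form stays exact.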
def _GetCQHiddenFlakeQueryStartTime():
  """Gets the time range to query for cq hidden flakes.

  The time of the last query is used both to decide whether the query should
  run now and to decide the start time of the query.

  Returns:
    ((str, str)): String representations of the start and end datetimes of the
      query, in the format %Y-%m-%d %H:%M:%S UTC, or (None, None) if the query
      should not run yet.
  """
  last_query_time_right_boundary = time_util.GetUTCNow() - timedelta(
      hours=_CQ_HIDDEN_FLAKE_QUERY_HOUR_INTERVAL)
  hidden_flake_query_start_time = time_util.FormatDatetime(
      time_util.GetUTCNow() - timedelta(
          hours=_CQ_HIDDEN_FLAKE_QUERY_HOUR_INTERVAL +
          _ROUGH_MAX_BUILD_CYCLE_HOURS,
          minutes=_CQ_HIDDEN_FLAKE_QUERY_OVERLAP_MINUTES))
  hidden_flake_query_end_time = time_util.FormatDatetime(
      time_util.GetUTCNow() -
      timedelta(hours=_CQ_HIDDEN_FLAKE_QUERY_HOUR_INTERVAL))

  last_query_time = _GetLastCQHiddenFlakeQueryTime()

  if not last_query_time:
    # Only before the first time of running the query.
    return hidden_flake_query_start_time, hidden_flake_query_end_time

  return ((hidden_flake_query_start_time, hidden_flake_query_end_time)
          if last_query_time <= last_query_time_right_boundary else
          (None, None))
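# A worked example of the window computed above, using hypothetical values for
# the module-level constants (the real values are defined elsewhere in the
# module): the start is pushed back by the query interval plus a rough maximum
# build cycle plus an overlap, so builds still running at the previous query
# are not missed, while the end lags "now" by the query interval.
def _DemoHiddenFlakeQueryWindow():
  interval_hours = 2     # Stand-in for _CQ_HIDDEN_FLAKE_QUERY_HOUR_INTERVAL.
  build_cycle_hours = 8  # Stand-in for _ROUGH_MAX_BUILD_CYCLE_HOURS.
  overlap_minutes = 20   # Stand-in for _CQ_HIDDEN_FLAKE_QUERY_OVERLAP_MINUTES.
  now = datetime(2024, 1, 1, 12, 0, 0)
  start = now - timedelta(
      hours=interval_hours + build_cycle_hours, minutes=overlap_minutes)
  end = now - timedelta(hours=interval_hours)
  assert start == datetime(2024, 1, 1, 1, 40, 0)  # 10h20m before now.
  assert end == datetime(2024, 1, 1, 10, 0, 0)  # 2h before now.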
def testGetTopCountResults(self):
  expected_result = {
      'client': self.handler.client,
      'crashes': [self.crashes[4], self.crashes[3]],
      'end_date': time_util.FormatDatetime(self.default_end_date),
      'regression_range_triage_status': '-1',
      'suspected_cls_triage_status': '-1',
      'found_suspects': '-1',
      'has_regression_range': '-1',
      'start_date': time_util.FormatDatetime(self.default_start_date),
      'signature': '',
      'top_cursor': '',
      'bottom_cursor': 'bottom_cursor',
  }

  with mock.patch(
      'gae_libs.dashboard_util.GetPagedResults') as mock_get_paged_results:
    mock_get_paged_results.return_value = [
        self._GetAnalysisResult(self.keys[4]),
        self._GetAnalysisResult(self.keys[3])
    ], '', 'bottom_cursor'
    response_json = self.test_app.get(
        '/mock-dashboard?n=2&format=json&start_date=%s&end_date=%s' %
        (self.default_start_date.strftime(dashboard_util.DATE_FORMAT),
         self.default_end_date.strftime(dashboard_util.DATE_FORMAT)))

  self.assertEqual(200, response_json.status_int)
  self.assertEqual(expected_result, response_json.json_body)
def testDisplayAllAnalysisResults(self):
  expected_result = {
      'client': self.handler.client,
      'crashes': [
          self.crashes[4], self.crashes[3], self.crashes[2], self.crashes[1],
          self.crashes[0]
      ],
      'end_date': time_util.FormatDatetime(self.default_end_date),
      'regression_range_triage_status': '-1',
      'suspected_cls_triage_status': '-1',
      'found_suspects': '-1',
      'has_regression_range': '-1',
      'start_date': time_util.FormatDatetime(self.default_start_date),
      'signature': '',
      'top_cursor': '',
      'bottom_cursor': '',
  }

  response_json = self.test_app.get(
      '/mock-dashboard?format=json&start_date=%s&end_date=%s' %
      (self.default_start_date.strftime(dashboard_util.DATE_FORMAT),
       self.default_end_date.strftime(dashboard_util.DATE_FORMAT)))

  self.assertEqual(200, response_json.status_int)
  self.assertDictEqual(expected_result, response_json.json_body)
def HandleGet(self): """Lists WfAnalysis entities detected to have been aborted.""" midnight_today = datetime.combine(time_util.GetUTCNow(), time.min) start = self.request.get('start_date') end = self.request.get('end_date') start_date, end_date = _GetStartEndDates(start, end, midnight_today) analyses = WfAnalysis.query( ndb.AND(WfAnalysis.build_start_time >= start_date, WfAnalysis.build_start_time < end_date, WfAnalysis.aborted == True)).order(-WfAnalysis.build_start_time).fetch(_COUNT) analyses_data = [] for analysis in analyses: analyses_data.append(_Serialize(analysis)) data = { 'start_date': time_util.FormatDatetime(start_date), 'end_date': time_util.FormatDatetime(end_date), 'analyses': analyses_data, } return {'template': 'pipeline_errors_dashboard.html', 'data': data}
def HandleGet(self): """Shows crash analysis results in an HTML page.""" start_date, end_date = dashboard_util.GetStartAndEndDates( self.request.get('start_date'), self.request.get('end_date')) query = self.Filter(self.crash_analysis_cls.query(), start_date, end_date) page_size = self.request.get('n') or _PAGE_SIZE # TODO(katesonia): Add pagination here. crash_list = query.order( -self.crash_analysis_cls.requested_time).fetch(int(page_size)) crashes = [] for crash in crash_list: display_data = { 'signature': crash.signature, 'version': crash.crashed_version, 'channel': crash.channel, 'platform': crash.platform, 'regression_range': ('' if not crash.has_regression_range else crash.result['regression_range']), 'suspected_cls': (crash.result.get('suspected_cls', []) if crash.result else []), 'suspected_project': (crash.result.get('suspected_project', '') if crash.result else ''), 'suspected_components': (crash.result.get( 'suspected_components', []) if crash.result else []), 'key': crash.key.urlsafe() } crashes.append(display_data) data = { 'start_date': time_util.FormatDatetime(start_date), 'end_date': time_util.FormatDatetime(end_date), 'found_suspects': self.request.get('found_suspects', '-1'), 'has_regression_range': self.request.get('has_regression_range', '-1'), 'suspected_cls_triage_status': self.request.get('suspected_cls_triage_status', '-1'), 'regression_range_triage_status': self.request.get('regression_range_triage_status', '-1'), 'client': self.client, 'crashes': crashes, 'signature': self.request.get('signature') } return {'template': 'dashboard.html', 'data': data}
def testFormatDatetime(self):
  self.assertIsNone(time_util.FormatDatetime(None))
  self.assertEqual(
      time_util.FormatDatetime(datetime(2016, 1, 2, 1, 2, 3)),
      '2016-01-02 01:02:03 UTC')
  self.assertEqual(
      time_util.FormatDatetime(datetime(2016, 1, 2, 1, 2, 3), day_only=True),
      '2016-01-02')
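# A minimal sketch, inferred purely from the assertions above, of the contract
# time_util.FormatDatetime is expected to satisfy (the real implementation may
# differ): None passes through, and datetimes render as fixed-format UTC
# strings, date-only when day_only is set.
def FormatDatetimeSketch(date, day_only=False):
  if date is None:
    return None
  if day_only:
    return date.strftime('%Y-%m-%d')
  return date.strftime('%Y-%m-%d %H:%M:%S UTC')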
def _FormatDisplayData(try_job_data):
  """Returns information of a WfTryJobData/FlakeTryJobData as a dict."""
  display_data = try_job_data.to_dict()

  for attribute in ('created_time', 'start_time', 'end_time', 'request_time'):
    display_data[attribute] = time_util.FormatDatetime(
        display_data[attribute])

  display_data['pending_time'] = (
      _FormatDuration(try_job_data.request_time, try_job_data.start_time)
      if try_job_data.start_time else _FormatDuration(
          try_job_data.created_time, time_util.GetUTCNow()))
  display_data['last_buildbucket_response'] = json.dumps(
      _PrepareBuildbucketResponseForDisplay(
          display_data['last_buildbucket_response']),
      sort_keys=True)

  if isinstance(try_job_data, FlakeTryJobData):
    # Flake try job data does not include try_job_type.
    display_data['try_job_type'] = 'flake'
    display_data['analysis_key'] = (
        try_job_data.analysis_key.urlsafe()
        if try_job_data.analysis_key else None)

  # Do not include the try job key in the response.
  display_data.pop('try_job_key', None)

  return display_data
def CanAutoCreateRevert(culprit, parameters):
  """Checks if Findit can auto create a revert.

  Findit can auto create a revert if:
    1. Auto create revert for test is turned on;
    2. The number of reverts in the past 24 hours is less than the daily
      limit;
    3. The culprit is also being suspected by the heuristic analysis.

  Args:
    culprit (basestring): Urlsafe key for the suspected cl.
    parameters (CulpritActionParameters): Parameters to run culprit action
      pipelines.
  """
  heuristic_cls = parameters.heuristic_cls
  if culprit not in heuristic_cls:
    return False

  action_settings = waterfall_config.GetActionSettings()
  # Auto revert has been turned off.
  if not bool(action_settings.get('auto_create_revert')):
    return False

  auto_create_revert_daily_threshold_test = action_settings.get(
      'auto_create_revert_daily_threshold_test',
      _DEFAULT_AUTO_CREATE_REVERT_DAILY_THRESHOLD_TEST)
  # Auto revert has exceeded the daily limit.
  if _GetDailyNumberOfRevertedCulprits(
      auto_create_revert_daily_threshold_test
  ) >= auto_create_revert_daily_threshold_test:
    logging.info(
        'Auto reverts for test culprits on %s have met the daily limit.',
        time_util.FormatDatetime(time_util.GetUTCNow()))
    return False

  return True
def HandleGet(self): """Lists the flake analyses in which the culprit introduced flakiness.""" key = self.request.get('key', '') culprit = ndb.Key(urlsafe=key).get() if not culprit: return BaseHandler.CreateError('Culprit not found!', 404) data = { 'project_name': culprit.project_name, 'revision': culprit.revision, 'commit_position': culprit.commit_position, 'cr_notified': culprit.cr_notified, 'cr_notification_time': time_util.FormatDatetime(culprit.cr_notification_time), 'analyses': _GetFlakeAnalysesAsDicts(culprit), 'key': key, } return {'template': 'flake/flake-culprit.html', 'data': data}
def HandleGet(self): """Lists the build cycles in which the culprit caused failures.""" key = self.request.get('key', '') culprit = ndb.Key(urlsafe=key).get() if not culprit: # pragma: no cover return self.CreateError('Culprit not found', 404) data = { 'project_name': culprit.project_name, 'revision': culprit.revision, 'commit_position': culprit.commit_position, 'cr_notified': culprit.cr_notified, 'cr_notification_time': time_util.FormatDatetime(culprit.cr_notification_time), 'builds': _GetBuildInfoAsDict(culprit), 'key': key, } return {'template': 'waterfall/culprit.html', 'data': data}
def serialize(self):
  result = {
      'patchset_id': self.patchset_id,
      'reverting_user_email': self.reverting_user_email,
      # Time of reverting patch creation.
      'timestamp': time_util.FormatDatetime(self.timestamp),
  }
  if self.reverting_cl:
    result['reverting_cl'] = self.reverting_cl.serialize()
  return result
def testFilterWithHasRegression(self):
  expected_result = {
      'client': self.handler.client,
      'crashes': [self.crashes[4], self.crashes[3], self.crashes[2]],
      'end_date': time_util.FormatDatetime(self.default_end_date),
      'regression_range_triage_status': '-1',
      'suspected_cls_triage_status': '-1',
      'found_suspects': '-1',
      'has_regression_range': 'yes',
      'start_date': time_util.FormatDatetime(self.default_start_date),
      'signature': ''
  }

  response_json = self.test_app.get(
      '/mock-dashboard?has_regression_range=yes&format=json'
      '&start_date=%s&end_date=%s' %
      (self.default_start_date.strftime(dashboard_util.DATE_FORMAT),
       self.default_end_date.strftime(dashboard_util.DATE_FORMAT)))

  self.assertEqual(200, response_json.status_int)
  self.assertEqual(expected_result, response_json.json_body)
def HandleGet(self): """Shows crash analysis results in an HTML page.""" start_date, end_date = dashboard_util.GetStartAndEndDates( self.request.get('start_date'), self.request.get('end_date')) query = self.Filter(start_date, end_date) try: page_size = int(self.request.get('n')) except (ValueError, TypeError): page_size = _PAGE_SIZE crash_analyses, top_cusor, bottom_cursor = dashboard_util.GetPagedResults( query, [(self.crash_analysis_cls.requested_time, dashboard_util.DESC)], cursor=self.request.get('cursor'), direction=self.request.get('direction', 'next'), page_size=page_size) # TODO(katesonia): An optimization is to index analysis.status. crash_analyses = [analysis for analysis in crash_analyses if analysis.completed] data = { 'start_date': time_util.FormatDatetime(start_date), 'end_date': time_util.FormatDatetime(end_date), 'found_suspects': self.request.get('found_suspects', '-1'), 'has_regression_range': self.request.get('has_regression_range', '-1'), 'suspected_cls_triage_status': self.request.get( 'suspected_cls_triage_status', '-1'), 'regression_range_triage_status': self.request.get( 'regression_range_triage_status', '-1'), 'client': self.client, 'crashes': self.CrashDataToDisplay(crash_analyses), 'signature': self.request.get('signature'), 'top_cursor': top_cusor, 'bottom_cursor': bottom_cursor, } return { 'template': self.template, 'data': data }
def testSearchSignature(self):
  """Tests search by signature in dashboard."""
  expected_result = {
      'client': self.handler.client,
      'crashes': [self.crashes[4]],
      'end_date': time_util.FormatDatetime(self.default_end_date),
      'regression_range_triage_status': '-1',
      'suspected_cls_triage_status': '-1',
      'found_suspects': '-1',
      'has_regression_range': '-1',
      'start_date': time_util.FormatDatetime(self.default_start_date),
      'signature': self.crashes[4]['signature']
  }

  response_json = self.test_app.get(
      '/mock-dashboard?format=json&start_date=%s&end_date=%s&signature=%s' %
      (self.default_start_date.strftime(dashboard_util.DATE_FORMAT),
       self.default_end_date.strftime(dashboard_util.DATE_FORMAT),
       self.crashes[4]['signature']))

  self.assertEqual(200, response_json.status_int)
  self.assertEqual(expected_result, response_json.json_body)
def _Serialize(analysis):
  return {
      'master_name': analysis.master_name,
      'builder_name': analysis.builder_name,
      'build_number': analysis.build_number,
      'analysis_type': failure_type.GetDescriptionForFailureType(
          analysis.failure_type),
      'build_start_time': time_util.FormatDatetime(analysis.build_start_time),
  }
def _GetRevertCLData(start_date, end_date):
  data = {
      'start_date': time_util.FormatDatetime(start_date),
      'end_date': time_util.FormatDatetime(end_date),
      'processed': [],
      'undetermined': []
  }

  query = WfSuspectedCL.query(WfSuspectedCL.identified_time >= start_date,
                              WfSuspectedCL.identified_time < end_date)

  more = True
  cursor = None
  all_suspected_cls = []
  while more:
    suspected_cls, cursor, more = query.fetch_page(
        _DEFAULT_PAGE_SIZE, start_cursor=cursor)
    all_suspected_cls.extend(suspected_cls)

  for suspected_cl in all_suspected_cls:
    processed, review_url, outcome = _CheckRevertStatusOfSuspectedCL(
        suspected_cl)
    result = {
        'cr_notification_time': time_util.FormatDatetime(
            suspected_cl.cr_notification_time or suspected_cl.updated_time),
        'outcome': revert_cl_status.STATUS_TO_DESCRIPTION.get(outcome),
        'url': review_url,
    }

    if processed:
      data['processed'].append(result)
    elif processed is None:  # pragma: no branch
      data['undetermined'].append(result)

  return data
def HandleGet(self):  # pragma: no cover
  """Shows the metrics of revert CLs created."""
  start = self.request.get('start_date')
  end = self.request.get('end_date')

  if not start and not end:
    # Default to 1 week of data, ending 1 day before the most recent UTC
    # midnight.
    previous_utc_midnight = time_util.GetMostRecentUTCMidnight()
    start_date = previous_utc_midnight - timedelta(days=8)
    end_date = previous_utc_midnight - timedelta(days=1)
  else:
    start_date, end_date = time_util.GetStartEndDates(start, end)

  suspected_cls = _GetAnalysesWithinDateRange(start_date, end_date)
  data = _GenerateFinditMetrics(suspected_cls)
  data['tree_closures'] = _GenerateTreeClosureMetrics('chromium', 'compile',
                                                      start_date, end_date)
  data['start_date'] = time_util.FormatDatetime(start_date)
  data['end_date'] = time_util.FormatDatetime(end_date)

  return {'template': 'auto_revert_metrics.html', 'data': data}
def testFilterWithSuspectsTriagedUnsure(self):
  expected_result = {
      'client': self.handler.client,
      'crashes': [self.crashes[4]],
      'end_date': time_util.FormatDatetime(self.default_end_date),
      'regression_range_triage_status': '-1',
      'suspected_cls_triage_status': str(triage_status.TRIAGED_UNSURE),
      'found_suspects': '-1',
      'has_regression_range': '-1',
      'start_date': time_util.FormatDatetime(self.default_start_date),
      'signature': '',
      'top_cursor': '',
      'bottom_cursor': '',
  }

  response_json = self.test_app.get(
      '/mock-dashboard?suspected_cls_triage_status=%d&format=json'
      '&start_date=%s&end_date=%s' %
      (triage_status.TRIAGED_UNSURE,
       self.default_start_date.strftime(dashboard_util.DATE_FORMAT),
       self.default_end_date.strftime(dashboard_util.DATE_FORMAT)))

  self.assertEqual(200, response_json.status_int)
  self.assertEqual(expected_result, response_json.json_body)
def _GenerateComponentReportJson(component_reports):
  """Generates component reports in json format.

  Args:
    component_reports ([ComponentFlakinessReport])
  """
  component_reports_json = []
  for report in component_reports:
    report_data = report.ToSerializable()
    report_data['report_time'] = time_util.FormatDatetime(
        report.report_time, day_only=True)
    component_reports_json.append(report_data)

  return component_reports_json
def _GenerateDisplayData(self, analysis):
  if analysis.stack_trace:
    raw_stacktrace = analysis.stack_trace
  else:
    stack_strs = []
    for stack in (analysis.stacktrace.stacks if analysis.stacktrace else []):
      stack_strs.append('\n'.join([str(frame) for frame in stack.frames]))
    raw_stacktrace = '\n'.join(stack_strs)

  return {
      'client': self.handler.client,
      'crash_url': '',
      'signature': analysis.signature,
      'version': analysis.crashed_version,
      'channel': analysis.channel,
      'platform': analysis.platform,
      'regression_range': analysis.result.get('regression_range'),
      'culprit_regression_range': analysis.culprit_regression_range,
      'historical_metadata': analysis.historical_metadata,
      'stack_trace': raw_stacktrace,
      'suspected_cls': analysis.result.get('suspected_cls'),
      'culprit_cls': analysis.culprit_cls,
      'suspected_project': analysis.result.get('suspected_project'),
      'culprit_project': analysis.culprit_project,
      'suspected_components': analysis.result.get('suspected_components'),
      'culprit_components': analysis.culprit_components,
      'request_time': time_util.FormatDatetime(analysis.requested_time),
      'analysis_completed': analysis.completed,
      'analysis_failed': analysis.failed,
      'log': analysis.result.get('log'),
      'triage_history': result_feedback._GetTriageHistory(analysis),
      'analysis_correct': {
          'regression_range': analysis.regression_range_triage_status,
          'suspected_cls': analysis.suspected_cls_triage_status,
          'suspected_project': analysis.suspected_project_triage_status,
          'suspected_components': analysis.suspected_components_triage_status,
      },
      'note': analysis.note,
      'key': analysis.key.urlsafe(),
  }
def _GetTriageHistory(analysis):
  if (not users.is_current_user_admin() or not analysis.completed or
      not analysis.triage_history):
    return None

  triage_history = []
  for triage_record in analysis.triage_history:
    triage_history.append({
        'triage_time': time_util.FormatDatetime(
            datetime.utcfromtimestamp(triage_record['triage_timestamp'])),
        'result_property': triage_record['result_property'],
        'user_name': triage_record['user_name'],
        'triage_status': triage_record['triage_status']
    })

  return triage_history
def GetTriageHistory(self):
  """Gets the triage history of a triaged model as a list of dicts."""
  triage_history = []
  for triage_record in self.triage_history:
    triage_history.append({
        'triaged_time': time_util.FormatDatetime(triage_record.triaged_time),
        'user_name': triage_record.user_name,
        'suspect_info': triage_record.suspect_info,
        'triage_result': (triage_status.TRIAGE_STATUS_TO_DESCRIPTION.get(
            triage_record.triage_result)),
        'findit_version': triage_record.findit_version,
        'version_number': triage_record.version_number
    })
  return triage_history
def CanAutoCommitRevertByFindit():
  """Checks if the revert can be auto committed by Findit.

  The revert can be committed if:
    1. Auto revert and auto commit are turned on;
    2. The number of commits of reverts in the past 24 hours is less than the
      daily limit;
    3. The culprit author has not landed another change yet.
  """
  action_settings = waterfall_config.GetActionSettings()
  if (not bool(action_settings.get('auto_commit_revert')) or
      not bool(action_settings.get('auto_create_revert'))):
    return False

  auto_commit_revert_daily_threshold_test = action_settings.get(
      'auto_commit_revert_daily_threshold_test',
      _DEFAULT_AUTO_COMMIT_REVERT_DAILY_THRESHOLD_TEST)
  if _GetDailyNumberOfCommits(
      auto_commit_revert_daily_threshold_test
  ) >= auto_commit_revert_daily_threshold_test:
    logging.info('Auto commits on %s have met the daily limit.',
                 time_util.FormatDatetime(time_util.GetUTCNow()))
    return False

  return True
def CanAutoCreateRevert():
  """Checks if Findit can auto create a revert.

  Findit can auto create a revert if both of the below are true:
    1. Auto create revert for compile is turned on;
    2. The number of reverts in the past 24 hours is less than the daily
      limit.
  """
  action_settings = waterfall_config.GetActionSettings()
  # Auto revert has been turned off.
  if not bool(action_settings.get('auto_create_revert')):
    return False

  auto_create_revert_daily_threshold_compile = action_settings.get(
      'auto_create_revert_daily_threshold_compile',
      _DEFAULT_AUTO_CREATE_REVERT_DAILY_THRESHOLD_COMPILE)
  # Auto revert has exceeded the daily limit.
  if _GetDailyNumberOfRevertedCulprits(
      auto_create_revert_daily_threshold_compile
  ) >= auto_create_revert_daily_threshold_compile:
    logging.info(
        'Auto reverts for compile culprits on %s have met the daily limit.',
        time_util.FormatDatetime(time_util.GetUTCNow()))
    return False

  return True
def HandleGet(self):
  status_code = int(
      self.request.get('result_status', result_status.UNSPECIFIED))
  step_name = self.request.get('step_name').strip()
  test_name = self.request.get('test_name').strip()
  triage = self.request.get('triage') == '1'

  # Only allow querying by start/end dates for admins during triage to avoid
  # overcomplicating the UI for other users.
  start_date, end_date = self._GetStartAndEndDates(triage)

  master_flake_analysis_query = _GetFlakeAnalysisFilterQuery(
      MasterFlakeAnalysis.query(), step_name, test_name, start_date, end_date,
      status_code)

  # If filtering by step_name and/or test_name, don't do paging.
  if step_name or test_name:
    analyses = master_flake_analysis_query.order(
        -MasterFlakeAnalysis.request_time).fetch()
    prev_cursor = ''
    cursor = ''
  else:
    analyses, prev_cursor, cursor = dashboard_util.GetPagedResults(
        master_flake_analysis_query,
        MasterFlakeAnalysis.request_time,
        self.request.get('cursor'),
        self.request.get('direction').strip(),
        page_size=PAGE_SIZE)

  data = {
      'master_flake_analyses': [],
      'result_status_filter': status_code,
      'step_name_filter': step_name,
      'test_name_filter': test_name,
      'prev_cursor': prev_cursor,
      'cursor': cursor,
  }

  if triage:  # pragma: no cover
    data['triage'] = triage
    data['start_date'] = start_date
    data['end_date'] = end_date

  for master_flake_analysis in analyses:
    data['master_flake_analyses'].append({
        'build_analysis_status': master_flake_analysis.status_description,
        'build_number': master_flake_analysis.build_number,
        'builder_name': master_flake_analysis.builder_name,
        'confidence_in_suspected_build': (
            master_flake_analysis.confidence_in_suspected_build),
        'culprit': (master_flake_analysis.culprit.ToDict()
                    if master_flake_analysis.culprit else {}),
        'key': master_flake_analysis.key.urlsafe(),
        'master_name': master_flake_analysis.master_name,
        'request_time': time_util.FormatDatetime(
            master_flake_analysis.request_time),
        'result_status': result_status.RESULT_STATUS_TO_DESCRIPTION.get(
            master_flake_analysis.result_status),
        'step_name': master_flake_analysis.step_name,
        'suspected_build': master_flake_analysis.suspected_flake_build_number,
        'test_name': master_flake_analysis.test_name,
        'try_job_status': analysis_status.STATUS_TO_DESCRIPTION.get(
            master_flake_analysis.try_job_status),
    })

  return {'template': 'flake/dashboard.html', 'data': data}
def HandleGet(self): """Shows a list of Findit try job results and statuses in an HTML page.""" category = self.request.get('category') start = self.request.get('start_date') end = self.request.get('end_date') start_date, end_date = _GetStartEndDates(start, end) if category.lower() == 'flake': try_job_data_list = FlakeTryJobData.query( FlakeTryJobData.created_time >= start_date, FlakeTryJobData.created_time < end_date).fetch() elif category.lower() == 'waterfall': try_job_data_list = WfTryJobData.query( WfTryJobData.created_time >= start_date, WfTryJobData.created_time < end_date).fetch() else: wf_try_job_query = WfTryJobData.query( WfTryJobData.created_time >= start_date, WfTryJobData.created_time < end_date) flake_try_job_query = FlakeTryJobData.query( FlakeTryJobData.created_time >= start_date, FlakeTryJobData.created_time < end_date) try_job_data_list = wf_try_job_query.fetch( ) + flake_try_job_query.fetch() # Sort try job data list by most recent first. try_job_data_list.sort(key=lambda x: x.created_time, reverse=True) try_jobs_in_progress = [] try_jobs_with_error = [] successfully_completed_try_jobs = [] for try_job_data in try_job_data_list: display_data = _FormatDisplayData(try_job_data) if not try_job_data.end_time and not try_job_data.error: start_time = try_job_data.request_time or try_job_data.created_time now = time_util.GetUTCNow() display_data['elapsed_time'] = (_FormatDuration( start_time, now) if start_time else None) display_data['status'] = ('running' if try_job_data.start_time else 'pending') try_jobs_in_progress.append(display_data) elif try_job_data.error: display_data['error'] = try_job_data.error['message'] # It is possible end_time is not available if the error was timeout. display_data['execution_time'] = _FormatDuration( try_job_data.start_time, try_job_data.end_time) try_jobs_with_error.append(display_data) else: display_data['culprit_found'] = (bool( try_job_data.culprits) if isinstance( try_job_data, WfTryJobData) else 'N/A') display_data['execution_time'] = _FormatDuration( try_job_data.start_time, try_job_data.end_time) successfully_completed_try_jobs.append(display_data) data = { 'start_date': time_util.FormatDatetime(start_date), 'end_date': time_util.FormatDatetime(end_date), 'category': category, 'try_jobs_in_progress': try_jobs_in_progress, 'try_jobs_with_error': try_jobs_with_error, 'successfully_completed_try_jobs': successfully_completed_try_jobs } return {'template': 'try_job_dashboard.html', 'data': data}
def serialize(self):
  data = super(Commit, self).serialize()
  data['timestamp'] = time_util.FormatDatetime(self.timestamp)
  return data
def serialize(self):
  return {
      'patchset_id': self.patchset_id,
      'committing_user_email': self.committing_user_email,
      'timestamp': time_util.FormatDatetime(self.last_cq_timestamp),
  }
def HandleGet(self):
  key = self.request.get('key')
  if not key:
    return self.CreateError('No key was provided.', 404)

  analysis = ndb.Key(urlsafe=key).get()
  if not analysis:
    return self.CreateError('Analysis of flake is not found.', 404)

  suspected_flake = _GetSuspectedFlakeInfo(analysis)
  culprit = _GetCulpritInfo(analysis)
  build_level_number, revision_level_number = _GetNumbersOfDataPointGroups(
      analysis.data_points)
  regression_range = analysis.GetLatestRegressionRange()
  culprit_confidence = culprit.get('confidence', 0)

  def AsPercentString(val):
    """Converts a 0-1 float to a rounded percent, returned as a string."""
    return "{0:d}".format(int(round(val * 100.0))) if val else ''

  culprit_confidence = AsPercentString(culprit_confidence)

  status = analysis.status
  if analysis.heuristic_analysis_status == analysis_status.ERROR:
    status = analysis_status.ERROR

  # Just use utc now when request_time is missing, but don't save it.
  if not analysis.request_time:
    analysis.request_time = time_util.GetUTCNow()

  # Just use utc now when end_time is missing, but don't save it.
  if not analysis.end_time:
    analysis.end_time = time_util.GetUTCNow()

  analysis_complete = self._AnalysisCompleted(analysis)

  data = {
      'key': analysis.key.urlsafe(),
      'pass_rates': [],
      'last_attempted_swarming_task': _GetLastAttemptedSwarmingTaskDetails(
          analysis),
      'last_attempted_try_job': _GetLastAttemptedTryJobDetails(analysis),
      'version_number': analysis.version_number,
      'suspected_flake': suspected_flake,
      'suspected_culprits': _GetSuspectsInfoForAnalysis(analysis),
      'culprit': culprit,
      'request_time': time_util.FormatDatetime(analysis.request_time),
      'ended_days_ago': str(time_util.GetUTCNow() -
                            analysis.end_time).split('.')[0],
      'duration': str(analysis.end_time -
                      analysis.request_time).split('.')[0],
      'last_updated': str(time_util.GetUTCNow() -
                          analysis.updated_time).split('.')[0],
      'analysis_complete': analysis_complete,
      'build_level_number': build_level_number,
      'revision_level_number': revision_level_number,
      'error': analysis.error_message,
      'show_admin_options': self._ShowCustomRunOptions(analysis),
      'show_debug_options': self._ShowDebugInfo(),
      'pipeline_status_path': analysis.pipeline_status_path,

      # Fields for the new UI.
      'master_name': analysis.original_master_name or analysis.master_name,
      'builder_name': analysis.original_builder_name or analysis.builder_name,
      'build_number': analysis.original_build_number or analysis.build_number,
      'step_name': analysis.original_step_name or analysis.step_name,
      'test_name': analysis.original_test_name or analysis.test_name,
      'regression_range_upper': (regression_range.upper.commit_position
                                 if regression_range.upper else None),
      'regression_range_lower': (regression_range.lower.commit_position
                                 if regression_range.lower else None),
      'culprit_url': culprit.get('url', ''),
      'culprit_revision': (culprit.get('commit_position', 0) or
                           culprit.get('git_hash', '')),
      'culprit_confidence': culprit_confidence,
      'bug_id': str(analysis.bug_id) if analysis.bug_id else '',
      'status': analysis_status.STATUS_TO_DESCRIPTION.get(status).lower(),
  }

  if (auth_util.IsCurrentUserAdmin() and analysis.completed and
      analysis.triage_history):
    data['triage_history'] = analysis.GetTriageHistory()

  data['pending_time'] = time_util.FormatDuration(
      analysis.request_time, analysis.start_time or time_util.GetUTCNow())
  data['duration'] = _GetDurationForAnalysis(analysis)

  data['pass_rates'] = _GetCoordinatesData(analysis)

  # Show the most up-to-date flakiness.
  data['most_recent_flakiness'] = _GetRecentFlakinessInfo(analysis)

  return {'template': 'flake/result.html', 'data': data}
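# Behavior notes for the nested AsPercentString helper above, re-stated here
# since the original is local to HandleGet: falsy confidences render as an
# empty string rather than '0', which leaves the field blank when no
# confidence is available.
def _AsPercentStringSketch(val):
  return "{0:d}".format(int(round(val * 100.0))) if val else ''

assert _AsPercentStringSketch(0.714) == '71'  # round(71.4) == 71.
assert _AsPercentStringSketch(1.0) == '100'
assert _AsPercentStringSketch(0) == ''  # Falsy: blank, not '0'.
assert _AsPercentStringSketch(None) == ''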