def testPost(self):
  """POSTing to /update_test_suites populates the previously-empty cache."""
  self._AddSampleData()
  # Nothing has been cached yet.
  self.assertIsNone(update_test_suites.FetchCachedTestSuites())
  self.testapp.post('/update_test_suites')
  # After the request, each sample suite has a masters entry.
  expected = {
      suite: {'mas': {'Chromium': {'mac': False, 'win7': False}}}
      for suite in ('dromaeo', 'scrolling', 'really')
  }
  self.assertEqual(expected, update_test_suites.FetchCachedTestSuites())
def testPost_ForcesCacheUpdate(self):
  """A POST refreshes the cached suite list even when it is already set."""
  cache_key = update_test_suites._NamespaceKey(
      update_test_suites._LIST_SUITES_CACHE_KEY)
  stored_object.Set(cache_key, {'foo': 'bar'})
  self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
  self._AddSampleData()
  # Because there is something cached, the cache is not automatically
  # updated when new data is added.
  self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
  # Making a request to /update_test_suites forces an update.
  self.testapp.post('/update_test_suites')
  expected = {
      suite: {'mas': {'Chromium': {'mac': False, 'win7': False}}}
      for suite in ('dromaeo', 'scrolling', 'really')
  }
  self.assertEqual(expected, update_test_suites.FetchCachedTestSuites())
def testFetchCachedTestSuites_Empty_UpdatesWhenFetching(self):
  """Fetching with an unset cache updates it before returning the list."""
  self._AddSampleData()
  expected = {
      suite: {'mas': {'Chromium': {'mac': False, 'win7': False}}}
      for suite in ('dromaeo', 'scrolling', 'really')
  }
  self.assertEqual(expected, update_test_suites.FetchCachedTestSuites())
def post(self):
  """Returns dynamic data for /group_report with some set of alerts.

  The set of alerts is determined by the sid, keys, bug ID, or revision given.

  Request parameters:
    keys: A comma-separated list of urlsafe Anomaly keys (optional).
    bug_id: A bug number on the Chromium issue tracker (optional).
    rev: A revision number (optional).
    sid: A hash of a group of keys from /short_uri (optional).

  Outputs:
    JSON for the /group_report page XHR request.
  """
  bug_id = self.request.get('bug_id')
  rev = self.request.get('rev')
  keys = self.request.get('keys')
  hash_code = self.request.get('sid')
  # sid takes precedence.
  if hash_code:
    state = ndb.Key(page_state.PageState, hash_code).get()
    # A stored page state holds a JSON-encoded list of keys. If no state
    # exists for this sid, |keys| keeps its raw 'keys' parameter value.
    if state:
      keys = json.loads(state.value)
  elif keys:
    keys = keys.split(',')
  try:
    alert_list = None
    if bug_id:
      try:
        alert_list, _, _ = anomaly.Anomaly.QueryAsync(
            bug_id=bug_id, limit=_QUERY_LIMIT).get_result()
      except ValueError:
        # QueryAsync raises ValueError for a malformed bug ID; surface it
        # as an input error.
        raise request_handler.InvalidInputError(
            'Invalid bug ID "%s".' % bug_id)
    elif keys:
      alert_list = GetAlertsForKeys(keys)
    elif rev:
      alert_list = GetAlertsAroundRevision(rev)
    else:
      raise request_handler.InvalidInputError(
          'No anomalies specified.')
    # Only entities of kind Anomaly are serialized; anything else in
    # |alert_list| is dropped here.
    alert_dicts = alerts.AnomalyDicts(
        [a for a in alert_list if a.key.kind() == 'Anomaly'])
    values = {
        'alert_list': alert_dicts,
        'test_suites': update_test_suites.FetchCachedTestSuites(),
    }
    if bug_id:
      values['bug_id'] = bug_id
    if keys:
      values['selected_keys'] = keys
    self.GetDynamicVariables(values)
    self.response.out.write(json.dumps(values))
  except request_handler.InvalidInputError as error:
    # Input errors are reported to the client as a JSON 'error' field
    # rather than an HTTP error status.
    self.response.out.write(json.dumps({'error': str(error)}))
def testFetchCachedTestSuites_NotEmpty(self):
  """Whatever value is already cached is returned verbatim."""
  cache_key = namespaced_stored_object.NamespaceKey(
      update_test_suites._LIST_SUITES_CACHE_KEY)
  stored_object.Set(cache_key, {'foo': 'bar'})
  self.assertEqual({'foo': 'bar'}, update_test_suites.FetchCachedTestSuites())
def _ShowAlerts(self, alert_list, bug_id=None):
  """Responds to an XHR from /group_report page with a JSON list of alerts.

  Args:
    alert_list: A list of Anomaly and/or StoppageAlert entities.
    bug_id: An integer bug ID.
  """
  anomalies = [a for a in alert_list if a.key.kind() == 'Anomaly']
  stoppage_alerts = [a for a in alert_list if a.key.kind() == 'StoppageAlert']
  alert_dicts = (
      alerts.AnomalyDicts(anomalies) +
      alerts.StoppageAlertDicts(stoppage_alerts))
  values = {
      'alert_list': alert_dicts[:_DISPLAY_LIMIT],
      # This code for getting the subtests is supposed to be used to sort out
      # which metrics are "core" vs "non-core". But it's extremely slow, and
      # also doesn't seem to work very well. Turn it off for now:
      # https://github.com/catapult-project/catapult/issues/2877
      #'subtests': _GetSubTestsForAlerts(alert_dicts),
      'bug_id': bug_id,
      'test_suites': update_test_suites.FetchCachedTestSuites(),
  }
  self.GetDynamicVariables(values)
  self.response.out.write(json.dumps(values))
def testPost_InternalOnly(self):
  """Internal-only suites are listed when posting with internal_only=true."""
  self.SetCurrentUser('*****@*****.**')
  self._AddSampleData()
  master_key = ndb.Key('Master', 'Chromium')
  graph_data.Bot(
      id='internal_mac', parent=master_key, internal_only=True).put()
  graph_data.TestMetadata(
      id='Chromium/internal_mac/internal_test', internal_only=True).put()
  self.testapp.post('/update_test_suites?internal_only=true')
  expected = {
      suite: {'mas': {'Chromium': {'mac': False, 'win7': False}}}
      for suite in ('dromaeo', 'scrolling', 'really')
  }
  expected['internal_test'] = {'mas': {'Chromium': {'internal_mac': False}}}
  self.assertEqual(expected, update_test_suites.FetchCachedTestSuites())
def _ShowAlerts(self, alert_list, bug_id=None):
  """Responds to an XHR from /group_report page with a JSON list of alerts.

  Args:
    alert_list: A list of Anomaly and/or StoppageAlert entities.
    bug_id: An integer bug ID.
  """
  anomalies = [a for a in alert_list if a.key.kind() == 'Anomaly']
  stoppage_alerts = [a for a in alert_list if a.key.kind() == 'StoppageAlert']
  alert_dicts = (
      alerts.AnomalyDicts(anomalies) +
      alerts.StoppageAlertDicts(stoppage_alerts))
  # Owner info is only looked up when the bug actually exists in datastore.
  owner_info = None
  if bug_id and ndb.Key('Bug', bug_id).get():
    owner_info = _GetOwnerInfo(alert_dicts)
  values = {
      'alert_list': alert_dicts[:_DISPLAY_LIMIT],
      'subtests': _GetSubTestsForAlerts(alert_dicts),
      'bug_id': bug_id,
      'owner_info': owner_info,
      'test_suites': update_test_suites.FetchCachedTestSuites(),
  }
  self.GetDynamicVariables(values)
  self.response.out.write(json.dumps(values))
def _CreateHealthReport(self, name, num_days, master):
  """Creates a BenchmarkHealthReport and queues per-benchmark fill tasks.

  Args:
    name: Report name; falls back to _DefaultReportName() when empty.
    num_days: Number of days the report covers (coerced with int()).
    master: Name of the master the report is for.
  """
  if not name:
    name = _DefaultReportName()
  report = benchmark_health_data.BenchmarkHealthReport(
      id=name, num_days=int(num_days), master=master)
  report.put()
  # Currently there are two ways to list benchmarks: what the dashboard
  # knows of, and what is in the go/chrome-benchmarks spreadsheet. In the
  # short term, list benchmarks from both sources.
  benchmark_names = set()
  dashboard_benchmarks = update_test_suites.FetchCachedTestSuites()
  for benchmark in dashboard_benchmarks:
    benchmark_names.add(benchmark)
    benchmark_health_data.BenchmarkHealthData(
        parent=report.key, id=benchmark, name=benchmark).put()
  spreadsheet_benchmarks = google_sheets_service.GetRange(
      _BENCHMARK_SHEET_ID, _BENCHMARK_SHEET_NAME, _BENCHMARK_RANGE)
  if not spreadsheet_benchmarks:
    # Best-effort: the report still proceeds with dashboard data only.
    logging.error('Failed to load go/chrome-benchmarks')
  else:
    for row in spreadsheet_benchmarks:
      if len(row) == 0:
        continue
      benchmark = row[0]
      owner = None
      # A two-column row is [benchmark, owner]; rows of any other width
      # carry no owner.
      if len(row) == 2:
        owner = row[1]
      benchmark_names.add(benchmark)
      data = ndb.Key('BenchmarkHealthReport', name,
                     'BenchmarkHealthData', benchmark).get()
      if not data:
        benchmark_health_data.BenchmarkHealthData(
            parent=report.key, id=benchmark, name=benchmark,
            owner=owner).put()
      else:
        # The dashboard loop above already created this entity; just
        # record the owner from the spreadsheet.
        data.owner = owner
        data.put()
  report.expected_num_benchmarks = len(benchmark_names)
  report.put()
  # Fan out one task per benchmark to fill in the report details.
  for benchmark_name in benchmark_names:
    params = {
        'benchmark': benchmark_name,
        'report_name': name,
        'num_days': num_days,
        'master': master,
    }
    taskqueue.add(url='/generate_benchmark_health_report',
                  params=params,
                  queue_name=_TASK_QUEUE_NAME)
def get(self):
  """Renders the UI for selecting graphs."""
  # Old-style URIs are redirected to the equivalent /report URL.
  old_uri_query = self._GetQueryStringForOldUri()
  if old_uri_query:
    self.redirect('/report?' + old_uri_query)
    return
  suites_json = json.dumps(update_test_suites.FetchCachedTestSuites())
  self.RenderHtml('report.html', {'test_suites': suites_json})
def _GetResponseValuesForMaster(self, master):
  """Lists benchmarks that have data on |master|, with bots and monitoring.

  Args:
    master: Name of a master, used to filter the cached suite list.

  Returns:
    A dict with a 'benchmarks' list sorted by benchmark name; each entry
    has 'name', 'monitored' (whether the suite has a 'mon' entry), and
    the sorted 'bots' for this master.
  """
  suites = update_test_suites.FetchCachedTestSuites()
  # Iterate items once instead of re-looking-up suites[b] repeatedly;
  # sorted() iterates the dict's keys directly, so no intermediate list
  # comprehension over .keys() is needed.
  benchmarks = [{
      'name': name,
      'monitored': bool(info.get('mon')),
      'bots': sorted(info['mas'][master]),
  } for name, info in suites.items() if master in info['mas']]
  return {
      'benchmarks': sorted(benchmarks, key=operator.itemgetter('name'))
  }
def post(self):
  """Gets dynamic data for selecting graphs"""
  values = {}
  self.GetDynamicVariables(values)
  # Expose only a fixed subset of the dynamic variables, plus the cached
  # test suite list.
  response = {
      field: values[field]
      for field in ('is_internal_user', 'login_url', 'revision_info',
                    'warning_bug', 'warning_message', 'xsrf_token')
  }
  response['test_suites'] = update_test_suites.FetchCachedTestSuites()
  self.response.out.write(json.dumps(response))
def get(self):
  """Renders the UI for selecting graphs."""
  # Old-style URIs are redirected to the equivalent /report URL.
  old_uri_query = self._GetQueryStringForOldUri()
  if old_uri_query:
    self.redirect('/report?' + old_uri_query)
    return
  # Treat the local dev server and the googleplex host as "development".
  is_dev = ('Development' in os.environ['SERVER_SOFTWARE']
            or self.request.host == 'chrome-perf.googleplex.com')
  self.RenderHtml('report.html', {
      'dev_version': is_dev,
      'test_suites': json.dumps(update_test_suites.FetchCachedTestSuites()),
  })
def _GetResponseValuesForBenchmark(self, benchmark, num_days, master):
  """Collects recent regressions and bot info for one benchmark.

  Args:
    benchmark: Benchmark name, matched against Anomaly.benchmark_name and
        used as a key into the cached test suite dict.
    num_days: How many days back to look for anomalies.
    master: Master name, matched against Anomaly.master_name.

  Returns:
    A dict with 'alerts' (recent non-improvement anomalies, newest first),
    'monitored' (whether the cached suite entry has a 'mon' list), and
    'bots' (bot names for this master from the cache).
  """
  values = {}
  query = anomaly.Anomaly.query(
      anomaly.Anomaly.benchmark_name == benchmark,
      anomaly.Anomaly.master_name == master,
      anomaly.Anomaly.is_improvement == False,
      anomaly.Anomaly.timestamp >
      datetime.datetime.now() - datetime.timedelta(days=num_days))
  query = query.order(-anomaly.Anomaly.timestamp)
  anomalies = query.fetch()
  values['alerts'] = alerts.AnomalyDicts(anomalies)
  # The cached test suite info contains info about monitoring and bots.
  benchmarks = update_test_suites.FetchCachedTestSuites()
  # bool() replaces the verbose if/else; an absent or empty 'mon' entry
  # means the benchmark is unmonitored.
  values['monitored'] = bool(benchmarks[benchmark].get('mon'))
  # list() keeps the value JSON-serializable on both Python 2 and 3
  # (dict.keys() is a view object on Python 3).
  values['bots'] = list(benchmarks[benchmark]['mas'][master].keys())
  return values
def _ShowAlerts(self, alert_list, selected_keys=None, bug_id=None):
  """Responds to an XHR from /group_report page with a JSON list of alerts.

  Args:
    alert_list: A list of Anomaly and/or StoppageAlert entities.
    selected_keys: Echoed back to the client under 'selected_keys';
        presumably the keys of alerts pre-selected on the page — confirm
        against the caller.
    bug_id: An integer bug ID.
  """
  anomaly_dicts = alerts.AnomalyDicts(
      [a for a in alert_list if a.key.kind() == 'Anomaly'])
  stoppage_alert_dicts = alerts.StoppageAlertDicts(
      [a for a in alert_list if a.key.kind() == 'StoppageAlert'])
  alert_dicts = anomaly_dicts + stoppage_alert_dicts
  values = {
      # Only the first _DISPLAY_LIMIT alerts are sent to the client.
      'alert_list': alert_dicts[:_DISPLAY_LIMIT],
      'bug_id': bug_id,
      'test_suites': update_test_suites.FetchCachedTestSuites(),
      'selected_keys': selected_keys,
  }
  self.GetDynamicVariables(values)
  self.response.out.write(json.dumps(values))
def testFetchSuites_BasicDescription(self):
  """A test's description shows up under the suite's 'des' entry."""
  self._AddSampleData()
  for test_path in ('Chromium/win7/scrolling', 'Chromium/mac/scrolling'):
    test = utils.TestKey(test_path).get()
    test.description = 'Description string.'
    test.put()
  expected = {
      suite: {'mas': {'Chromium': {'mac': False, 'win7': False}}}
      for suite in ('dromaeo', 'scrolling', 'really')
  }
  expected['scrolling']['des'] = 'Description string.'
  self.assertEqual(expected, update_test_suites.FetchCachedTestSuites())
def _GetResponseValuesForBenchmark(self, benchmark, num_days, master):
  """Collects sheriff-monitored alerts and bot info for one benchmark.

  Args:
    benchmark: Benchmark name.
    num_days: How many days back to look for anomalies.
    master: Master name, used to pick bots from the cached suite info.

  Returns:
    A dict with 'monitored', 'bots', and — when a sheriff monitors the
    benchmark — 'alerts' with recent non-improvement anomalies.
  """
  values = {}
  # The cached test suite info contains info about monitoring and bots.
  benchmarks = update_test_suites.FetchCachedTestSuites()
  sheriff = self._GetSheriffForBenchmark(benchmark, master, benchmarks)
  if sheriff:
    cutoff = datetime.datetime.now() - datetime.timedelta(days=num_days)
    query = anomaly.Anomaly.query(anomaly.Anomaly.sheriff == sheriff)
    query = query.filter(anomaly.Anomaly.is_improvement == False)
    query = query.filter(anomaly.Anomaly.timestamp > cutoff)
    query = query.order(-anomaly.Anomaly.timestamp)
    # The sheriff may cover several benchmarks; keep only this one.
    matching = [a for a in query.fetch()
                if self._BenchmarkName(a) == benchmark]
    values['monitored'] = True
    values['alerts'] = alerts.AnomalyDicts(matching)
  else:
    values['monitored'] = False
  values['bots'] = benchmarks[benchmark]['mas'][master].keys()
  return values
def _ShowAlerts(self, alert_list, bug_id=None):
  """Renders the template group_report.html with a list of alerts.

  Args:
    alert_list: A list of Anomaly and/or StoppageAlert entities.
    bug_id: An integer bug ID.
  """
  anomalies = [a for a in alert_list if a.key.kind() == 'Anomaly']
  stoppage_alerts = [a for a in alert_list if a.key.kind() == 'StoppageAlert']
  alert_dicts = (
      alerts.AnomalyDicts(anomalies) +
      alerts.StoppageAlertDicts(stoppage_alerts))
  # Owner info is only looked up when the bug actually exists in datastore.
  owner_info = None
  if bug_id and ndb.Key('Bug', bug_id).get():
    owner_info = _GetOwnerInfo(alert_dicts)
  self.RenderHtml('group_report.html', {
      'alert_list': json.dumps(alert_dicts[:_DISPLAY_LIMIT]),
      'subtests': json.dumps(_GetSubTestsForAlerts(alert_dicts)),
      'bug_id': bug_id,
      'owner_info': json.dumps(owner_info),
      'test_suites': json.dumps(update_test_suites.FetchCachedTestSuites()),
  })
def _GetResponseValuesForMaster(self, master):
  """Returns the sorted names of benchmarks that have data on |master|."""
  suites = update_test_suites.FetchCachedTestSuites()
  names = sorted(name for name in suites if master in suites[name]['mas'])
  return {'benchmarks': names}
def _FillBenchmarkDetailsToHealthReport(self, benchmark_name, report_name,
                                        num_days, master):
  """Fills in bot, alert, bug, review, and bisect data for one benchmark.

  Looks up the BenchmarkHealthData entity under the named report and
  populates it from the cached suite info, Row timestamps, recent
  anomalies, and bug details, then marks it complete.

  Args:
    benchmark_name: Name of the benchmark to fill in.
    report_name: ID of the parent BenchmarkHealthReport.
    num_days: How many days back to look for anomalies (coerced with int()).
    master: Master whose bots and anomalies are examined.
  """
  benchmark = ndb.Key('BenchmarkHealthReport', report_name,
                      'BenchmarkHealthData', benchmark_name).get()
  if not benchmark:
    return
  cached_data = update_test_suites.FetchCachedTestSuites().get(
      benchmark_name)
  # A benchmark the dashboard doesn't know about has nothing to fill in;
  # mark it complete so the report can finish.
  if not cached_data:
    benchmark.is_complete = True
    benchmark.put()
    return
  bots = cached_data['mas'].get(master, {}).keys()
  monitored_paths = cached_data.get('mon', [])
  # Kick off one async query per (bot, monitored path) for the newest Row.
  futures = set()
  for bot in bots:
    for path in monitored_paths:
      test_path = '%s/%s/%s/%s' % (master, bot, benchmark_name, path)
      query = graph_data.Row.query(graph_data.Row.parent_test ==
                                   utils.OldStyleTestKey(test_path))
      query = query.order(-graph_data.Row.revision)
      futures.add(query.get_async())
  # Drain the futures in completion order, recording each bot's most
  # recent data point timestamp.
  while futures:
    f = ndb.Future.wait_any(futures)
    futures.remove(f)
    row = f.get_result()
    if not row:
      continue
    # Bot name is the second component of the test path.
    bot = utils.TestPath(row.parent_test).split('/')[1]
    benchmark.bots.append(
        benchmark_health_data.BotHealthData(name=bot,
                                            last_update=row.timestamp))
  bug_ids = set()
  query = anomaly.Anomaly.query(
      anomaly.Anomaly.benchmark_name == benchmark_name,
      anomaly.Anomaly.master_name == master,
      anomaly.Anomaly.is_improvement == False,
      anomaly.Anomaly.timestamp >
      datetime.datetime.now() - datetime.timedelta(days=int(num_days)))
  query = query.order(-anomaly.Anomaly.timestamp)
  anomalies = query.fetch()
  for alert in anomalies:
    bug_id = alert.bug_id
    # Non-positive bug IDs are placeholders, not real tracker bugs.
    if bug_id and bug_id > 0:
      bug_ids.add(bug_id)
    benchmark.alerts.append(
        benchmark_health_data.AlertHealthData(
            bug_id=bug_id,
            test_path=utils.TestPath(alert.GetTestMetadataKey()),
            percent_changed=alert.GetDisplayPercentChanged(),
            absolute_delta=alert.GetDisplayAbsoluteChanged()))
  for bug_id in bug_ids:
    details = bug_details.GetBugDetails(bug_id)
    benchmark.bugs.append(
        benchmark_health_data.BugHealthData(
            bug_id=bug_id,
            num_comments=len(details['comments']),
            published=details['published'],
            state=details['state'],
            status=details['status'],
            summary=details['summary']))
    for review in details['review_urls']:
      benchmark.reviews.append(
          benchmark_health_data.ReviewData(review_url=review,
                                           bug_id=bug_id))
    for bisect in details['bisects']:
      benchmark.bisects.append(
          benchmark_health_data.BisectHealthData(
              bug_id=bug_id,
              buildbucket_link=bisect['buildbucket_link'],
              metric=bisect['metric'],
              status=bisect['status'],
              bot=bisect['bot']))
  benchmark.is_complete = True
  benchmark.put()
def post(self):
  """Returns dynamic data for /group_report with some set of alerts.

  The set of alerts is determined by the sid, keys, bug ID, or revision given.

  Request parameters:
    keys: A comma-separated list of urlsafe Anomaly keys (optional).
    bug_id: A bug number on the Chromium issue tracker (optional).
    rev: A revision number (optional).
    sid: A hash of a group of keys from /short_uri (optional).

  Outputs:
    JSON for the /group_report page XHR request.
  """
  bug_id = self.request.get('bug_id')
  rev = self.request.get('rev')
  keys = self.request.get('keys')
  hash_code = self.request.get('sid')
  # sid takes precedence.
  if hash_code:
    state = ndb.Key(page_state.PageState, hash_code).get()
    # A stored page state holds a JSON-encoded list of keys. If no state
    # exists for this sid, |keys| keeps its raw 'keys' parameter value.
    if state:
      keys = json.loads(state.value)
  elif keys:
    keys = keys.split(',')
  try:
    alert_list = None
    if bug_id:
      alert_list, extra_columns = GetAlertsWithBugId(bug_id)
    elif keys:
      alert_list, extra_columns = GetAlertsForKeys(keys)
    elif rev:
      alert_list, extra_columns = GetAlertsAroundRevision(rev)
    else:
      # TODO(qyearsley): Instead of just showing an error here, show a form
      # where the user can input a bug ID or revision.
      raise request_handler.InvalidInputError(
          'No anomalies specified.')
    # Anomaly and StoppageAlert entities are serialized with different
    # helpers, then concatenated for the response.
    anomaly_dicts = alerts.AnomalyDicts(
        [a for a in alert_list if a.key.kind() == 'Anomaly'])
    stoppage_alert_dicts = alerts.StoppageAlertDicts(
        [a for a in alert_list if a.key.kind() == 'StoppageAlert'])
    alert_dicts = anomaly_dicts + stoppage_alert_dicts
    values = {
        # Only the first _DISPLAY_LIMIT alerts are sent to the client.
        'alert_list': alert_dicts[:_DISPLAY_LIMIT],
        'extra_columns': extra_columns,
        'test_suites': update_test_suites.FetchCachedTestSuites(),
    }
    if bug_id:
      values['bug_id'] = bug_id
    if keys:
      values['selected_keys'] = keys
    self.GetDynamicVariables(values)
    self.response.out.write(json.dumps(values))
  except request_handler.InvalidInputError as error:
    # Input errors are reported to the client as a JSON 'error' field
    # rather than an HTTP error status.
    self.response.out.write(json.dumps({'error': str(error)}))
def testFetchCachedTestSuites_Empty_ReturnsNone(self):
  """An unset cache yields None rather than a freshly-computed list."""
  # Compiling the list of test suites on the fly would take too long, so
  # FetchCachedTestSuites just returns None when nothing is cached.
  self._AddSampleData()
  self.assertIsNone(update_test_suites.FetchCachedTestSuites())