def _CreateTestKeys(self):
    """Populates the statistic columns and test keys for this report.

    Builds TestMetadata keys for the unsuffixed test paths and for one set of
    statistic-suffixed paths per statistic column, plus OldStyleTest keys for
    all of them, and stores the combined list in self._test_keys.
    """
    clone = self._descriptor.Clone()

    # Columns naming statistics (per descriptor.STATISTICS) drive the
    # statistic-suffixed test paths.
    self._statistic_columns = [
        column for column in self._columns if column in descriptor.STATISTICS
    ]
    if clone.statistic and clone.statistic not in self._statistic_columns:
        self._statistic_columns.append(clone.statistic)

    # Unsuffixed paths: expand with the statistic cleared.
    clone.statistic = None
    plain_paths = clone.ToTestPathsSync()
    self._unsuffixed_test_metadata_keys = [
        utils.TestMetadataKey(path) for path in plain_paths
    ]

    # One batch of suffixed paths per statistic column.
    suffixed_paths = []
    for stat in self._statistic_columns:
        clone.statistic = stat
        suffixed_paths.extend(clone.ToTestPathsSync())

    metadata_keys = [utils.TestMetadataKey(path) for path in suffixed_paths]
    metadata_keys.extend(self._unsuffixed_test_metadata_keys)

    every_path = suffixed_paths + plain_paths
    old_style_keys = [utils.OldStyleTestKey(path) for path in every_path]
    self._test_keys = old_style_keys + metadata_keys
def _GetInternalOnly(template):
    """Returns True if any test referenced by the template is internal-only.

    Fetches TestMetadata for both the unsuffixed test paths and the 'avg'
    suffixed variants of every row descriptor, in parallel.
    """
    futures = []
    for table_row in template['rows']:
        for desc in report_query.TableRowDescriptors(table_row):
            for path in desc.ToTestPathsSync():
                futures.append(utils.TestMetadataKey(path).get_async())
            # Also look at the 'avg' suffixed variants of each descriptor.
            desc.statistic = 'avg'
            for path in desc.ToTestPathsSync():
                futures.append(utils.TestMetadataKey(path).get_async())
    ndb.Future.wait_all(futures)
    entities = (future.get_result() for future in futures)
    return any(entity.internal_only for entity in entities if entity)
def GetAlertsForTestAsync(cls, test_key, limit=None):
    """Asynchronously fetches alerts whose test matches either key style.

    The 'test' property may hold either a TestMetadata or an old-style Test
    key, so both forms are matched.
    """
    candidate_keys = [
        utils.TestMetadataKey(test_key),
        utils.OldStyleTestKey(test_key),
    ]
    alerts = yield cls.query(
        cls.test.IN(candidate_keys)).fetch_async(limit=limit)
    raise ndb.Return(alerts)
def CreateTableConfig(name, bots, tests, layout, username, override):
    """Performs checks to verify bots and layout are valid, sets internal_only.

    Args:
      name: User friendly name for the report.
      bots: List of master/bot pairs.
      tests: List of test paths.
      layout: JSON serializable layout for the table.
      username: Email address of user creating the report.
      override: Whether an existing TableConfig with this name may be replaced.

    Returns:
      The created TableConfig entity.

    Raises:
      BadRequestError: A bot or test is invalid, the name already exists (and
          override is not set), or the layout is not valid JSON.
    """
    internal_only = False
    valid_bots = []
    for bot in bots:
        if '/' not in bot:
            raise BadRequestError('Invalid Master/Bot: %s' % bot)
        # Split once instead of twice as the original did.
        parts = bot.split('/')
        master_name, bot_name = parts[0], parts[1]
        entity_key = ndb.Key('Master', master_name, 'Bot', bot_name)
        entity = entity_key.get()
        if not entity:
            raise BadRequestError('Invalid Master/Bot: %s' % bot)
        valid_bots.append(entity_key)
        # Any internal-only bot makes the whole table internal-only.
        if entity.internal_only:
            internal_only = True

    table_check = ndb.Key('TableConfig', name).get()
    if table_check and not override:
        raise BadRequestError('%s already exists.' % name)

    for bot in bots:
        for test in tests:
            test_key = utils.TestMetadataKey(bot + '/' + test)
            if not test_key.get():
                raise BadRequestError('%s is not a valid test.' % test)

    # The redundant `except BadRequestError: raise BadRequestError(...)`
    # wrapper was removed: it lost the traceback and relied on the Python 2
    # only `.message` attribute. The errors above now propagate directly.
    try:
        json.loads(layout)
        # TODO(jessimb): Verify that the layout matches what is expected
    except ValueError:
        raise BadRequestError('Invalid JSON for table layout')

    # Input validates, create table now.
    table_config = TableConfig(
        id=name, bots=valid_bots, tests=tests, table_layout=layout,
        internal_only=internal_only, username=username)
    table_config.put()
    return table_config
def testTestMetadataKey_Test(self):
    """An old-style Test key converts to the equivalent TestMetadata key."""
    source_key = ndb.Key(
        'Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric')
    converted = utils.TestMetadataKey(source_key)
    self.assertEqual('TestMetadata', converted.kind())
    self.assertEqual('m/b/suite/metric', converted.id())
    self.assertEqual(('TestMetadata', 'm/b/suite/metric'), converted.flat())
def _GetStatistic(self, datum, desc, rev, stat):
    """Reads the value of statistic `stat` at revision `rev` into `datum`.

    Finds the statistic-suffixed tests for the descriptor, takes the data row
    with the highest revision among them, and records its revision in `datum`.

    Returns (via ndb.Return):
      The newest matching data row's value, or None when no suffixed test or
      data row exists.
    """
    desc = desc.Clone()
    desc.statistic = stat
    test_paths = yield desc.ToTestPathsAsync()
    logging.info('_GetStatistic %r', test_paths)
    suffixed_tests = yield [
        utils.TestMetadataKey(test_path).get_async()
        for test_path in test_paths
    ]
    suffixed_tests = [t for t in suffixed_tests if t]
    if not suffixed_tests:
        raise ndb.Return(None)
    last_data_row = None
    for test in suffixed_tests:
        if stat == 'avg':
            # Only the 'avg' statistic carries units/improvement direction.
            datum['units'] = test.units
            datum['improvement_direction'] = test.improvement_direction
        test_path = utils.TestPath(test.key)
        data_row = yield self._GetDataRow(test_path, rev)
        # BUGFIX: _GetDataRow may return None; the old comparison dereferenced
        # it (data_row.revision) once a previous test had produced a row.
        if data_row and (not last_data_row or
                         data_row.revision > last_data_row.revision):
            last_data_row = data_row
    if not last_data_row:
        raise ndb.Return(None)
    datum['revision'] = last_data_row.revision
    raise ndb.Return(last_data_row.value)
def _AddMockDataForTestingUnits(self, with_units, with_test=True):
    """Adds a sample anomaly without units.

    Args:
      with_units: Boolean specifying if the anomaly.test should have units.
      with_test: Boolean; when False, the anomaly is stored with no test key.

    Returns:
      The ndb key of the stored Anomaly entity.
    """
    testing_common.AddTests(['ChromiumPerf'], ['mac'], _TESTS)
    test_row = utils.TestMetadataKey(
        'ChromiumPerf/mac/suite/graph_a/trace_a').get()
    # Test row must have units.
    if with_units:
        test_row.units = 'ms'
        test_row.put()
    # Clearing the key simulates an anomaly whose test reference is missing.
    if not with_test:
        test_row.key = None
    anomaly_row = anomaly.Anomaly(
        start_revision=12345,
        end_revision=12355,
        test=test_row.key,
    ).put()
    return anomaly_row
def testTestKeys(self):
    """QueryAsync(test_keys=...) returns only anomalies on those tests."""
    self._CreateAnomaly()
    wanted_path = 'adept/android/lodging/assessment/story'
    self._CreateAnomaly(test=wanted_path)
    query_result = anomaly.Anomaly.QueryAsync(
        test_keys=[utils.TestMetadataKey(wanted_path)]).get_result()
    matches = query_result[0]
    self.assertEqual(1, len(matches))
    self.assertEqual(wanted_path, matches[0].test.id())
def GetTestMetadataKey(self):
    """Get the key for the TestMetadata entity of this alert.

    We are in the process of converting from Test entities to TestMetadata.
    Until this is done, it's possible that an alert may store either Test or
    TestMetadata in the 'test' KeyProperty. This gets the TestMetadata key
    regardless of what's stored.
    """
    return utils.TestMetadataKey(self.test)
def _FetchHistogram(self, test, revision):
    """Loads the Histogram for (test, revision) into this response, if any."""
    hist = yield histogram.Histogram.query(
        histogram.Histogram.test == utils.TestMetadataKey(test),
        histogram.Histogram.revision == revision).get_async()
    if hist is None:
        return
    if hist.internal_only:
        # Any internal-only histogram marks the whole response as private.
        self._private = True
    self._Datum(hist.revision)['histogram'] = hist.data
def TestKeysForReportTemplate(template_id):
    """Yields both key styles for every test path in a report template.

    Yields nothing when the template does not exist.
    """
    template = ndb.Key('ReportTemplate', int(template_id)).get()
    if template is None:
        return
    for table_row in template.template['rows']:
        for row_descriptor in report_query.TableRowDescriptors(table_row):
            for path in row_descriptor.ToTestPathsSync():
                yield utils.TestMetadataKey(path)
                yield utils.OldStyleTestKey(path)
def _GetDataRow(self, test_path, rev):
    """Fetches the single Row for test_path at rev, trying both key styles.

    Returns (via ndb.Return):
      The Row entity, or None when no row is found or when both key styles
      unexpectedly match (ambiguous data).
    """
    entities = yield [
        self._GetDataRowForKey(utils.TestMetadataKey(test_path), rev),
        self._GetDataRowForKey(utils.OldStyleTestKey(test_path), rev)]
    entities = [e for e in entities if e]
    if not entities:
        raise ndb.Return(None)
    if len(entities) > 1:
        # logging.warn is a deprecated alias; use logging.warning as the rest
        # of the file does.
        logging.warning('Found too many Row entities: %r %r', rev, test_path)
        raise ndb.Return(None)
    raise ndb.Return(entities[0])
def _GetTestToUnitsMap(bots, tests):
    """Grabs the units on each test for only one bot."""
    units_map = {}
    if not bots:
        return units_map
    # Units are assumed to be the same across bots, so only the first is used.
    first_bot = bots[0]
    for test in tests:
        entity = utils.TestMetadataKey(first_bot + '/' + test).get()
        if entity:
            units_map[test] = entity.units
    return units_map
def testGetAlertsForTest(self):
    """GetAlertsForTest matches both old-style and TestMetadata keys."""
    old1 = utils.OldStyleTestKey('master/bot/test1/metric')
    new1 = utils.TestMetadataKey('master/bot/test1/metric')
    old2 = utils.OldStyleTestKey('master/bot/test2/metric')
    new2 = utils.TestMetadataKey('master/bot/test2/metric')
    for anomaly_id, test_key in (('old_1', old1), ('old_1a', old1),
                                 ('old_2', old2), ('new_1', new1),
                                 ('new_2', new2), ('new_2a', new2)):
        anomaly.Anomaly(id=anomaly_id, test=test_key).put()

    key1_alerts = anomaly.Anomaly.GetAlertsForTest(new1)
    self.assertEqual(['new_1', 'old_1', 'old_1a'],
                     [a.key.id() for a in key1_alerts])
    key2_alerts = anomaly.Anomaly.GetAlertsForTest(old2)
    self.assertEqual(['new_2', 'new_2a', 'old_2'],
                     [a.key.id() for a in key2_alerts])
    limited = anomaly.Anomaly.GetAlertsForTest(old2, limit=2)
    self.assertEqual(['new_2', 'new_2a'], [a.key.id() for a in limited])
def UpdateParentAsync(self, **ctx_options):
    """Marks this Row's parent TestMetadata as having rows, if not already."""
    parent = yield utils.TestMetadataKey(
        self.key.parent().id()).get_async(**ctx_options)
    if parent is None:
        # A Row should never exist without its TestMetadata; log and bail.
        logging.warning(
            'Row put without valid TestMetadata. Parent key: %s',
            self.key.parent())
        return
    if parent.has_rows:
        return
    parent.has_rows = True
    yield parent.put_async(**ctx_options)
def testOnlyDeleteKind(self):
    """DeleteAllEntities removes only entities of the requested kind."""
    anomaly.Anomaly(
        id='anomaly',
        test=utils.TestMetadataKey('master/bot/suite/measurement')).put()
    page_state.PageState(id='page_state').put()

    def _Count(kind):
        # Count entities of the given kind without fetching full payloads.
        return len(ndb.Query(kind=kind).fetch(keys_only=True))

    self.assertEqual(1, _Count('PageState'))
    self.assertEqual(1, _Count('Anomaly'))
    delete_all_entities.DeleteAllEntities('PageState')
    self.assertEqual(0, _Count('PageState'))
    self.assertEqual(1, _Count('Anomaly'))
def StoreUnitsInAnomalyEntity(entity):
    """Puts units field from the TestMetaData entity into the anomaly directly.

    We would like to store the units in the anomaly directly, for speedier
    lookup.

    Args:
      entity: The Anomaly entity to check.

    Yields:
      One datastore mutation operation.
    """
    if entity.test:
        test_key = utils.TestMetadataKey(entity.test)
        test = test_key.get()
        if test:
            entity.units = test.units
    # The entity is re-put even when no test/units were found.
    yield op.db.Put(entity)
def _pre_put_hook(self):
    """Sets the has_rows property of the parent test before putting this Row.

    This isn't atomic because the parent_test put() and Row put() don't happen
    in the same transaction. But in practice it shouldn't be an issue because
    the parent test will get more points as the test runs.
    """
    parent = utils.TestMetadataKey(self.key.parent().id()).get()
    # If the TestMetadata pointed to by the parent is not valid, that
    # indicates that a TestMetadata entity was not properly created in
    # add_point.
    if parent is None:
        logging.warning(
            'Row put without valid TestMetadata. Parent key: %s',
            self.key.parent())
        return
    if parent.has_rows:
        return
    parent.has_rows = True
    parent.put()
def _GetRow(self, tri, table_row, desc):
    """Populates one table row's cells for every requested revision.

    Prefers tests whose paths carry no statistic suffix; when none exist,
    falls back to the statistic-suffixed tests.
    """
    # First try to find the unsuffixed test.
    test_paths = yield desc.ToTestPathsAsync()
    logging.info('_GetRow %r', test_paths)
    unsuffixed_tests = yield [
        utils.TestMetadataKey(test_path).get_async()
        for test_path in test_paths
    ]
    unsuffixed_tests = [t for t in unsuffixed_tests if t]
    if not unsuffixed_tests:
        # Fall back to suffixed tests.
        # NOTE(review): there is deliberately no early return here; when this
        # branch runs, unsuffixed_tests is empty so the loop below is a no-op.
        yield [
            self._GetSuffixedCell(tri, table_row, desc, rev)
            for rev in self._revisions
        ]
    for test in unsuffixed_tests:
        test_path = utils.TestPath(test.key)
        yield [
            self._GetUnsuffixedCell(tri, table_row, desc, test, test_path, rev)
            for rev in self._revisions
        ]
def _GetStatistic(self, datum, desc, rev, stat):
    """Reads the value of statistic `stat` at revision `rev` into `datum`.

    Returns (via ndb.Return):
      The data row's value, or None when no suffixed test or data row exists.

    Raises:
      ValueError: More than one suffixed test matched the descriptor.
    """
    desc = desc.Clone()
    desc.statistic = stat
    test_paths = yield desc.ToTestPathsAsync()
    suffixed_tests = yield [
        utils.TestMetadataKey(test_path).get_async()
        for test_path in test_paths
    ]
    suffixed_tests = [t for t in suffixed_tests if t]
    if not suffixed_tests:
        raise ndb.Return(None)
    if len(suffixed_tests) > 1:
        # logging.warn is a deprecated alias; use logging.warning as the rest
        # of the file does. The ValueError also carries a message now.
        logging.warning('Found too many suffixed tests: %r', test_paths)
        raise ValueError('Found too many suffixed tests: %r' % (test_paths,))
    test = suffixed_tests[0]
    if stat == 'avg':
        # Only the 'avg' statistic carries units/improvement direction.
        datum['units'] = test.units
        datum['improvement_direction'] = test.improvement_direction
    test_path = utils.TestPath(test.key)
    data_row = yield self._GetDataRow(test_path, rev)
    if not data_row:
        raise ndb.Return(None)
    datum['revision'] = data_row.revision
    raise ndb.Return(data_row.value)
def _GetRow(self, tri, table_row, desc):
    """Populates one table row's cells, preferring unsuffixed tests.

    When no unsuffixed test exists, falls back to statistic-suffixed tests;
    when more than one unsuffixed test matches, gives up on the row.
    """
    # First try to find the unsuffixed test.
    unsuffixed_tests = yield [
        utils.TestMetadataKey(test_path).get_async()
        for test_path in (yield desc.ToTestPathsAsync())
    ]
    unsuffixed_tests = [t for t in unsuffixed_tests if t]
    if len(unsuffixed_tests) > 1:
        # logging.warn is a deprecated alias; use logging.warning as the rest
        # of the file does.
        logging.warning('Found too many unsuffixed tests: %r',
                        [utils.TestPath(t.key) for t in unsuffixed_tests])
        raise ndb.Return()
    if unsuffixed_tests:
        test = unsuffixed_tests[0]
        test_path = utils.TestPath(test.key)
        yield [
            self._GetUnsuffixedCell(tri, table_row, desc, test, test_path, rev)
            for rev in self._revisions
        ]
        raise ndb.Return()
    # Fall back to suffixed tests.
    yield [
        self._GetSuffixedCell(tri, table_row, desc, rev)
        for rev in self._revisions
    ]
    raise ndb.Return()
def testTestMetadataKey_String(self):
    """A test-path string maps directly onto a TestMetadata key."""
    path = 'm/b/suite/metric/page'
    converted = utils.TestMetadataKey(path)
    self.assertEqual('TestMetadata', converted.kind())
    self.assertEqual(path, converted.id())
    self.assertEqual(('TestMetadata', path), converted.flat())
def testTestMetadataKey_TestMetadata(self):
    """A key that is already a TestMetadata key is returned unchanged."""
    already_converted = ndb.Key('TestMetadata', 'm/b/suite/metric')
    self.assertEqual(already_converted,
                     utils.TestMetadataKey(already_converted))
def QueryAsync(cls, bot_name=None, bug_id=None, count_limit=0,
               deadline_seconds=50, inequality_property=None,
               is_improvement=None, key=None, keys_only=False, limit=100,
               master_name=None, max_end_revision=None,
               max_start_revision=None, max_timestamp=None,
               min_end_revision=None, min_start_revision=None,
               min_timestamp=None, recovered=None, sheriff=None,
               start_cursor=None, test=None, test_suite_name=None):
    """Queries Anomaly entities, chasing cursors until results are found.

    When `key` is given, fetches that single entity directly. Otherwise
    builds a filtered query and repeatedly fetches pages until some results
    survive the post-query filters or `deadline_seconds` elapses.

    Returns (via ndb.Return):
      A (results, start_cursor, count) tuple; start_cursor is None when
      there are no more pages.
    """
    if key:
        # This tasklet isn't allowed to catch the internal_only AssertionError.
        alert = yield ndb.Key(urlsafe=key).get_async()
        raise ndb.Return(([alert], None, 1))
    # post_filters can cause results to be empty, depending on the shape of the
    # data and which filters are applied in the query and which filters are
    # applied after the query. Automatically chase cursors until some results
    # are found, but stay under the request timeout.
    results = []
    deadline = time.time() + deadline_seconds
    while not results and time.time() < deadline:
        query = cls.query()
        if sheriff is not None:
            # Only filter on sheriff when the Sheriff entity actually exists.
            sheriff_key = ndb.Key('Sheriff', sheriff)
            sheriff_entity = yield sheriff_key.get_async()
            if sheriff_entity:
                query = query.filter(cls.sheriff == sheriff_key)
        if is_improvement is not None:
            query = query.filter(cls.is_improvement == is_improvement)
        if bug_id is not None:
            # An empty string means "no bug assigned" (bug_id == None).
            if bug_id == '':
                bug_id = None
            else:
                bug_id = int(bug_id)
            query = query.filter(cls.bug_id == bug_id)
        if recovered is not None:
            query = query.filter(cls.recovered == recovered)
        if test:
            # Match both key styles for the test.
            query = query.filter(
                cls.test.IN([
                    utils.OldStyleTestKey(test),
                    utils.TestMetadataKey(test)
                ]))
            # IN queries require ordering by key first.
            query = query.order(cls.key)
        if master_name:
            query = query.filter(cls.master_name == master_name)
        if bot_name:
            query = query.filter(cls.bot_name == bot_name)
        if test_suite_name:
            query = query.filter(cls.benchmark_name == test_suite_name)
        query, post_filters = cls._InequalityFilters(
            query, inequality_property, min_end_revision, max_end_revision,
            min_start_revision, max_start_revision, min_timestamp,
            max_timestamp)
        if post_filters:
            # Post-filters need full entities, not just keys.
            keys_only = False
        query = query.order(-cls.timestamp)
        futures = [
            query.fetch_page_async(limit, start_cursor=start_cursor,
                                   keys_only=keys_only)
        ]
        if count_limit:
            futures.append(query.count_async(count_limit))
        query_duration = timing.WallTimeLogger('query_duration')
        with query_duration:
            yield futures
        results, start_cursor, more = futures[0].get_result()
        if count_limit:
            count = futures[1].get_result()
        else:
            count = len(results)
        logging.info('query_results_count=%d', len(results))
        if results:
            logging.info('duration_per_result=%f',
                         query_duration.seconds / len(results))
        if post_filters:
            # Keep only results passing every post-query filter.
            results = [
                alert for alert in results
                if all(post_filter(alert) for post_filter in post_filters)
            ]
        if not more:
            start_cursor = None
        if not start_cursor:
            break
    # NOTE(review): if the while loop body never runs (e.g. non-positive
    # deadline_seconds), `count` is unbound here — confirm callers never pass
    # deadline_seconds <= 0.
    raise ndb.Return((results, start_cursor, count))
def testTestMetadataKey_None(self):
    """TestMetadataKey passes None through unchanged."""
    self.assertIsNone(utils.TestMetadataKey(None))
def QueryAsync(cls, bot_name=None, bug_id=None, count_limit=0,
               deadline_seconds=50, inequality_property=None,
               is_improvement=None, key=None, keys_only=False, limit=100,
               master_name=None, max_end_revision=None,
               max_start_revision=None, max_timestamp=None,
               min_end_revision=None, min_start_revision=None,
               min_timestamp=None, recovered=None, subscriptions=None,
               start_cursor=None, test=None, test_keys=None,
               test_suite_name=None, project_id=None):
    """Queries Anomaly entities, chasing cursors until results are found.

    When `key` is given, fetches that single entity directly. Otherwise
    builds a filtered query — tracking which properties received equality
    filters so _InequalityFilters can pick a valid inequality property —
    and repeatedly fetches pages until some results survive the post-query
    filters or `deadline_seconds` elapses.

    Returns (via ndb.Return):
      A (results, start_cursor, count) tuple; start_cursor is None when
      there are no more pages.
    """
    if key:
        # This tasklet isn't allowed to catch the internal_only AssertionError.
        alert = yield ndb.Key(urlsafe=key).get_async()
        raise ndb.Return(([alert], None, 1))
    # post_filters can cause results to be empty, depending on the shape of the
    # data and which filters are applied in the query and which filters are
    # applied after the query. Automatically chase cursors until some results
    # are found, but stay under the request timeout.
    results = []
    deadline = time.time() + deadline_seconds
    while not results and time.time() < deadline:
        query = cls.query()
        equality_properties = []
        if subscriptions:  # Empty subscriptions is not allowed in query
            query = query.filter(cls.subscription_names.IN(subscriptions))
            equality_properties.append('subscription_names')
            inequality_property = 'key'
        if is_improvement is not None:
            query = query.filter(cls.is_improvement == is_improvement)
            equality_properties.append('is_improvement')
            inequality_property = 'key'
        if bug_id is not None:
            # Empty string means "no bug assigned".
            if bug_id == '':
                query = query.filter(cls.bug_id == None)
                equality_properties.append('bug_id')
                inequality_property = 'key'
            elif bug_id != '*':
                query = query.filter(cls.bug_id == int(bug_id))
                equality_properties.append('bug_id')
                inequality_property = 'key'
            # bug_id='*' translates to bug_id != None, which is handled with
            # the other inequality filters.
        if project_id is not None:
            query = query.filter(cls.project_id == project_id)
            equality_properties.append('project_id')
            inequality_property = 'key'
        if recovered is not None:
            query = query.filter(cls.recovered == recovered)
            equality_properties.append('recovered')
            inequality_property = 'key'
        if test or test_keys:
            if not test_keys:
                test_keys = []
            if test:
                # Match both key styles for the test.
                test_keys += [
                    utils.OldStyleTestKey(test),
                    utils.TestMetadataKey(test)
                ]
            query = query.filter(cls.test.IN(test_keys))
            # IN queries require ordering by key first.
            query = query.order(cls.key)
            equality_properties.append('test')
            inequality_property = 'key'
        if master_name:
            query = query.filter(cls.master_name == master_name)
            equality_properties.append('master_name')
            inequality_property = 'key'
        if bot_name:
            query = query.filter(cls.bot_name == bot_name)
            equality_properties.append('bot_name')
            inequality_property = 'key'
        if test_suite_name:
            query = query.filter(cls.benchmark_name == test_suite_name)
            equality_properties.append('benchmark_name')
            inequality_property = 'key'
        query, post_filters = cls._InequalityFilters(
            query, equality_properties, inequality_property, bug_id,
            min_end_revision, max_end_revision, min_start_revision,
            max_start_revision, min_timestamp, max_timestamp)
        if post_filters:
            # Post-filters need full entities, not just keys.
            keys_only = False
        query = query.order(-cls.timestamp, cls.key)
        futures = [
            query.fetch_page_async(limit, start_cursor=start_cursor,
                                   keys_only=keys_only)
        ]
        if count_limit:
            futures.append(query.count_async(count_limit))
        query_duration = timing.WallTimeLogger('query_duration')
        with query_duration:
            yield futures
        results, start_cursor, more = futures[0].get_result()
        if count_limit:
            count = futures[1].get_result()
        else:
            count = len(results)
        logging.info('query_results_count=%d', len(results))
        if results:
            logging.info('duration_per_result=%f',
                         query_duration.seconds / len(results))
        if post_filters:
            # Keep only results passing every post-query filter.
            results = [
                alert for alert in results
                if all(post_filter(alert) for post_filter in post_filters)
            ]
        if not more:
            start_cursor = None
        if not start_cursor:
            break
    # NOTE(review): if the while loop body never runs (e.g. non-positive
    # deadline_seconds), `count` is unbound here — confirm callers never pass
    # deadline_seconds <= 0.
    raise ndb.Return((results, start_cursor, count))
def GetAlertsForTest(cls, test_key, limit=None):
    """Synchronously fetches alerts whose test matches either key style.

    The 'test' property may hold either a TestMetadata or an old-style Test
    key, so both forms are matched.
    """
    matching_keys = [
        utils.TestMetadataKey(test_key),
        utils.OldStyleTestKey(test_key),
    ]
    return cls.query(cls.test.IN(matching_keys)).fetch(limit=limit)