def _AddInternalSampleData(self):
  """Adds some internal-only test data."""
  master_key = graph_data.Master(id='XMaster').put()
  graph_data.Bot(id='x-bot', parent=master_key, internal_only=True).put()
  test_key = graph_data.TestMetadata(
      id='XMaster/x-bot/xtest', internal_only=True).put()
  container_key = utils.GetTestContainerKey(test_key)
  # 50 internal-only rows with ids/values 1000..1049.
  for offset in range(50):
    row_id = offset + 1000
    graph_data.Row(
        parent=container_key, id=row_id, value=row_id,
        internal_only=True).put()
def _AddMockData(self):
  """Adds a ChromiumPerf master with two bots, each with dromaeo/dom Rows.

  For each of the win7 and mac bots, a dromaeo suite with a dom subtest is
  created, and Rows are added at every 5th revision in [15000, 16000).
  """
  master = graph_data.Master(id='ChromiumPerf').put()
  # Fixed: the original accumulated bot keys into a `bots` list that was
  # never read; the list has been removed.
  for name in ['win7', 'mac']:
    bot = graph_data.Bot(id=name, parent=master).put()
    test = graph_data.Test(id='dromaeo', parent=bot).put()
    dom_test = graph_data.Test(id='dom', parent=test, has_rows=True).put()
    test_container_key = utils.GetTestContainerKey(dom_test)
    for i in range(15000, 16000, 5):
      graph_data.Row(
          parent=test_container_key, id=i, value=float(i * 2.5),
          error=(i + 5)).put()
def _GetOrCreateBot(name, parent_key, internal_only):
  """Gets or creates a new Bot under the given Master."""
  bot = graph_data.Bot.get_by_id(name, parent=parent_key)
  if bot is None:
    # No such bot yet; create and store a fresh entity.
    logging.info('Adding bot %s/%s', parent_key.id(), name)
    bot = graph_data.Bot(
        id=name, parent=parent_key, internal_only=internal_only)
    bot.put()
    return bot
  # Bot exists; sync its internal_only flag if it has drifted.
  if bot.internal_only != internal_only:
    bot.internal_only = internal_only
    bot.put()
  return bot
def testPost_ValidData(self):
  """Posts a valid table config and verifies the stored TableConfig entity.

  Fixed: the original wrote the xsrf token directly into the module-level
  _SAMPLE_TABLE_CONFIG dict, mutating shared state that leaks into other
  tests. A per-test copy is posted instead.
  """
  self._AddInternalBotsToDataStore()
  config = dict(_SAMPLE_TABLE_CONFIG)
  config['xsrf_token'] = xsrf.GenerateToken(users.get_current_user())
  response = self.testapp.post('/create_health_report', config)
  self.assertIn('my_sample_config', response)
  table_entity = ndb.Key('TableConfig', 'my_sample_config').get()
  self.assertTrue(table_entity.internal_only)
  self.assertEqual('*****@*****.**', table_entity.username)
  self.assertEqual(
      ['my_test_suite/my_test', 'my_test_suite/my_other_test'],
      table_entity.tests)
  master_key = ndb.Key('Master', 'ChromiumPerf')
  win_bot = graph_data.Bot(
      id='win', parent=master_key, internal_only=False).key
  linux_bot = graph_data.Bot(
      id='linux', parent=master_key, internal_only=False).key
  bots = [win_bot, linux_bot]
  self.assertEqual(bots, table_entity.bots)
  self.assertEqual(
      '{ "system_health.memory_mobile/foreground/ashmem":'
      '["Foreground", "Ashmem"]}', table_entity.table_layout)
def _AddMockData(self):
  """Adds mock data to the datastore, not updating stored_object."""
  master_key = graph_data.Master(id='ChromiumPerf').put()
  fixed_timestamp = datetime.datetime(2013, 8, 1)
  for bot_name in ['win7', 'mac']:
    graph_data.Bot(id=bot_name, parent=master_key).put()
    graph_data.TestMetadata(id='ChromiumPerf/%s/dromaeo' % bot_name).put()
    subtest_key = graph_data.TestMetadata(
        id='ChromiumPerf/%s/dromaeo/dom' % bot_name, has_rows=True).put()
    container_key = utils.GetTestContainerKey(subtest_key)
    # Rows at every 5th revision, all stamped with the same fixed date.
    for rev in range(15000, 16000, 5):
      row = graph_data.Row(
          parent=container_key, id=rev, value=float(rev * 2.5))
      row.timestamp = fixed_timestamp
      row.put()
def testGetOrCreateAncestors_GetsExistingEntities(self):
  # Pre-populate the whole ancestor chain that the call below should find.
  master_key = graph_data.Master(id='ChromiumPerf', parent=None).put()
  bot_key = graph_data.Bot(id='win7', parent=master_key).put()
  parent_key = bot_key
  for test_name in ['dromaeo', 'dom', 'modify']:
    parent_key = graph_data.Test(id=test_name, parent=parent_key).put()
  leaf = add_point_queue._GetOrCreateAncestors(
      'ChromiumPerf', 'win7', 'dromaeo/dom/modify')
  self.assertEqual('modify', leaf.key.id())
  # No extra Test or Bot objects should have been added to the database
  # beyond the four that were put in before the _GetOrCreateAncestors call.
  self.assertEqual(1, len(graph_data.Master.query().fetch()))
  self.assertEqual(1, len(graph_data.Bot.query().fetch()))
  self.assertEqual(3, len(graph_data.Test.query().fetch()))
def _AddSampleTestData(self):
  """Adds some sample data used in the tests below."""
  master = graph_data.Master(id='TheMaster').put()
  graph_data.Bot(id='TheBot', parent=master).put()
  # Two suites without rows, then four leaf tests that have rows.
  for suite_path in ['TheMaster/TheBot/Suite1', 'TheMaster/TheBot/Suite2']:
    graph_data.TestMetadata(id=suite_path).put()
  leaf_paths = [
      'TheMaster/TheBot/Suite1/aaa',
      'TheMaster/TheBot/Suite1/bbb',
      'TheMaster/TheBot/Suite2/ccc',
      'TheMaster/TheBot/Suite2/ddd',
  ]
  for leaf_path in leaf_paths:
    graph_data.TestMetadata(id=leaf_path, has_rows=True).put()
def _PutEntitiesHalfInternal(self):
  """Puts entities (half internal-only) and returns the keys."""
  master = graph_data.Master(id='M').put()
  graph_data.Bot(parent=master, id='b').put()
  # (path, internal_only) pairs; the *x tests are the internal-only half.
  specs = [
      ('M/b/ax', True),
      ('M/b/a', False),
      ('M/b/b', False),
      ('M/b/bx', True),
      ('M/b/c', False),
      ('M/b/cx', True),
      ('M/b/d', False),
      ('M/b/dx', True),
  ]
  return [
      graph_data.TestMetadata(id=path, internal_only=internal).put()
      for path, internal in specs
  ]
def testGet_WithMaxTestsParam(self):
  master_key = graph_data.Master(id='XMaster').put()
  graph_data.Bot(id='x-bot', parent=master_key).put()
  # 20 tests, each with exactly one Row.
  for index in range(20):
    test_key = graph_data.TestMetadata(
        id='XMaster/x-bot/xtest-%d' % index).put()
    container_key = utils.GetTestContainerKey(test_key)
    graph_data.Row(parent=container_key, id=1, value=1).put()
  response = self.testapp.get(
      '/new_points', {'pattern': '*/*/*', 'max_tests': '12'})
  self.assertIn('matched 20 tests', response.body)
  self.assertIn('first 12 tests', response.body)
  # 12 points across 12 tests, plus one row for the header.
  self.assertEqual(13, len(re.findall(r'<tr>', response.body)))
def _PutEntitiesHalfInternal(self):
  """Puts entities (half internal-only) and returns the keys."""
  master = graph_data.Master(id='M').put()
  bot = graph_data.Bot(parent=master, id='b').put()
  # (name, internal_only) pairs; the *x tests are the internal-only half.
  specs = [
      ('ax', True),
      ('a', False),
      ('b', False),
      ('bx', True),
      ('c', False),
      ('cx', True),
      ('d', False),
      ('dx', True),
  ]
  return [
      graph_data.Test(id=name, parent=bot, internal_only=internal).put()
      for name, internal in specs
  ]
def testPost_InternalOnly(self):
  self.SetCurrentUser('*****@*****.**')
  self._AddSampleData()
  master_key = ndb.Key('Master', 'Chromium')
  graph_data.Bot(
      id='internal_mac', parent=master_key, internal_only=True).put()
  t = graph_data.TestMetadata(
      id='Chromium/internal_mac/internal_test', internal_only=True)
  t.UpdateSheriff()
  t.put()
  self.testapp.post('/update_test_suites?internal_only=true')
  # The public suites all share the same set of visible bots; only
  # internal_test lives on the internal-only bot.
  public_bots = {'Chromium': {'mac': False, 'win7': False}}
  expected = {
      'dromaeo': {'mas': public_bots},
      'internal_test': {'mas': {'Chromium': {'internal_mac': False}}},
      'scrolling': {'mas': public_bots},
      'really': {'mas': public_bots},
  }
  self.assertEqual(expected, update_test_suites.FetchCachedTestSuites())
def _PutEntitiesAllExternal(self):
  """Puts entities (none internal-only) and returns the keys."""
  master = graph_data.Master(id='M').put()
  graph_data.Bot(parent=master, id='b').put()
  tests = [
      graph_data.TestMetadata(id=test_id, internal_only=False)
      for test_id in ['M/b/a', 'M/b/b', 'M/b/c', 'M/b/d']
  ]
  # Sheriff all entities first, then store them, mirroring the original
  # two-phase order.
  for test in tests:
    test.UpdateSheriff()
  return [test.put() for test in tests]
def AddTests(masters, bots, tests_dict):
  """Adds data to the mock datastore.

  Args:
    masters: List of buildbot master names.
    bots: List of bot names.
    tests_dict: Nested dictionary of tests to add; keys are test names
        and values are nested dictionaries of tests to add.
  """
  for master_name in masters:
    master_key = graph_data.Master(id=master_name).put()
    for bot_name in bots:
      bot_key = graph_data.Bot(id=bot_name, parent=master_key).put()
      for test_name, subtests in tests_dict.items():
        test_key = graph_data.Test(id=test_name, parent=bot_key).put()
        _AddSubtest(test_key, subtests)
def testEdit_RemovePattern(self):
  """Tests removing a pattern from an AnomalyConfig."""
  self.SetCurrentUser('*****@*****.**', is_admin=True)
  config_key = anomaly_config.AnomalyConfig(
      id='Test Config', config={
          'a': 10
      }, patterns=['*/*/one', '*/*/two']).put()
  master = graph_data.Master(id='TheMaster').put()
  graph_data.Bot(id='TheBot', parent=master).put()
  # Create two tests, both initially overridden by the config above.
  test_keys = []
  for name in ['one', 'two']:
    test = graph_data.TestMetadata(
        id='TheMaster/TheBot/%s' % name,
        overridden_anomaly_config=config_key,
        has_rows=True)
    test.UpdateSheriff()
    test_keys.append(test.put())
  test_one, test_two = test_keys
  # Verify the state of the data before making the request.
  self.assertEqual(['*/*/one', '*/*/two'], config_key.get().patterns)
  self.assertEqual(['TheMaster/TheBot/one'],
                   list_tests.GetTestsMatchingPattern('*/*/one'))
  self.testapp.post(
      '/edit_anomaly_configs', {
          'add-edit': 'edit',
          'edit-name': 'Test Config',
          'config': '{"a": 10}',
          'patterns': ['*/*/two'],
          'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
      })
  self.ExecuteDeferredTasks('default')
  self.ExecuteTaskQueueTasks('/put_entities_task',
                             edit_config_handler._TASK_QUEUE_NAME)
  # Only the */*/two pattern should remain, and only test "two" should
  # still carry the overridden config.
  self.assertEqual(['*/*/two'], config_key.get().patterns)
  self.assertIsNone(test_one.get().overridden_anomaly_config)
  self.assertEqual('Test Config',
                   test_two.get().overridden_anomaly_config.string_id())
def _AddMockInternalData(self):
  """Adds internal-only sample data for the ChromiumPerf master.

  For each of the win7 and mac bots, an internal-only dromaeo suite with a
  dom subtest is created, with internal-only Rows at revisions 1..49.
  """
  master = graph_data.Master(id='ChromiumPerf').put()
  # Fixed: the original collected bot keys into a `bots` list that was
  # never read; the list (and the unused `bot` local) has been removed.
  for name in ['win7', 'mac']:
    graph_data.Bot(id=name, parent=master, internal_only=True).put()
    graph_data.TestMetadata(
        id='ChromiumPerf/%s/dromaeo' % name, internal_only=True).put()
    dom_test = graph_data.TestMetadata(
        id='ChromiumPerf/%s/dromaeo/dom' % name,
        has_rows=True,
        internal_only=True).put()
    test_container_key = utils.GetTestContainerKey(dom_test)
    for i in range(1, 50):
      graph_data.Row(
          parent=test_container_key,
          id=i,
          value=float(i * 2),
          error=(i + 10),
          internal_only=True).put()
def _AddTestColumns(self, start_rev=15000, end_rev=16500, step=3):
  """Adds a bunch of test data to the mock datastore.

  In particular, add Rows with revisions in the given range (but skipping
  some numbers, so the revisions are non-contiguous) under the dromaeo/dom
  test for winXP, win7, mac.

  Args:
    start_rev: Starting revision number.
    end_rev: Ending revision number.
    step: Difference between adjacent revisions.
  """
  master = graph_data.Master(id='ChromiumGPU')
  master.put()
  # Fixed: the original accumulated Bot entities into a `bots` list that
  # was never read; the list has been removed.
  rows = []
  for name in ['winXP', 'win7', 'mac']:
    graph_data.Bot(id=name, parent=master.key).put()
    test = graph_data.TestMetadata(id='ChromiumGPU/%s/dromaeo' % name)
    test.UpdateSheriff()
    test.put()
    for sub_name in ['dom', 'jslib']:
      sub_test = graph_data.TestMetadata(
          id='%s/%s' % (test.key.id(), sub_name),
          improvement_direction=anomaly.UP,
          has_rows=True)
      sub_test.UpdateSheriff()
      sub_test.put()
      test_container_key = utils.GetTestContainerKey(sub_test)
      for i in range(start_rev, end_rev, step):
        # Add Rows for one bot with revision numbers that aren't lined up
        # with the other bots.
        rev = i + 1 if name == 'mac' else i
        row = graph_data.Row(
            parent=test_container_key,
            id=rev,
            value=float(i * 2),
            r_webkit=int(i * 0.25),
            a_str='some_string',
            buildnumber=i - start_rev,
            a_tracing_uri='http://trace/%d' % i)
        rows.append(row)
  ndb.put_multi(rows)
def _CreateHistogramWithMeasurementAndAdd(self, status=200):
  """Uploads TEST_HISTOGRAM under a completed token and returns the token."""
  token_id = str(uuid.uuid4())
  test_path = 'Chromium/win7/suite/metric'
  token = upload_completion_token.Token(id=token_id).put().get()
  token.AddMeasurement(test_path, False).wait()
  token.UpdateState(upload_completion_token.State.COMPLETED)
  graph_data.Bot(
      key=ndb.Key('Master', 'Chromium', 'Bot', 'win7'),
      internal_only=False).put()
  payload = json.dumps([{
      'data': TEST_HISTOGRAM,
      'test_path': test_path,
      'benchmark_description': None,
      'revision': 123,
      'token': token_id,
  }])
  self.testapp.post('/add_histograms_queue', payload, status=status)
  return upload_completion_token.Token.get_by_id(token_id)
def testPostHistogram(self):
  graph_data.Bot(
      key=ndb.Key('Master', 'Chromium', 'Bot', 'win7'),
      internal_only=False).put()
  test_path = 'Chromium/win7/suite/metric'
  self.testapp.post(
      '/add_histograms_queue',
      json.dumps([{
          'data': TEST_HISTOGRAM,
          'test_path': test_path,
          'benchmark_description': None,
          'revision': 123,
      }]))
  # The TestMetadata for the uploaded histogram should exist with the
  # units/direction taken from the histogram data.
  test_key = utils.TestKey(test_path)
  test = test_key.get()
  self.assertEqual(test.units, 'count_biggerIsBetter')
  self.assertEqual(test.improvement_direction, anomaly.UP)
  self.assertIsNotNone(ndb.Key('Master', 'Chromium').get())
  self.assertIsNotNone(ndb.Key('Master', 'Chromium', 'Bot', 'win7').get())
  self.assertEqual(8, len(graph_data.TestMetadata.query().fetch()))
  histograms = histogram.Histogram.query().fetch()
  self.assertEqual(1, len(histograms))
  self.assertEqual(TEST_HISTOGRAM['guid'], histograms[0].key.id())
  # Stored data should match the uploaded histogram, modulo the guid.
  h = histograms[0]
  actual = h.data
  del actual['guid']
  expected = copy.deepcopy(TEST_HISTOGRAM)
  del expected['guid']
  self.assertEqual(expected, actual)
  self.assertEqual(test_key, h.test)
  self.assertEqual(123, h.revision)
  self.assertFalse(h.internal_only)
def testPostMultipleHistogram_MeasurementExpired(self, mock_log):
  test_path1 = 'Chromium/win7/suite/metric1'
  test_path2 = 'Chromium/win7/suite/metric2'
  token_id = str(uuid.uuid4())
  token = upload_completion_token.Token(id=token_id).put().get()
  token.AddMeasurement(test_path1, False).wait()
  measurement2 = token.AddMeasurement(test_path2, False).get_result()
  token.UpdateState(upload_completion_token.State.COMPLETED)
  # Delete the second measurement so it cannot be found during the upload.
  measurement2.key.delete()
  self.assertIsNone(
      upload_completion_token.Measurement.GetByPath(test_path2, token_id))
  graph_data.Bot(
      key=ndb.Key('Master', 'Chromium', 'Bot', 'win7'),
      internal_only=False).put()
  params = [{
      'data': TEST_HISTOGRAM,
      'test_path': test_path,
      'benchmark_description': None,
      'revision': revision,
      'token': token_id,
  } for test_path, revision in [(test_path1, 123), (test_path2, 5)]]
  self.testapp.post('/add_histograms_queue', json.dumps(params))
  # The missing measurement is logged but does not fail the upload.
  token = upload_completion_token.Token.get_by_id(token_id)
  self.assertEqual(token.state, upload_completion_token.State.COMPLETED)
  mock_log.assert_called_once_with(
      'Upload completion token measurement could not be found. '
      'Token id: %s, measurement test path: %s', token_id, test_path2)
def testPostMultipleHistogram_MeasrementExpired(self):
  # NOTE(review): "Measrement" typo in the method name is preserved, since
  # renaming would change the discovered test name.
  test_path1 = 'Chromium/win7/suite/metric1'
  test_path2 = 'Chromium/win7/suite/metric2'
  token_id = str(uuid.uuid4())
  token = upload_completion_token.Token(id=token_id).put().get()
  _, measurement2 = token.PopulateMeasurements([test_path1, test_path2])
  token.UpdateStateAsync(upload_completion_token.State.COMPLETED).wait()
  # Delete the second measurement so it cannot be found during the upload.
  measurement2.key.delete()
  self.assertIsNone(
      upload_completion_token.Measurement.get_by_id(
          test_path2, parent=token.key))
  graph_data.Bot(
      key=ndb.Key('Master', 'Chromium', 'Bot', 'win7'),
      internal_only=False).put()
  histogram_params = [{
      'data': TEST_HISTOGRAM,
      'test_path': test_path,
      'benchmark_description': None,
      'revision': revision,
      'token': token_id,
  } for test_path, revision in [(test_path1, 123), (test_path2, 5)]]
  self.testapp.post('/add_histograms_queue', json.dumps(histogram_params))
  # The upload still completes despite the expired measurement.
  token = upload_completion_token.Token.get_by_id(token_id)
  self.assertEqual(token.state, upload_completion_token.State.COMPLETED)
def _AddLongTestColumns(self, start_rev=15000, end_rev=16500, step=3):
  """Adds test data with long nested sub test to the mock datastore.

  Args:
    start_rev: Starting revision number.
    end_rev: Ending revision number.
    step: Difference between adjacent revisions.
  """
  master = graph_data.Master(id='master')
  master.put()
  bot = graph_data.Bot(id='bot', parent=master.key)
  bot.put()
  suite = graph_data.TestMetadata(id='master/bot/suite')
  suite.UpdateSheriff()
  suite.put()
  rows = []
  path = 'master/bot/suite'
  # Each iteration nests one level deeper: .../sub1, .../sub1/sub2, etc.
  for sub_name in ['sub1', 'sub2', 'sub3', 'sub4', 'sub5']:
    path = '%s/%s' % (path, sub_name)
    sub_test = graph_data.TestMetadata(
        id=path, improvement_direction=anomaly.UP, has_rows=True)
    sub_test.UpdateSheriff()
    sub_test.put()
    container_key = utils.GetTestContainerKey(sub_test.key)
    rows.extend(
        graph_data.Row(
            parent=container_key,
            id=rev,
            value=float(rev * 2),
            r_webkit=int(rev * 0.25),
            a_str='some_string',
            buildnumber=rev - start_rev,
            a_tracing_uri='http://trace/%d' % rev)
        for rev in range(start_rev, end_rev, step))
  ndb.put_multi(rows)
def _AddPublicBotsToDataStore(self):
  """Adds sample bot/master pairs."""
  master_key = ndb.Key('Master', 'ChromiumPerf')
  for bot_name in ['win', 'linux']:
    graph_data.Bot(
        id=bot_name, parent=master_key, internal_only=False).put()
def testEdit_AddPattern(self):
  """Tests changing the patterns list of an existing AnomalyConfig."""
  self.SetCurrentUser('*****@*****.**', is_admin=True)
  master = graph_data.Master(id='TheMaster').put()
  graph_data.Bot(id='TheBot', parent=master).put()

  def _PutTest(test_path, **kwargs):
    # Creates, sheriffs, and stores one TestMetadata; returns its key.
    test = graph_data.TestMetadata(id=test_path, **kwargs)
    test.UpdateSheriff()
    return test.put()

  suite1 = _PutTest('TheMaster/TheBot/Suite1')
  suite2 = _PutTest('TheMaster/TheBot/Suite2')
  test_aaa = _PutTest('TheMaster/TheBot/Suite1/aaa', has_rows=True)
  test_bbb = _PutTest('TheMaster/TheBot/Suite1/bbb', has_rows=True)
  test_ccc = _PutTest('TheMaster/TheBot/Suite1/ccc', has_rows=True)
  test_ddd = _PutTest('TheMaster/TheBot/Suite2/ddd', has_rows=True)
  anomaly_config.AnomalyConfig(id='1-Suite1-specific', config={'a': 10}).put()
  anomaly_config.AnomalyConfig(id='2-Suite1-general', config={'b': 20}).put()
  self.testapp.post(
      '/edit_anomaly_configs', {
          'add-edit': 'edit',
          'edit-name': '1-Suite1-specific',
          'config': '{"a": 10}',
          'patterns': '*/*/Suite1/aaa',
          'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
      })
  self.ExecuteTaskQueueTasks('/put_entities_task',
                             edit_config_handler._TASK_QUEUE_NAME)
  self.testapp.post(
      '/edit_anomaly_configs', {
          'add-edit': 'edit',
          'edit-name': '2-Suite1-general',
          'config': '{"b": 20}',
          'patterns': '*/*/Suite1/*',
          'xsrf_token': xsrf.GenerateToken(users.get_current_user()),
      })
  self.ExecuteDeferredTasks('default')
  self.ExecuteTaskQueueTasks('/put_entities_task',
                             edit_config_handler._TASK_QUEUE_NAME)
  # The lists of test patterns in the AnomalyConfig entities in the datastore
  # should be set based on what was added in the two requests above.
  self.assertEqual(
      ['*/*/Suite1/*'],
      anomaly_config.AnomalyConfig.get_by_id('2-Suite1-general').patterns)
  self.assertEqual(
      ['*/*/Suite1/aaa'],
      anomaly_config.AnomalyConfig.get_by_id('1-Suite1-specific').patterns)
  # The 1-Suite1-specific config applies instead of the other config
  # because its name comes first according to sort order.
  self.assertEqual(
      '1-Suite1-specific',
      test_aaa.get().overridden_anomaly_config.string_id())
  # The 2-Suite1-specific config applies to the other tests under Suite1.
  self.assertEqual(
      '2-Suite1-general',
      test_bbb.get().overridden_anomaly_config.string_id())
  self.assertEqual(
      '2-Suite1-general',
      test_ccc.get().overridden_anomaly_config.string_id())
  # Note that Suite2/ddd has no config, and nor do the parent tests.
  self.assertIsNone(test_ddd.get().overridden_anomaly_config)
  self.assertIsNone(suite1.get().overridden_anomaly_config)
  self.assertIsNone(suite2.get().overridden_anomaly_config)