def testCreateStoppageAlert_DoesNotCreateLargeGroups(self):
  """Checks that no stoppage alert is created once a group is full."""
  # First, create |_MAX_GROUP_SIZE| alerts; all of them can be created
  # and they all belong to the same group.
  # Use a list comprehension rather than map(): the names are iterated
  # twice below, and a lazy map would be exhausted after the first pass
  # under Python 3.
  tests = [str(i) for i in range(stoppage_alert._MAX_GROUP_SIZE)]
  testing_common.AddTests(['M'], ['b'], {'suite': {t: {} for t in tests}})
  test_paths = ['M/b/suite/' + t for t in tests]
  alerts = []
  for path in test_paths:
    rows = testing_common.AddRows(path, [1])
    test = utils.TestKey(path).get()
    new_alert = stoppage_alert.CreateStoppageAlert(test, rows[0])
    self.assertIsNotNone(new_alert)
    new_alert.put()
    alerts.append(new_alert)
  self.assertEqual(stoppage_alert._MAX_GROUP_SIZE, len(alerts))
  self.assertTrue(all(a.group == alerts[0].group for a in alerts))
  # Making one more stoppage alert that belongs to this group fails.
  testing_common.AddTests(['M'], ['b'], {'suite': {'another': {}}})
  test_path = 'M/b/suite/another'
  rows = testing_common.AddRows(test_path, [1])
  test = utils.TestKey(test_path).get()
  new_alert = stoppage_alert.CreateStoppageAlert(test, rows[0])
  self.assertIsNone(new_alert)
def _AddMockData(self):
  """Adds sample TestMetadata and Row entities."""
  testing_common.AddTests(*_MOCK_DATA)
  # Attach 50 Row entities (even revisions 15000..15098) to each of the
  # tests listed in _TESTS_WITH_ROWS.
  for path in _TESTS_WITH_ROWS:
    testing_common.AddRows(path, range(15000, 15100, 2))
def testProcessTest_AnomalyDoesNotMatchRefSeries_AlertCreated(self):
  # Tests that an Anomaly entity is created when non-ref series goes up, but
  # the ref series stays flat.
  testing_common.AddTests(
      ['ChromiumGPU'], ['linux-release'],
      {'scrolling_benchmark': {'ref': {}}})
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  non_ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark').get()
  test_container_key = utils.GetTestContainerKey(ref.key)
  test_container_key_non_ref = utils.GetTestContainerKey(non_ref.key)
  for row in _TEST_ROW_DATA:
    # Ref series stays flat at a constant value; non-ref follows the data.
    graph_data.Row(
        id=row[0], value=2125.375, parent=test_container_key).put()
    graph_data.Row(
        id=row[0], value=row[1], parent=test_container_key_non_ref).put()
  # Note: the original code put two Sheriff entities with the same
  # id='sheriff'; the second put overwrote the first, so only the non-ref
  # pattern ever took effect. The redundant first put has been removed.
  sheriff.Sheriff(
      email='*****@*****.**', id='sheriff',
      patterns=[non_ref.test_path]).put()
  ref.put()
  non_ref.put()
  find_anomalies.ProcessTest(non_ref.key)
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(len(new_anomalies), 1)
def testProcessTest_AnomaliesMatchRefSeries_NoAlertCreated(self):
  # No Anomaly entity should be created when the test and its
  # corresponding ref build series contain identical data.
  testing_common.AddTests(
      ['ChromiumGPU'], ['linux-release'],
      {'scrolling_benchmark': {'ref': {}}})
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  non_ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark').get()
  ref_container = utils.GetTestContainerKey(ref.key)
  non_ref_container = utils.GetTestContainerKey(non_ref.key)
  # Give both series exactly the same values.
  for datum in _TEST_ROW_DATA:
    graph_data.Row(id=datum[0], value=datum[1], parent=ref_container).put()
    graph_data.Row(
        id=datum[0], value=datum[1], parent=non_ref_container).put()
  sheriff.Sheriff(
      email='*****@*****.**', id='sheriff',
      patterns=[non_ref.test_path]).put()
  ref.put()
  non_ref.put()
  find_anomalies.ProcessTest(non_ref.key)
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(0, len(new_anomalies))
def _AddSampleData(self):
  """Adds a Test and Row entities, and returns the Test key."""
  testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
  path = 'M/b/suite/foo'
  # _SAMPLE_SERIES is a sequence of (revision, value) pairs.
  testing_common.AddRows(
      path, {rev: {'value': val} for rev, val in _SAMPLE_SERIES})
  return utils.TestKey(path)
def _AddMockData(self):
  """Adds data which will be used in the around-revision stats tests below."""
  sheriff.Sheriff(
      id='Chromium Perf Sheriff', email='*****@*****.**',
      patterns=['*/*/*/*/page_load_time', '*/*/*/*/Score']).put()
  testing_common.AddTests(*_MOCK_DATA)
  test_paths = [
      'ChromiumPerf/win7/moz/times/page_load_time',
      'ChromiumPerf/win7/octane/Total/Score',
      'ChromiumPerf/mac/moz/times/page_load_time',
      'ChromiumPerf/mac/octane/Total/Score',
  ]
  # A list (not a lazy map) so iteration is safe under both Python 2 and 3.
  test_keys = [utils.TestKey(p) for p in test_paths]
  row_data = [_WIN7_MOZ, _WIN7_OCTANE, _MAC_MOZ, _MAC_OCTANE]
  for index, test_key in enumerate(test_keys):
    test = test_key.get()
    # NOTE(review): if utils.TestKey builds keys whose string id is the full
    # test path, this comparison can never be true and every test gets
    # anomaly.UP — confirm against the key layout used by utils.TestKey.
    if test_key.string_id() == 'page_load_time':
      test.improvement_direction = anomaly.DOWN
    else:
      test.improvement_direction = anomaly.UP
    test.put()
    parent_key = utils.GetTestContainerKey(test_key)
    for r in range(15000, 15080, 2):
      # Floor division so the list index stays an int under Python 3
      # (plain '/' would yield a float and raise TypeError).
      v = row_data[index][(r - 15000) // 2]
      graph_data.Row(id=r, parent=parent_key, value=v).put()
def _AddTestSuites(self):
  """Adds sample data and sets the list of test suites."""
  # Mock out some data for a test.
  masters = ['ChromiumPerf', 'ChromiumGPU']
  bots = ['chromium-rel-win7-gpu-ati', 'linux-release']
  tests = {
      'scrolling_benchmark': {
          'a_first_listed_test': {},
          'average_commit_time': {
              'answers.yahoo.com': {},
              'www.cnn.com': {},
          },
          'average_commit_time_ref': {},
      },
      'dromaeo': {},
  }
  testing_common.AddTests(masters, bots, tests)
  # Give every top-level suite entity a description.
  for master in masters:
    for bot in bots:
      for suite_name in tests:
        suite = ndb.Key(
            'TestMetadata', '%s/%s/%s' % (master, bot, suite_name)).get()
        suite.description = 'This should show up'
        suite.put()
  # Before the test suites data gets generated, the cached test suites
  # data must be updated.
  self.testapp.post('/update_test_suites')
def testStartNewBisectForBug_WithDefaultRevs_StartsBisect(
    self, mock_perform_bisect):
  """Checks that a bisect is started when rows carry default revisions."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-release'], {'sunspider': {'score': {}}})
  test_key = utils.TestKey('ChromiumPerf/linux-release/sunspider/score')
  # Rows whose actual revision is resolved through 'a_default_rev'.
  testing_common.AddRows(
      'ChromiumPerf/linux-release/sunspider/score',
      {
          1199: {
              'a_default_rev': 'r_foo',
              'r_foo': '9e29b5bcd08357155b2859f87227d50ed60cf857',
          },
          1250: {
              'a_default_rev': 'r_foo',
              'r_foo': 'fc34e5346446854637311ad7793a95d56e314042',
          },
      })
  anomaly.Anomaly(
      bug_id=333,
      test=test_key,
      start_revision=1200,
      end_revision=1250,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  auto_bisect.StartNewBisectForBug(333)
  job = try_job.TryJob.query(try_job.TryJob.bug_id == 333).get()
  mock_perform_bisect.assert_called_once_with(job)
def testGuessMetric_SummaryMetricWithTIRLabel(self):
  """Checks the guessed metric for a summary metric with a TIR label."""
  testing_common.AddTests(
      ['M'], ['b'], {'benchmark': {'chart': {'tir_label': {'page': {}}}}})
  guessed = start_try_job.GuessMetric('M/b/benchmark/chart/tir_label')
  self.assertEqual('tir_label-chart/tir_label-chart', guessed)
def testPost_MigratesStoppageAlerts(self):
  """Checks that migrating a test name moves its stoppage alert."""
  testing_common.AddTests(['Master'], ['b'], {'suite': {'foo': {}}})
  old_path = 'Master/b/suite/foo'
  test_key = utils.TestKey(old_path)
  container = utils.GetTestContainerKey(test_key)
  row_key = graph_data.Row(id=100, parent=container, value=5).put()
  stoppage_alert.CreateStoppageAlert(test_key.get(), row_key.get()).put()
  # Before migration, only the old path has a stoppage alert.
  self.assertIsNotNone(
      stoppage_alert.GetStoppageAlert('Master/b/suite/foo', 100))
  self.assertIsNone(
      stoppage_alert.GetStoppageAlert('Master/b/suite/bar', 100))
  self.testapp.post('/migrate_test_names', {
      'old_pattern': 'Master/b/suite/foo',
      'new_pattern': 'Master/b/suite/bar',
  })
  self.ExecuteTaskQueueTasks(
      '/migrate_test_names', migrate_test_names._TASK_QUEUE_NAME)
  # After migration, the alert lives under the new path only.
  self.assertIsNotNone(
      stoppage_alert.GetStoppageAlert('Master/b/suite/bar', 100))
  self.assertIsNone(
      stoppage_alert.GetStoppageAlert('Master/b/suite/foo', 100))
def _AddDataToDatastore(self):
  """Puts a set of entities; some internal-only, some not."""
  # Need to be privileged to add Test and Row objects to the datastore
  # because there is a get() for the parent_test in the pre_put_hook. This
  # should work correctly in production because Rows and Tests should only
  # be added by /add_point, which is privileged.
  self.SetCurrentUser('*****@*****.**')
  testing_common.AddTests(
      ['ChromiumPerf'], ['Win7External', 'FooInternal'], {
          'TestInternal': {'SubTestInternal': {}},
          'TestExternal': {'SubTestExternal': {}},
      })
  internal_bot_path = ['Master', 'ChromiumPerf', 'Bot', 'FooInternal']
  test_internal = ['Test', 'TestInternal']
  sub_test_internal = ['Test', 'SubTestInternal']
  external_bot_path = ['Master', 'ChromiumPerf', 'Bot', 'Win7External']

  def _MarkInternal(key_parts):
    # Fetches the entity for the given flat key path, flags it as
    # internal-only, stores it, and returns the entity.
    entity = ndb.Key(*key_parts).get()
    entity.internal_only = True
    entity.put()
    return entity

  _MarkInternal(internal_bot_path)
  _MarkInternal(external_bot_path + test_internal)
  _MarkInternal(internal_bot_path + test_internal)
  _MarkInternal(external_bot_path + test_internal + sub_test_internal)
  internal_sub_test = _MarkInternal(
      internal_bot_path + test_internal + sub_test_internal)
  internal_row_parent = utils.GetTestContainerKey(internal_sub_test.key)
  external_row_parent = utils.GetTestContainerKey(ndb.Key(
      *(external_bot_path +
        ['Test', 'TestExternal', 'Test', 'SubTestExternal'])))
  for i in range(0, 100, 10):
    graph_data.Row(parent=internal_row_parent, id=i, value=float(i * 2),
                   internal_only=True).put()
    graph_data.Row(parent=external_row_parent, id=i, value=float(i * 2)).put()
  self.UnsetCurrentUser()
  sheriff.Sheriff(
      id='external', email='*****@*****.**', internal_only=False).put()
  sheriff.Sheriff(
      id='internal', email='*****@*****.**', internal_only=True).put()
def testPost_WithAncestor_AllRowsMoved(self):
  """Checks that only rows under the given ancestor get new revisions."""
  testing_common.AddTests(
      ['M'], ['b1', 'b2'], {'foo': {'bar': {}, 'baz': {}}})
  for test_path in ('M/b1/foo/bar', 'M/b1/foo/baz', 'M/b2/foo/bar'):
    # range(1425001000, 1430001000, 6000) includes 834 numbers.
    # set(range(...)) replaces the redundant {i for i in range(...)}.
    testing_common.AddRows(
        test_path, set(range(1425001000, 1430001000, 6000)))
  self.testapp.post('/shrink_timestamp_revisions', {'ancestor': 'M/b1'})
  self.ExecuteTaskQueueTasks(
      '/shrink_timestamp_revisions', shrink_timestamp_revisions._QUEUE_NAME)
  b1_bar_rows = graph_data.Row.query(
      graph_data.Row.parent_test == utils.TestKey('M/b1/foo/bar')).fetch()
  b1_baz_rows = graph_data.Row.query(
      graph_data.Row.parent_test == utils.TestKey('M/b1/foo/baz')).fetch()
  b2_bar_rows = graph_data.Row.query(
      graph_data.Row.parent_test == utils.TestKey('M/b2/foo/bar')).fetch()
  self.assertGreater(len(b1_bar_rows), 600)
  self.assertGreater(len(b1_baz_rows), 600)
  self.assertEqual(834, len(b2_bar_rows))
  # Rows under 'M/b1' were rewritten to small revisions; rows outside the
  # ancestor kept their original timestamp-based revisions.
  for r in b1_bar_rows:
    self.assertLess(r.revision, 300000)
  for r in b1_baz_rows:
    self.assertLess(r.revision, 300000)
  for r in b2_bar_rows:
    self.assertGreater(r.revision, 300000)
def testGet_WithAncestor_AllAlertsUpdated(self):
  """Checks that alert revisions are rewritten along with the rows."""
  testing_common.AddTests(
      ['M'], ['b1', 'b2'], {'foo': {'bar': {}, 'baz': {}}})
  # set(range(...)) replaces the redundant {i for i in range(...)}.
  testing_common.AddRows(
      'M/b1/foo/bar', set(range(1431001000, 1432001000, 6000)))
  test_key = utils.TestKey('M/b1/foo/bar')
  # range(1431001000, 1431081000, 6000) includes 14 numbers.
  for i in range(1431001000, 1431081000, 6000):
    anomaly.Anomaly(
        start_revision=i,
        end_revision=i + 12000,
        test=test_key,
        median_before_anomaly=100,
        median_after_anomaly=200).put()
  self.testapp.post('/shrink_timestamp_revisions', {'ancestor': 'M'})
  self.ExecuteTaskQueueTasks(
      '/shrink_timestamp_revisions', shrink_timestamp_revisions._QUEUE_NAME)
  anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(14, len(anomalies))
  for a in anomalies:
    self.assertLess(a.start_revision, 300000)
    self.assertLess(a.end_revision, 300000)
def _AddSampleData(self):
  """Adds the same tree of sample tests under two bots."""
  # Build the suite trees separately for readability.
  dromaeo = {
      'dom': {},
      'jslib': {},
  }
  scrolling = {
      'commit_time': {
          'www.yahoo.com': {},
          'www.cnn.com': {},
      },
      'commit_time_ref': {},
  }
  really = {
      'nested': {
          'very': {'deeply': {'subtest': {}}},
          'very_very': {},
      },
  }
  testing_common.AddTests(
      ['Chromium'], ['win7', 'mac'],
      {'dromaeo': dromaeo, 'scrolling': scrolling, 'really': really})
def _AddMockDataForDeprecatedTests(self):
  """Adds some sample data, some of which only has old timestamps."""
  testing_common.AddTests(['ChromiumPerf'], ['win7'], _TESTS)
  trace_a = utils.TestKey('ChromiumPerf/win7/suite/graph_a/trace_a').get()
  trace_b = utils.TestKey('ChromiumPerf/win7/suite/graph_b/trace_b').get()
  suite = utils.TestKey('ChromiumPerf/win7/suite').get()
  container_a = utils.GetTestContainerKey(trace_a)
  container_b = utils.GetTestContainerKey(trace_b)
  now = datetime.datetime.now()
  deprecated_time = datetime.datetime.now() - datetime.timedelta(days=20)
  # All of trace_a's rows are old; trace_b gets one recent row (the last).
  for i in range(0, 5):
    graph_data.Row(
        id=i, value=i * 100, parent=container_a,
        timestamp=deprecated_time).put()
    graph_data.Row(
        id=i, value=i * 100, parent=container_b,
        timestamp=(now if i == 4 else deprecated_time)).put()
  return trace_a, trace_b, suite
def _AddTestData(self, series, sheriff_key, improvement_direction=anomaly.UP):
  """Adds one sample Test and associated data.

  Args:
    series: Either a list of values, or a list of (x, y) pairs.
    sheriff_key: A Sheriff entity key.
    improvement_direction: One of {anomaly.UP, anomaly.DOWN, anomaly.UNKNOWN}.

  Returns:
    The Test entity key of the Test that was added.
  """
  testing_common.AddTests(['M'], ['b'], {'benchmark': {'t': {}}})
  path = 'M/b/benchmark/t'
  test = utils.TestKey(path).get()
  test.improvement_direction = improvement_direction
  test.sheriff = sheriff_key
  # Make the sheriff's patterns cover the new test.
  watcher = sheriff_key.get()
  watcher.patterns.append(test.test_path)
  watcher.put()
  # A bare list of values is treated as y-values with x starting at 1.
  if series and isinstance(series[0], (int, float)):
    series = enumerate(series, start=1)
  testing_common.AddRows(path, {rev: {'value': val} for rev, val in series})
  return test.put()
def testGetAnomalyConfigDict(self):
  testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
  test = utils.TestKey('M/b/foo/bar').get()
  # With no overridden config, an empty dict is returned.
  self.assertEqual({}, anomaly_config.GetAnomalyConfigDict(test))
  # Override the config for the test added above.
  # The overridden config is set in the pre-put hook of the Test.
  config = {
      '_comment': 'Very particular segment sizes.',
      'max_window_size': 721,
      'min_segment_size': 123,
  }
  anomaly_config.AnomalyConfig(
      config=config, patterns=[test.test_path]).put()
  test.put()
  # The overridden config is now returned; extraneous "comment" keys are
  # ignored.
  self.assertEqual(
      {'max_window_size': 721, 'min_segment_size': 123},
      anomaly_config.GetAnomalyConfigDict(test))
def _AddSampleData(self):
  """Adds some normal test data from two different tests."""
  # Add TestMetadata entities.
  testing_common.AddTests(
      ['ChromiumPerf'], ['win7'], {'foo': {'mytest': {}, 'other': {}}})
  mytest_container = utils.GetTestContainerKey(
      utils.TestKey('ChromiumPerf/win7/foo/mytest'))
  other_container = utils.GetTestContainerKey(
      utils.TestKey('ChromiumPerf/win7/foo/other'))
  # Row timestamps are set explicitly, since being ordered by time is part
  # of what should be tested. 'other' rows are 30 minutes after 'mytest'.
  start = datetime.datetime(2014, 1, 1, 0, 0)
  for i in range(10):
    mytest_row = graph_data.Row(
        parent=mytest_container, id=10000 + i, value=i)
    # Each row is put twice so the auto-set timestamp can be overwritten.
    mytest_row.put()
    mytest_row.timestamp = start + datetime.timedelta(hours=i)
    mytest_row.put()
    other_row = graph_data.Row(parent=other_container, id=10000 + i, value=i)
    other_row.put()
    other_row.timestamp = start + datetime.timedelta(hours=i, minutes=30)
    other_row.put()
def testPointInfoDict_RowHasNoTracingUri_ResultHasNoTracingUri(self):
  testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
  rows = testing_common.AddRows('Master/b/my_suite', [345])
  # A row without an a_tracing_uri property should yield point info
  # without a trace annotation.
  point_info = graph_json._PointInfoDict(rows[0], {})
  self.assertFalse(hasattr(rows[0], 'a_tracing_uri'))
  self.assertNotIn('a_tracing_uri', point_info)
def testGetOldStdioUri_InternalOnly_NoURIReturned(self):
  testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
  suite = utils.TestKey('Master/b/my_suite').get()
  suite.buildername = 'MyBuilder'
  sample_row = graph_data.Row(id=345, buildnumber=456)
  suite.masterid = 'my.master.id'
  # An internal-only test must not expose a stdio link.
  suite.internal_only = True
  self.assertIsNone(graph_json._GetOldStdioUri(sample_row, suite))
def testGetBotNamesFromAlerts_RemovesDuplicates(self):
  testing_common.AddTests(['SuperGPU'], ['Bot1'], {'foo': {'bar': {}}})
  key = utils.TestKey('SuperGPU/Bot1/foo/bar')
  # Two alerts on the same test should yield a single bot name.
  anomaly.Anomaly(test=key).put()
  anomaly.Anomaly(test=key).put()
  alerts_found = anomaly.Anomaly.query().fetch()
  bot_names = alert.GetBotNamesFromAlerts(alerts_found)
  self.assertEqual(2, len(alerts_found))
  self.assertEqual(1, len(bot_names))
def testStartNewBisectForBug_InvalidInputErrorRaised_ReturnsError(self):
  testing_common.AddTests(['Foo'], ['bar'], {'sunspider': {'score': {}}})
  anomaly.Anomaly(
      bug_id=345,
      test=utils.TestKey('Foo/bar/sunspider/score'),
      start_revision=300100,
      end_revision=300200,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  # The error raised during bisect start is surfaced as an error dict.
  result = auto_bisect.StartNewBisectForBug(345)
  self.assertEqual({'error': 'Some reason'}, result)
def testGet_DumpJson_WithRows(self):
  """Checks row limits and ordering in the /dump_graph_json response."""
  # Insert a test with rows.
  testing_common.AddTests('M', 'b', {'foo': {}})
  test_key = utils.TestKey('M/b/foo')
  test_container_key = utils.GetTestContainerKey(test_key)
  rows = []
  # The upper limit for revision numbers in this test; this was added
  # so that the test doesn't depend on the value of _DEFAULT_MAX_POINTS.
  highest_rev = 2000 + dump_graph_json._DEFAULT_MAX_POINTS - 1
  for rev in range(1000, highest_rev + 1):
    row = graph_data.Row(parent=test_container_key, id=rev, value=(rev * 2))
    rows.append(row)
  ndb.put_multi(rows)
  # There is a maximum number of rows returned by default, and the rows
  # are listed with latest revisions first.
  response = self.testapp.get('/dump_graph_json', {'test_path': 'M/b/foo'})
  protobuf_strings = json.loads(response.body)
  # Wrap map() in list() so the result survives repeated iteration under
  # Python 3, where map returns a one-shot iterator.
  entities = list(
      map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
  out_rows = _EntitiesOfKind(entities, 'Row')
  expected_num_rows = dump_graph_json._DEFAULT_MAX_POINTS
  self.assertEqual(expected_num_rows, len(out_rows))
  expected_rev_range = range(
      highest_rev, highest_rev + 1 - expected_num_rows, -1)
  for expected_rev, row in zip(expected_rev_range, out_rows):
    self.assertEqual(expected_rev, row.revision)
    self.assertEqual(expected_rev * 2, row.value)
  # Specifying end_rev sets the final revision.
  response = self.testapp.get('/dump_graph_json', {
      'test_path': 'M/b/foo',
      'end_rev': 1199,
  })
  protobuf_strings = json.loads(response.body)
  entities = list(
      map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
  out_rows = _EntitiesOfKind(entities, 'Row')
  expected_num_rows = min(dump_graph_json._DEFAULT_MAX_POINTS, 200)
  self.assertEqual(expected_num_rows, len(out_rows))
  self.assertEqual(1199, out_rows[0].revision)
  # An alternative max number of rows can be specified.
  response = self.testapp.get('/dump_graph_json', {
      'test_path': 'M/b/foo',
      'num_points': 4,
  })
  protobuf_strings = json.loads(response.body)
  entities = list(
      map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
  out_rows = _EntitiesOfKind(entities, 'Row')
  rev_nums = [row.revision for row in out_rows]
  # Compare against an explicit list: under Python 3, range(...) would
  # never compare equal to a list.
  expected_revs = list(range(highest_rev, highest_rev - 4, -1))
  self.assertEqual(expected_revs, rev_nums)
def testGet_DumpAnomaliesDataForSheriff(self):
  """Checks that dumping by sheriff returns rows, alerts and sheriffs."""
  # Insert some test, sheriffs and alerts.
  testing_common.AddTests('M', 'b', {'foo': {}})
  testing_common.AddTests('M', 'b', {'bar': {}})
  test_key_foo = utils.TestKey('M/b/foo')
  test_key_bar = utils.TestKey('M/b/bar')
  test_con_foo_key = utils.GetTestContainerKey(test_key_foo)
  test_con_bar_key = utils.GetTestContainerKey(test_key_bar)
  chromium_sheriff = sheriff.Sheriff(
      id='Chromium Perf Sheriff', email='*****@*****.**').put()
  qa_sheriff = sheriff.Sheriff(
      id='QA Perf Sheriff', email='*****@*****.**').put()
  anomaly.Anomaly(sheriff=chromium_sheriff, test=test_key_foo).put()
  anomaly.Anomaly(sheriff=qa_sheriff, test=test_key_bar).put()
  default_max_points = dump_graph_json._DEFAULT_MAX_POINTS
  # Add some rows.
  rows = []
  for rev in range(1, default_max_points * 2):
    rows.append(
        graph_data.Row(parent=test_con_foo_key, id=rev, value=(rev * 2)))
    rows.append(
        graph_data.Row(parent=test_con_bar_key, id=rev, value=(rev * 2)))
  ndb.put_multi(rows)
  # Anomaly entities, Row entities, Test, and Sheriff entities for
  # parameter 'sheriff' should be returned.
  response = self.testapp.get(
      '/dump_graph_json', {'sheriff': 'Chromium Perf Sheriff'})
  protobuf_strings = json.loads(response.body)
  self.assertEqual(default_max_points + 5, len(protobuf_strings))
  # Materialize as a list: _EntitiesOfKind iterates this three times, and
  # a lazy map would be exhausted after the first pass under Python 3.
  entities = list(
      map(dump_graph_json.BinaryProtobufToEntity, protobuf_strings))
  rows = _EntitiesOfKind(entities, 'Row')
  anomalies = _EntitiesOfKind(entities, 'Anomaly')
  sheriffs = _EntitiesOfKind(entities, 'Sheriff')
  self.assertEqual(default_max_points, len(rows))
  self.assertEqual(1, len(anomalies))
  self.assertEqual(1, len(sheriffs))
  self.assertEqual('Chromium Perf Sheriff', sheriffs[0].key.string_id())
def testStartNewBisectForBug_RevisionTooLow_ReturnsError(self):
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-release'], {'sunspider': {'score': {}}})
  anomaly.Anomaly(
      bug_id=222,
      test=utils.TestKey('ChromiumPerf/linux-release/sunspider/score'),
      start_revision=1200,
      end_revision=1250,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  # The revision just before the anomaly (1199) is too low to bisect.
  result = auto_bisect.StartNewBisectForBug(222)
  self.assertEqual({'error': 'Invalid "good" revision: 1199.'}, result)
def _AddSampleData(self):
  """Puts a Test and Row in the datastore and returns the entities."""
  testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
  # A sheriff whose pattern matches every test.
  sheriff.Sheriff(id='Foo', patterns=['*/*/*/*']).put()
  path = 'M/b/suite/foo'
  test = utils.TestKey(path).get()
  testing_common.AddRows(path, {100})
  row = graph_data.Row.query().get()
  return test, row
def testGetBugLabelsForTest(self):
  bug_label_patterns.AddBugLabelPattern('foo', '*/*/foo')
  bug_label_patterns.AddBugLabelPattern('f-prefix', '*/*/f*')
  testing_common.AddTests(['M'], ['b'], {'foo': {}, 'bar': {}})
  foo_test = utils.TestKey('M/b/foo').get()
  bar_test = utils.TestKey('M/b/bar').get()
  # 'foo' matches both patterns; 'bar' matches neither.
  self.assertEqual(
      ['f-prefix', 'foo'],
      bug_label_patterns.GetBugLabelsForTest(foo_test))
  self.assertEqual([], bug_label_patterns.GetBugLabelsForTest(bar_test))
def testGetOldStdioUri_WithMasterId_URIReturned(self):
  testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
  suite = utils.TestKey('Master/b/my_suite').get()
  suite.buildername = 'MyBuilder'
  sample_row = graph_data.Row(id=345, buildnumber=456)
  suite.masterid = 'my.master.id'
  # The URI is assembled from masterid, buildername, buildnumber and the
  # suite name.
  expected = ('http://build.chromium.org/p/my.master.id/builders/MyBuilder'
              '/builds/456/steps/my_suite/logs/stdio')
  self.assertEqual(expected, graph_json._GetOldStdioUri(sample_row, suite))
def testGetOrCreateAncestors_UpdatesStoppageAlert(self):
  testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
  row = testing_common.AddRows('M/b/suite/foo', {123})[0]
  test = utils.TestKey('M/b/suite/foo').get()
  alert_key = stoppage_alert.CreateStoppageAlert(test, row).put()
  test.stoppage_alert = alert_key
  test.put()
  # Re-creating the ancestors should clear the test's stoppage alert and
  # mark the alert as recovered.
  add_point_queue._GetOrCreateAncestors('M', 'b', 'suite/foo')
  self.assertIsNone(test.key.get().stoppage_alert)
  self.assertTrue(alert_key.get().recovered)
def testStartNewBisectForBug_UnbisectableTest_ReturnsError(self):
  # The test suite "v8" is in the black-list of test suite names.
  testing_common.AddTests(['V8'], ['x86'], {'v8': {'sunspider': {}}})
  anomaly.Anomaly(
      bug_id=444,
      test=utils.TestKey('V8/x86/v8/sunspider'),
      start_revision=155000,
      end_revision=155100,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  result = auto_bisect.StartNewBisectForBug(444)
  self.assertEqual({'error': 'Could not select a test.'}, result)