def _AddAlertsForFreakinHugeRegressions(self):
  """Adds a summarizing sheriff, two sample tests, and huge anomalies.

  The anomalies span medians 0 -> 100 and are timestamped 18 hours ago.
  """
  summarizing_sheriff = sheriff.Sheriff(
      id='Chromium Perf Sheriff',
      email='*****@*****.**',
      summarize=True,
      internal_only=False).put()
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'mean_frame_time': {},
      }
  })
  alert_time = datetime.datetime.now() - datetime.timedelta(hours=18)
  self._AddAnomalies(10000, 10020, 0, 100, summarizing_sheriff, alert_time)
def testAnomalyInfoDicts_MissingTest_AnomalySkipped(self):
  """An anomaly whose test is absent from the given dict produces no info."""
  testing_common.AddTests(['M'], ['b'], {'t': {'foo': {}}})
  key = utils.TestKey('M/b/t/foo')
  orphan_anomaly = anomaly.Anomaly(
      start_revision=14999,
      end_revision=15000,
      test=key,
      bug_id=12345,
      median_before_anomaly=100,
      median_after_anomaly=200)
  orphan_anomaly.put()
  # The empty dict means the anomaly's test cannot be looked up, so the
  # anomaly should be skipped and the result list should be empty.
  self.assertEqual([], main._AnomalyInfoDicts([orphan_anomaly], {}))
def testPost_StoppageAlertWithBogusRow_LogsErrorAndShowsTable(
    self, mock_logging_error):
  """A stoppage alert referencing an unsaved Row is logged yet still listed."""
  sheriff.Sheriff(id='Sheriff', patterns=['M/b/*/*']).put()
  testing_common.AddTests(['M'], ['b'], {'foo': {'bar': {}}})
  test_key = utils.TestKey('M/b/foo/bar')
  # The Row entity is constructed but never put(), so the alert points at a
  # row that does not exist in the datastore.
  bogus_row = graph_data.Row(
      parent=utils.GetTestContainerKey(test_key), id=1234)
  stoppage_alert.CreateStoppageAlert(test_key.get(), bogus_row).put()
  response = self.testapp.post('/alerts?sheriff=Sheriff')
  alerts = self.GetJsonValue(response, 'stoppage_alert_list')
  self.assertEqual(1, len(alerts))
  self.assertEqual(1, mock_logging_error.call_count)
def testStartNewBisectForBug_UnbisectableTest_ReturnsError(self):
  """Starting a bisect on a blacklisted suite returns an error dict."""
  # The test suite "v8" is in the black-list of test suite names.
  testing_common.AddTests(['V8'], ['x86'], {'v8': {'sunspider': {}}})
  anomaly.Anomaly(
      bug_id=444,
      test=utils.TestKey('V8/x86/v8/sunspider'),
      start_revision=155000,
      end_revision=155100,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  self.assertEqual(
      {'error': 'Could not select a test.'},
      auto_bisect.StartNewBisectForBug(444))
def _AddTests(self):
  """Adds sample Tests and returns a list of their keys."""
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'mean_frame_time': {},
      }
  })
  # Wrap in list(): under Python 3 a bare map() is a one-shot lazy
  # iterator, which would break callers that index or iterate twice.
  return list(map(utils.TestKey, [
      'ChromiumGPU/linux-release/scrolling-benchmark/first_paint',
      'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time',
  ]))
def _AddAnomalies(self):
  """Adds a set of sample data used in the tests below."""
  testing_common.AddTests(
      ['ChromiumGPU'], ['linux-release'],
      {'scrolling_benchmark': {'first_paint': {}}})
  first_paint_key = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/first_paint')
  first_paint_test = first_paint_key.get()
  first_paint_test.improvement_direction = anomaly.DOWN
  first_paint_test.put()
  group_keys = [
      alert_group.AlertGroup(
          start_revision=3000,
          end_revision=4000,
          alert_kind='Anomaly',
          test_suites=['scrolling_benchmark']).put(),
      alert_group.AlertGroup(
          start_revision=6000,
          end_revision=8000,
          alert_kind='Anomaly',
          test_suites=['scrolling_benchmark']).put(),
  ]
  anomaly_keys = [
      anomaly.Anomaly(
          start_revision=2000, end_revision=4000, bug_id=12345,
          test=first_paint_key).put(),
      anomaly.Anomaly(
          start_revision=3000, end_revision=5000, bug_id=12345,
          test=first_paint_key).put(),
      anomaly.Anomaly(
          start_revision=6000, end_revision=8000, bug_id=None,
          test=first_paint_key).put(),
  ]
  anomalies = ndb.get_multi(anomaly_keys)
  # Add these anomalies to groups and put them again: the first two share
  # group 0, the third goes into group 1.
  for entity, group_key in zip(
      anomalies, (group_keys[0], group_keys[0], group_keys[1])):
    entity.group = group_key
    entity.put()
  # Note that after these anomalies are added, the state of the two groups
  # is updated. Also, the first two anomalies are in the same group.
  self.assertEqual(anomalies[0].group, anomalies[1].group)
  self.assertNotEqual(anomalies[0].group, anomalies[2].group)
  return anomalies
def testPost_DeprecateOldTestDeletesData(self, mock_delete):
  """A test whose rows are _REMOVAL_DAYS old has its data deleted."""
  testing_common.AddTests(*_TESTS_MULTIPLE)
  self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', _REMOVAL_DAYS)
  self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', 0)
  self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo1', 0)
  self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo2', 0)
  self.testapp.post('/deprecate_tests')
  self.ExecuteTaskQueueTasks(
      '/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
  # Only the stale test (not its ref or the fresh tests) is deleted.
  stale_test = utils.TestKey('ChromiumPerf/mac/SunSpider/Total/t').get()
  mock_delete.assert_called_once_with(stale_test)
def testGetGraphJson_ManyUnselected_ReturnsNothing(self):
  """With is_selected=False and 100 unselected series, output is empty."""
  testing_common.AddTests(
      ['M'], ['b'], {'suite': {str(i): {} for i in range(100)}})
  paths = ['M/b/suite/%s' % i for i in range(100)]
  for path in paths:
    testing_common.AddRows(path, [1])
  path_list = list_tests.GetTestsForTestPathDict(
      {path: [] for path in paths}, False)['tests']
  response = graph_json.GetGraphJson(path_list, is_selected=False)
  self.assertEqual(
      {'data': {}, 'annotations': {}, 'error_bars': {}},
      json.loads(response))
def _AddMockData(self):
  """Adds sample tests, rows for BenchmarkDuration, and four anomalies."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['windows'], {
          'sunspider': {
              'Total': {},
              'ref': {},
              'BenchmarkDuration': {},
          },
          'page_cycler': {
              'warm': {
                  'cnn.com': {},
                  'yahoo.com': {},
              }
          }
      })
  testing_common.AddRows(
      'ChromiumPerf/windows/sunspider/BenchmarkDuration', {
          12345: {
              'timestamp': datetime.datetime.now(),
              'value': 5
          },
          12344: {
              'timestamp':
                  datetime.datetime.now() - datetime.timedelta(days=10),
              'value': 7
          },
      })
  total_key = utils.TestKey('ChromiumPerf/windows/sunspider/Total')
  # Four regressions on the same test, differing only in bug id
  # (None, 12345, 99999, -1) and medians.
  for bug_id, before, after in (
      (None, 5, 7), (12345, 7, 9), (99999, 5, 7), (-1, 5, 7)):
    anomaly.Anomaly(
        bug_id=bug_id,
        test=total_key,
        is_improvement=False,
        median_before_anomaly=before,
        median_after_anomaly=after).put()
def _AddMockData(self):
  """Adds sample tests, cached suite metadata, rows, and four anomalies."""
  mock_tests = [['ChromiumPerf'], ['windows'], {
      'sunspider': {
          'Total': {},
          'ref': {},
      },
      'page_cycler': {
          'warm': {
              'cnn.com': {},
              'yahoo.com': {},
          }
      }
  }]
  testing_common.AddTests(*mock_tests)
  test_suites = {
      'sunspider': {
          'mon': ['Total'],
          'mas': {'ChromiumPerf': {'windows': False}},
      },
      'page_cycler': {
          'mon': ['warm/cnn.com'],
          'mas': {'ChromiumPerf': {'windows': False}},
      },
  }
  stored_object.Set('internal_only__list_tests_get_test_suites', test_suites)
  testing_common.AddRows('ChromiumPerf/windows/sunspider/Total', {
      12345: {'timestamp': datetime.datetime.now(), 'value': 5},
      12344: {
          'timestamp':
              datetime.datetime.now() - datetime.timedelta(days=10),
          'value': 7
      },
  })
  total_key = utils.TestKey('ChromiumPerf/windows/sunspider/Total')
  # Four regressions on the same test with bug ids None, 12345, 99999, -1.
  for bug_id, before, after in (
      (None, 5, 7), (12345, 7, 9), (99999, 5, 7), (-1, 5, 7)):
    anomaly.Anomaly(
        bug_id=bug_id,
        test=total_key,
        is_improvement=False,
        median_before_anomaly=before,
        median_after_anomaly=after).put()
def testStartNewBisectForBug_StartsBisect(self, mock_perform_bisect):
  """A valid anomaly on a bug results in a bisect try job being performed."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-release'],
      {'sunspider': {'score': {'page_1': {}, 'page_2': {}}}})
  anomaly.Anomaly(
      bug_id=111,
      test=utils.TestKey('ChromiumPerf/linux-release/sunspider/score'),
      start_revision=300100,
      end_revision=300200,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  auto_bisect.StartNewBisectForBug(111)
  job = try_job.TryJob.query(try_job.TryJob.bug_id == 111).get()
  # NOTE(review): presumably no story filter is set because the test has
  # multiple child pages — confirm against auto_bisect's config logic.
  self.assertNotIn('--story-filter', job.config)
  mock_perform_bisect.assert_called_once_with(job)
def _AddAlertsWithDifferentMasterAndBenchmark(self):
  """Adds 10 alerts with different benchmark/master."""
  master = 'FakeMaster'
  testing_common.AddTests([master], ['win'], {
      'my_fake_suite': {
          'my_fake_test': {},
      },
  })
  test_keys = [
      utils.TestKey(master + '/win/my_fake_suite/my_fake_test'),
  ]
  self._AddRows(test_keys)
  self._AddAlertsToDataStore(test_keys)
def testStartNewBisectForBug_UnbisectableTest_ReturnsError(self):
  """A blacklisted suite cannot be bisected even with a project id."""
  # The test suite "sizes" is in the black-list of test suite names.
  testing_common.AddTests(['Sizes'], ['x86'], {'sizes': {'abcd': {}}})
  anomaly.Anomaly(
      bug_id=444,
      project_id='test_project',
      test=utils.TestKey('Sizes/x86/sizes/abcd'),
      start_revision=155000,
      end_revision=155100,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  result = auto_bisect.StartNewBisectForBug(444, 'test_project')
  self.assertEqual({'error': 'Could not select a test.'}, result)
def _AddOldAlertsForSheriffWithSummary(self):
  """Adds a summarizing sheriff plus anomalies timestamped 36 hours ago."""
  summarizing_sheriff = sheriff.Sheriff(
      id='Chromium Perf Sheriff',
      email='*****@*****.**',
      labels=['Performance-Sheriff'],
      summarize=True,
      internal_only=False).put()
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'mean_frame_time': {},
      }
  })
  alert_time = datetime.datetime.now() - datetime.timedelta(hours=36)
  self._AddAnomalies(10000, 10020, 100, 200, summarizing_sheriff, alert_time)
def testPointInfoDict_StdioUriMarkdown(self):
  """A markdown-formatted a_stdio_uri is passed through to the point info."""
  testing_common.AddTests(['Master'], ['b'], {'my_suite': {}})
  test = utils.TestKey('Master/b/my_suite').get()
  test.buildername = 'MyBuilder'
  container_key = utils.GetTestContainerKey(test)
  row = graph_data.Row(id=345, buildnumber=456, parent=container_key)
  row.a_stdio_uri = (
      '[Build stdio](http://build.chromium.org/p/my.master.id/'
      'builders/MyBuilder/builds/456/steps/my_suite/logs/'
      'stdio)')
  point_info = graph_json._PointInfoDict(row, {})
  self.assertEqual(row.a_stdio_uri, point_info['a_stdio_uri'])
def testStartNewBisectForBug_RevisionTooLow_ReturnsError(self):
  """An anomaly with a very low start revision yields a 'good revision' error."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-release'], {'sunspider': {'score': {}}})
  anomaly.Anomaly(
      bug_id=222,
      test=utils.TestKey('ChromiumPerf/linux-release/sunspider/score'),
      start_revision=1200,
      end_revision=1250,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  result = auto_bisect.StartNewBisectForBug(222)
  self.assertEqual({'error': 'Invalid "good" revision: 1199.'}, result)
def _AddTestToStubDataStore(self):
  """Adds a test which will be used in the methods below."""
  bug_label_patterns.AddBugLabelPattern('label1', '*/*/dromaeo/dom')
  bug_label_patterns.AddBugLabelPattern('label2', '*/*/other/test')
  testing_common.AddTests(
      ['ChromiumPerf'], ['Win7'], {'dromaeo': {'dom': {}}})
  test = utils.TestKey('ChromiumPerf/Win7/dromaeo/dom').get()
  # NOTE(review): improvement_direction is set on the in-memory entity but
  # never put() here — verify callers persist it if they depend on it.
  test.improvement_direction = anomaly.DOWN
  sheriff.Sheriff(
      id='Chromium Perf Sheriff',
      url=_SHERIFF_URL,
      email=_SHERIFF_EMAIL,
      labels=['Performance-Sheriff']).put()
  return test
def testProcessTest_RefineAnomalyPlacement_MinSize0Max2Elements(self):
  """With min_segment_size=0 the anomaly is placed exactly at the step (7001)."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-perf'], {'sizes': {'method_count': {}}})
  test = utils.TestKey('ChromiumPerf/linux-perf/sizes/method_count').get()
  test_container_key = utils.GetTestContainerKey(test.key)
  custom_config = {
      'max_window_size': 10,
      'min_absolute_change': 50,
      'min_relative_change': 0,
      'min_segment_size': 0,
  }
  anomaly_config.AnomalyConfig(
      config=custom_config, patterns=[test.test_path]).put()
  test.UpdateSheriff()
  test.put()
  self.assertEqual(custom_config, anomaly_config.GetAnomalyConfigDict(test))
  # A flat run at 100 for revisions 6990-7000, then a step to 155 at 7001.
  sample_data = (
      [(rev, 100) for rev in range(6990, 7001)] +
      [(rev, 155) for rev in range(7001, 7004)])
  for revision, value in sample_data:
    graph_data.Row(
        id=revision, value=value, parent=test_container_key).put()
  test.UpdateSheriff()
  test.put()
  with mock.patch.object(
      SheriffConfigClient, 'Match',
      mock.MagicMock(return_value=([], None))) as match_mock:
    find_anomalies.ProcessTests([test.key])
    self.assertEqual(match_mock.call_args_list,
                     [mock.call(test.test_path)])
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  self.assertEqual(anomaly.UP, new_anomalies[0].direction)
  self.assertEqual(7001, new_anomalies[0].start_revision)
  self.assertEqual(7001, new_anomalies[0].end_revision)
def testMakeAnomalyEntity_AddsOwnership(self):
  """Ownership emails and bug component from suite diagnostics reach the alert."""
  data_samples = [{
      'type': 'GenericSet',
      'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
      'values': ['*****@*****.**', '*****@*****.**']
  }, {
      'type': 'GenericSet',
      'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
      'values': ['abc']
  }]
  test_key = utils.TestKey('ChromiumPerf/linux/page_cycler_v2/cnn')
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux'], {
          'page_cycler_v2': {
              'cnn': {},
              'cnn_ref': {},
              'yahoo': {},
              'nytimes': {},
          },
      })
  test = test_key.get()
  testing_common.AddRows(test.test_path, [100, 200, 300, 400])
  suite_key = utils.TestKey('ChromiumPerf/linux/page_cycler_v2')
  # Attach an OWNERS diagnostic and a BUG_COMPONENTS diagnostic to the suite.
  for sample, diagnostic_name in (
      (data_samples[0], reserved_infos.OWNERS.name),
      (data_samples[1], reserved_infos.BUG_COMPONENTS.name)):
    histogram.SparseDiagnostic(
        data=sample,
        test=suite_key,
        start_revision=1,
        end_revision=sys.maxsize,
        id=sample['guid'],
        name=diagnostic_name).put()
  alert = find_anomalies._MakeAnomalyEntity(
      _MakeSampleChangePoint(10011, 50, 100), test, 'avg',
      self._DataSeries()).get_result()
  self.assertEqual(alert.ownership['component'], 'abc')
  self.assertListEqual(alert.ownership['emails'],
                       ['*****@*****.**', '*****@*****.**'])
def testPost_DeprecateOldTest(self, mock_delete):
  """A test whose rows are _DEPRECATE_DAYS old is deprecated, not deleted."""
  testing_common.AddTests(*_TESTS_MULTIPLE)
  self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t', _DEPRECATE_DAYS)
  self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', 0)
  self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo1', 0)
  self._AddMockRows('ChromiumPerf/mac/OtherTest/OtherMetric/foo2', 0)
  self.testapp.post('/deprecate_tests')
  self.ExecuteTaskQueueTasks(
      '/deprecate_tests', deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)
  # Only the leaf with stale rows is deprecated; nothing is deleted.
  self.AssertDeprecated('ChromiumPerf/mac/SunSpider', False)
  self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t', True)
  self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t_ref', False)
  self.assertFalse(mock_delete.called)
def testMakeAnomalyEntity_NoRefBuild(self):
  """Without any ref sibling, the created alert has no ref_test."""
  testing_common.AddTests(['ChromiumPerf'], ['linux'], {
      'page_cycler_v2': {
          'cnn': {},
          'yahoo': {},
          'nytimes': {},
      },
  })
  suite = utils.TestKey('ChromiumPerf/linux/page_cycler_v2').get()
  testing_common.AddRows(suite.test_path, [100, 200, 300, 400])
  alert = find_anomalies._MakeAnomalyEntity(
      _MakeSampleChangePoint(10011, 50, 100), suite, 'avg',
      self._DataSeries()).get_result()
  self.assertIsNone(alert.ref_test)
def testPost_FailedJobRunTwice_JobRestarted(self, mock_perform_bisect):
  """A failed job last run 8 days ago with run_count=2 is restarted."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-release'], {'sunspider': {'score': {}}})
  anomaly.Anomaly(
      bug_id=111,
      test=utils.TestKey('ChromiumPerf/linux-release/sunspider/score'),
      start_revision=300100,
      end_revision=300200,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  try_job.TryJob(
      bug_id=111,
      status='failed',
      last_ran_timestamp=(
          datetime.datetime.now() - datetime.timedelta(days=8)),
      run_count=2).put()
  self.testapp.post('/auto_bisect')
  mock_perform_bisect.assert_called_once_with(
      try_job.TryJob.query(try_job.TryJob.bug_id == 111).get())
def testPost_FailedJobRunTwice_InvalidConfig_ResourceSizes(
    self, mock_perform_bisect):
  """No bisect is performed for a resource_sizes test's failed job."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['linux-release'], {'resource_sizes': {}})
  anomaly.Anomaly(
      bug_id=111,
      test=utils.TestKey('ChromiumPerf/linux-release/resource_sizes'),
      start_revision=300100,
      end_revision=300200,
      median_before_anomaly=100,
      median_after_anomaly=200).put()
  try_job.TryJob(
      bug_id=111,
      status='failed',
      last_ran_timestamp=(
          datetime.datetime.now() - datetime.timedelta(days=8)),
      run_count=2).put()
  self.testapp.post('/auto_bisect')
  self.assertFalse(mock_perform_bisect.called)
def _AddFourNewAlertsWithSummaryForOnlyTwo(self, internal_only=False):
  """Adds a summarizing sheriff and two batches of anomalies (18h and 36h old).

  Args:
    internal_only: Whether the sheriff is internal-only.
  """
  summarizing_sheriff = sheriff.Sheriff(
      id='Chromium Perf Sheriff',
      email='*****@*****.**',
      summarize=True,
      labels=['Performance-Sheriff'],
      internal_only=internal_only).put()
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'mean_frame_time': {},
      }
  })
  # One recent batch (18 hours old) and one older batch (36 hours old).
  recent_time = datetime.datetime.now() - datetime.timedelta(hours=18)
  self._AddAnomalies(10000, 10020, 100, 200, summarizing_sheriff, recent_time)
  older_time = datetime.datetime.now() - datetime.timedelta(hours=36)
  self._AddAnomalies(10120, 10140, 100, 150, summarizing_sheriff, older_time)
def testPartialTestSuites(self):
  """A two-level suite is cached as a single colon-joined name."""
  testing_common.AddTests(
      ['master'], ['bot'], {
          'TEST_PARTIAL_TEST_SUITE': {
              'COMPOSITE': {
                  'measurement': {},
              },
          },
      })
  self.testapp.post('/update_test_suites')
  self.assertEqual(['TEST_PARTIAL_TEST_SUITE:COMPOSITE'],
                   update_test_suites.FetchCachedTestSuites2())
def _AddSampleData(self):
  """Adds three tests under M/b/suite, a sheriff, and one row per test."""
  testing_common.AddTests(
      ['M'], ['b'], {'suite': {'foo': {}, 'bar': {}, 'baz': {}}})
  sheriff.Sheriff(
      email='*****@*****.**',
      id='Foo',
      patterns=['*/*/*/*'],
      stoppage_alert_delay=3).put()
  for name in ('foo', 'bar', 'baz'):
    testing_common.AddRows('M/b/suite/%s' % name, {100})
def testCreateStoppageAlert_DisplayRevIfClank(self):
  """For a ClankInternal test, display revisions come from r_commit_pos."""
  testing_common.AddTests(
      ['ClankInternal'], ['b'], {'suite': {'foo': {}}})
  sheriff.Sheriff(id='Foo', patterns=['*/*/*/*']).put()
  test_path = 'ClankInternal/b/suite/foo'
  test = utils.TestKey(test_path).get()
  testing_common.AddRows(test_path, {100})
  row = graph_data.Row.query().get()
  row.r_commit_pos = 102
  alert = stoppage_alert.CreateStoppageAlert(test, row)
  self.assertEqual(alert.display_start, 102)
  self.assertEqual(alert.display_end, 102)
def _AddNewAlertsForSheriffsWithNoSummary(self):
  """Adds a sheriff with summarize set to False, and some alerts."""
  non_summarizing_sheriff = sheriff.Sheriff(
      id='Chromium Perf Sheriff',
      email='*****@*****.**',
      summarize=False,
      labels=['Performance-Sheriff']).put()
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling-benchmark': {
          'first_paint': {},
          'mean_frame_time': {},
      }
  })
  # One batch of anomalies 18 hours old, another 36 hours old.
  recent_time = datetime.datetime.now() - datetime.timedelta(hours=18)
  self._AddAnomalies(
      10000, 10020, 100, 200, non_summarizing_sheriff, recent_time)
  older_time = datetime.datetime.now() - datetime.timedelta(hours=36)
  self._AddAnomalies(
      10120, 10140, 100, 150, non_summarizing_sheriff, older_time)
def _AddDataForTests(self):
  """Adds a ref test and a series of internal-only rows for it.

  Rows are created for ids 9001, 9006, ... derived from the range
  9000-10069 with step 5.
  """
  testing_common.AddTests(['ChromiumGPU'], ['linux-release'], {
      'scrolling_benchmark': {
          'ref': {},
      },
  })
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  # The container key is loop-invariant; the original recomputed it on
  # every iteration. Compute it once before the loop.
  test_container_key = utils.GetTestContainerKey(ref.key)
  for i in range(9000, 10070, 5):
    # Internal-only data should be found.
    graph_data.Row(
        id=i + 1,
        value=float(i * 3),
        parent=test_container_key,
        internal_only=True).put()
def testPinpointParams_Metric_TopLevelOnly(self):
  """For a single-level metric, the chart is the leaf test name."""
  testing_common.AddTests(
      ['ChromiumPerf'], ['mac'], {'blink_perf': {'foo': {}}})
  params = {
      'test_path': 'ChromiumPerf/mac/blink_perf/foo',
      'start_commit': 'abcd1234',
      'end_commit': 'efgh5678',
      'bug_id': 1,
      'bisect_mode': 'performance',
      'story_filter': '',
      'pin': '',
  }
  result = pinpoint_request.PinpointParamsFromBisectParams(params)
  self.assertEqual('foo', result['chart'])