def testProcessTest_AnomaliesMatchRefSeries_NoAlertCreated(self):
  """Checks that no Anomaly is made when ref and non-ref data are the same."""
  testing_common.AddTests(
      ['ChromiumGPU'], ['linux-release'],
      {'scrolling_benchmark': {'ref': {}}})
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  non_ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark').get()
  ref_container = utils.GetTestContainerKey(ref.key)
  non_ref_container = utils.GetTestContainerKey(non_ref.key)
  # Put identical row data in both the ref and non-ref series.
  for row in _TEST_ROW_DATA:
    graph_data.Row(id=row[0], value=row[1], parent=ref_container).put()
    graph_data.Row(id=row[0], value=row[1], parent=non_ref_container).put()
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[non_ref.test_path]).put()
  ref.put()
  non_ref.put()
  find_anomalies.ProcessTest(non_ref.key)
  # Matching ref-series movement should suppress the alert entirely.
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(0, len(new_anomalies))
def testProcessTest_AnomalyDoesNotMatchRefSeries_AlertCreated(self):
  """Checks that an Anomaly is made when only the non-ref series moves.

  The ref series is given flat data while the non-ref series gets the
  regular test rows, so the anomaly in the non-ref series is not explained
  by the ref build and an alert should be created.
  """
  testing_common.AddTests(
      ['ChromiumGPU'], ['linux-release'],
      {'scrolling_benchmark': {'ref': {}}})
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  non_ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark').get()
  ref_container = utils.GetTestContainerKey(ref.key)
  non_ref_container = utils.GetTestContainerKey(non_ref.key)
  # Flat values for the ref series; real row data for the non-ref series.
  for row in _TEST_ROW_DATA:
    graph_data.Row(id=row[0], value=2125.375, parent=ref_container).put()
    graph_data.Row(id=row[0], value=row[1], parent=non_ref_container).put()
  # Note: the original code put two Sheriff entities with the same
  # id='sheriff'; the second put overwrote the first, so the entity keyed
  # on ref.test_path never survived. Only the effective put is kept here.
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[non_ref.test_path]).put()
  ref.put()
  non_ref.put()
  find_anomalies.ProcessTest(non_ref.key)
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(len(new_anomalies), 1)
def testProcessTest_InternalOnlyTest(self, mock_email_sheriff):
  """Checks that an internal-only test yields an internal-only Anomaly."""
  self._AddDataForTests()
  test = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  test.internal_only = True
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[test.test_path]).put()
  test.put()
  find_anomalies.ProcessTest(test.key)

  # Exactly one sheriff email is expected, for the anomaly ending at 10011.
  self.assertEqual(
      [mock.call(
          ModelMatcher('sheriff'),
          ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
          EndRevisionMatcher(10011))],
      mock_email_sheriff.call_args_list)

  anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(len(anomalies), 1)
  alert = anomalies[0]
  self.assertEqual(test.key, alert.test)
  self.assertEqual(100, alert.percent_changed)
  self.assertEqual(anomaly.UP, alert.direction)
  self.assertEqual(10007, alert.start_revision)
  self.assertEqual(10011, alert.end_revision)
  # The internal_only flag must propagate from the test to the alert.
  self.assertTrue(alert.internal_only)
def testProcessTest_FiltersOutImprovements(self, mock_email_sheriff):
  """Checks that no email goes out for changes in the improvement direction.

  With improvement_direction set to UP, only the downward change (ending at
  revision 10041) should trigger a sheriff email.
  """
  self._AddDataForTests()
  test = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[test.test_path]).put()
  test.improvement_direction = anomaly.UP
  test.put()
  find_anomalies.ProcessTest(test.key)
  mock_email_sheriff.assert_called_once_with(
      ModelMatcher('sheriff'),
      ModelMatcher('ref'),
      EndRevisionMatcher(10041))
def testProcessTest_ImprovementMarkedAsImprovement(self):
  """Checks that an anomaly in the improvement direction is flagged as such."""
  self._AddDataForTests()
  test = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[test.test_path]).put()
  # DOWN is the improvement direction, so the downward change below should
  # result in an Anomaly with is_improvement set.
  test.improvement_direction = anomaly.DOWN
  test.put()
  find_anomalies.ProcessTest(test.key)
  anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(len(anomalies), 1)
  self.assertTrue(anomalies[0].is_improvement)
def post(self):
  """Adds a set of points from the post data.

  Request parameters:
    data: JSON encoding of a list of dictionaries. Each dictionary
        represents one point to add. For each dict, one Row entity will be
        added, and any required Test or Master or Bot entities will be
        created.
  """
  datastore_hooks.SetPrivilegedRequest()
  data = json.loads(self.request.get('data'))
  _PrewarmGets(data)
  bot_whitelist = stored_object.Get(BOT_WHITELIST_KEY)

  put_futures = []
  new_rows = []
  tests_to_monitor = []
  for point in data:
    try:
      row, parent_test, futures = _AddRow(point, bot_whitelist)
      new_rows.append(row)
      # Only tests that have a sheriff and existing rows are monitored.
      if parent_test.sheriff and parent_test.has_rows:
        tests_to_monitor.append(parent_test.key)
      put_futures.extend(futures)
    except add_point.BadRequestError as e:
      logging.error('Could not add %s, it was invalid.', e.message)
    except datastore_errors.BadRequestError as e:
      # A datastore failure aborts the whole request.
      logging.error('Datastore request failed: %s.', e.message)
      return
  ndb.Future.wait_all(put_futures)
  # Updating of the cached graph revisions should happen after put because
  # it requires the new row to have a timestamp, which happens upon put.
  graph_revisions.AddRowsToCache(new_rows)
  for test_key in tests_to_monitor:
    if _IsRefBuild(test_key):
      logging.warn('Ref data marked as monitored: %s', str(test_key))
    else:
      find_anomalies.ProcessTest(test_key)
def testProcessTest_CreatesAnAnomaly(self):
  """Checks that ProcessTest creates an Anomaly from the test row data."""
  testing_common.AddTests(
      ['ChromiumGPU'], ['linux-release'],
      {'scrolling_benchmark': {'ref': {}}})
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  container = utils.GetTestContainerKey(ref.key)
  for row in _TEST_ROW_DATA:
    graph_data.Row(id=row[0], value=row[1], parent=container).put()
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[ref.test_path]).put()
  ref.put()
  find_anomalies.ProcessTest(ref.key)
  new_anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(1, len(new_anomalies))
  alert = new_anomalies[0]
  self.assertEqual(anomaly.UP, alert.direction)
  self.assertEqual(241536, alert.start_revision)
  self.assertEqual(241537, alert.end_revision)
def testProcessTest_LastAlertedRevisionTooHigh_PropertyReset(
    self, mock_logging_error):
  """Checks that an out-of-range last_alerted_revision is cleared.

  If the last_alerted_revision property of the TestMetadata is higher than
  any revision that actually exists, it should be reset to None and errors
  should be logged.
  """
  self._AddDataForTests()
  test = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  test.last_alerted_revision = 1234567890
  test.put()
  find_anomalies.ProcessTest(test.key)
  self.assertIsNone(test.key.get().last_alerted_revision)
  expected_log_calls = [
      mock.call(
          'last_alerted_revision %d is higher than highest rev %d for test '
          '%s; setting last_alerted_revision to None.',
          1234567890, 10066,
          'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
      mock.call('No rows fetched for %s',
                'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
  ]
  mock_logging_error.assert_has_calls(expected_log_calls, any_order=True)
def testProcessTest_NoSheriff_ErrorLogged(self, mock_logging_error):
  """Checks that processing an unsheriffed test just logs an error."""
  self._AddDataForTests()
  ref = utils.TestKey(
      'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
  find_anomalies.ProcessTest(ref.key)
  mock_logging_error.assert_called_with('No sheriff for %s', ref.key)
def testProcessTest(self, mock_email_sheriff):
  """Checks the anomalies and sheriff emails produced for the test data."""
  self._AddDataForTests()
  test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
  test = utils.TestKey(test_path).get()
  sheriff.Sheriff(email='*****@*****.**', id='sheriff',
                  patterns=[test_path]).put()
  test.put()
  find_anomalies.ProcessTest(test.key)

  # One sheriff email is expected per detected anomaly.
  expected_calls = [
      mock.call(
          ModelMatcher('sheriff'),
          ModelMatcher('ChromiumGPU/linux-release/scrolling_benchmark/ref'),
          EndRevisionMatcher(end_rev))
      for end_rev in (10011, 10041, 10061)]
  self.assertEqual(expected_calls, mock_email_sheriff.call_args_list)

  anomalies = anomaly.Anomaly.query().fetch()
  self.assertEqual(len(anomalies), 3)

  def AnomalyExists(
      anomalies, test, percent_changed, direction, start_revision,
      end_revision, sheriff_name, internal_only, units, absolute_delta):
    """Returns whether some anomaly in |anomalies| has all given values."""
    return any(
        a.test == test and
        a.percent_changed == percent_changed and
        a.direction == direction and
        a.start_revision == start_revision and
        a.end_revision == end_revision and
        a.sheriff.string_id() == sheriff_name and
        a.internal_only == internal_only and
        a.units == units and
        a.absolute_delta == absolute_delta
        for a in anomalies)

  self.assertTrue(
      AnomalyExists(
          anomalies, test.key, percent_changed=100, direction=anomaly.UP,
          start_revision=10007, end_revision=10011, sheriff_name='sheriff',
          internal_only=False, units='ms', absolute_delta=50))
  self.assertTrue(
      AnomalyExists(
          anomalies, test.key, percent_changed=-50, direction=anomaly.DOWN,
          start_revision=10037, end_revision=10041, sheriff_name='sheriff',
          internal_only=False, units='ms', absolute_delta=-100))
  self.assertTrue(
      AnomalyExists(
          anomalies, test.key, percent_changed=sys.float_info.max,
          direction=anomaly.UP, start_revision=10057, end_revision=10061,
          sheriff_name='sheriff', internal_only=False, units='ms',
          absolute_delta=100))
  # This is here just to verify that AnomalyExists returns False sometimes.
  self.assertFalse(
      AnomalyExists(
          anomalies, test.key, percent_changed=100, direction=anomaly.DOWN,
          start_revision=10037, end_revision=10041, sheriff_name='sheriff',
          internal_only=False, units='ms', absolute_delta=500))