# NOTE(review): this is a generator using `yield`/`raise ndb.Return`, so it is
# presumably decorated with @ndb.tasklet at the (unseen) definition site — confirm.
def _ProcessTest(test_key):
  """Processes a test to find new anomalies.

  Fetches recent rows for the test, runs change-point detection on them,
  filters out anomalies that also appear in the ref build, persists any new
  Anomaly entities, groups them, and emails the sheriff about regressions.

  Args:
    test_key: The ndb.Key for a TestMetadata.
  """
  test = yield test_key.get_async()
  # Per-test anomaly-detection configuration; bounds how many rows we analyze.
  config = anomaly_config.GetAnomalyConfigDict(test)
  max_num_rows = config.get('max_window_size', DEFAULT_NUM_POINTS)
  rows = yield GetRowsToAnalyzeAsync(test, max_num_rows)
  # If there were no rows fetched, then there's nothing to analyze.
  if not rows:
    # In some cases (e.g. if some points are deleted) it might be possible
    # that last_alerted_revision is incorrect. In this case, reset it.
    highest_rev = yield _HighestRevision(test_key)
    if test.last_alerted_revision > highest_rev:
      logging.error(
          'last_alerted_revision %d is higher than highest rev %d '
          'for test %s; setting last_alerted_revision to None.',
          test.last_alerted_revision, highest_rev, test.test_path)
      test.last_alerted_revision = None
      yield test.put_async()
    logging.error('No rows fetched for %s', test.test_path)
    raise ndb.Return(None)
  # Tests with no assigned sheriff are not analyzed.
  sheriff = yield _GetSheriffForTest(test)
  if not sheriff:
    logging.error('No sheriff for %s', test_key)
    raise ndb.Return(None)
  # Get anomalies and check if they happen in ref build also.
  change_points = FindChangePointsForTest(rows, config)
  change_points = yield _FilterAnomaliesFoundInRef(
      change_points, test_key, len(rows))
  anomalies = [_MakeAnomalyEntity(c, test, rows) for c in change_points]
  # If no new anomalies were found, then we're done.
  if not anomalies:
    return
  logging.info('Found at least one anomaly in: %s', test.test_path)
  # Update the last_alerted_revision property of the test so the same
  # revisions are not re-alerted on the next run.
  test.last_alerted_revision = anomalies[-1].end_revision
  yield test.put_async()
  yield alert_group.GroupAlertsAsync(
      anomalies, utils.TestSuiteName(test.key), 'Anomaly')
  # TODO(simonhatch): email_sheriff.EmailSheriff() isn't a tasklet yet, so this
  # code will run serially.
  # Email sheriff about any new regressions.
  for anomaly_entity in anomalies:
    # Only un-triaged (no bug yet) regressions are emailed, and only when the
    # sheriff is not configured for summary emails.
    if (anomaly_entity.bug_id is None and
        not anomaly_entity.is_improvement and
        not sheriff.summarize):
      email_sheriff.EmailSheriff(sheriff, test, anomaly_entity)
  # Persist all new anomalies in one batch.
  yield ndb.put_multi_async(anomalies)
def _ProcessAlertsForBot(bot_name, alerts):
  """Reports alert-count statistics for one bot as a deferred histogram set.

  Builds a total-count histogram plus one per-suite histogram (keyed by the
  suite name as the story) and defers the upload via add_histograms.

  Args:
    bot_name: Name of the bot the alerts belong to.
    alerts: A list of alert entities, each with a `.test` key.
  """
  total_hist = _CreateHistogram('chromium.perf.alerts', 'count')
  total_hist.AddSample(len(alerts))

  # Tally how many alerts each test suite produced.
  suite_counts = {}
  for alert in alerts:
    suite = utils.TestSuiteName(alert.test)
    suite_counts[suite] = suite_counts.get(suite, 0) + 1

  # One histogram per suite, with the suite name used as the story.
  suite_hists = {}
  for suite, count in suite_counts.iteritems():
    suite_hist = _CreateHistogram('chromium.perf.alerts', 'count', story=suite)
    suite_hist.AddSample(count)
    suite_hists[suite] = suite_hist

  histogram_set = _CreateHistogramSet(
      'ChromiumPerfFyi', bot_name, 'chromeperf.stats', int(time.time()),
      [total_hist] + suite_hists.values())
  deferred.defer(add_histograms.ProcessHistogramSet, histogram_set.AsDicts())
# NOTE(review): generator using `yield`/`raise ndb.Return` — presumably
# decorated with @ndb.tasklet at the (unseen) definition site; confirm.
def _ProcessAlerts():
  """Fetches the last day's Chromium Perf Sheriff alerts and reports stats.

  Queries Anomaly entities from the past 24 hours for the
  'Chromium Perf Sheriff' sheriff and hands them to _ProcessAlertsForBot,
  which builds and defers the histogram upload.
  """
  sheriff = ndb.Key('Sheriff', 'Chromium Perf Sheriff')
  ts_start = datetime.datetime.now() - datetime.timedelta(days=1)
  q = anomaly.Anomaly.query()
  q = q.filter(anomaly.Anomaly.timestamp > ts_start)
  q = q.filter(anomaly.Anomaly.sheriff == sheriff)
  q = q.order(-anomaly.Anomaly.timestamp)
  alerts = yield q.fetch_async()
  if not alerts:
    raise ndb.Return()
  # Delegate to the shared helper instead of duplicating its histogram-building
  # body here (the previous implementation was a copy with the bot name
  # hard-coded inline).
  # TODO: 'test1' looks like a placeholder bot name — confirm it is intended.
  _ProcessAlertsForBot('test1', alerts)
def testTestSuiteName_Basic(self):
  """TestSuiteName extracts the suite component from a full test path key."""
  test_key = utils.TestKey('Master/bot/suite-foo/sub/x/y/z')
  suite_name = utils.TestSuiteName(test_key)
  self.assertEqual('suite-foo', suite_name)