def testPost_DeprecateOldTest_ExistingStoppageAlert_NoAlert(
            self, mock_delete):
        """Deprecating a stale test with an existing alert adds no new alert.

        `mock_delete` is presumably injected by a @mock.patch decorator
        outside this view — TODO confirm.
        """
        # Sheriff whose patterns cover the tests below; stoppage alerts fire
        # after a one-point delay.
        sheriff.Sheriff(id='ref_sheriff',
                        email='*****@*****.**',
                        patterns=['*/*/*/*', '*/*/*/*/*'],
                        stoppage_alert_delay=1).put()

        testing_common.AddTests(*_TESTS_SIMPLE)

        # 't' gets rows old enough to be deprecated; 't_ref' gets fresh rows.
        self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t',
                          _DEPRECATE_DAYS)
        self._AddMockRows('ChromiumPerf/mac/SunSpider/Total/t_ref', 0)

        # Find the newest row of 't' so an alert can be created for it.
        test_path = 'ChromiumPerf/mac/SunSpider/Total/t'
        test_key = utils.TestKey(test_path)
        test_parent = utils.OldStyleTestKey(test_key)
        query = graph_data.Row.query(graph_data.Row.parent_test == test_parent)
        query = query.order(-graph_data.Row.timestamp)
        last_row = query.get()

        # Pre-existing stoppage alert for the stale test.
        stoppage_alert.CreateStoppageAlert(test_key.get(), last_row).put()

        self.testapp.post('/deprecate_tests')
        self.ExecuteTaskQueueTasks(
            '/deprecate_tests',
            deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)

        # Only the stale test is deprecated; the ref build is not.
        self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t', True)
        self.AssertDeprecated('ChromiumPerf/mac/SunSpider/Total/t_ref', False)

        self.assertFalse(mock_delete.called)

        # The existing alert is kept and no duplicate is created.
        alerts = stoppage_alert.StoppageAlert.query().fetch()
        self.assertEqual(1, len(alerts))
 def testGet_ThreeAlertsOneSheriff_EmailSent(self):
     """Three alerts for one sheriff are batched into a single email."""
     self._AddSampleData()
     # One stoppage alert per test; all belong to the same sheriff.
     for name in ('foo', 'bar', 'baz'):
         test = utils.TestKey('M/b/suite/%s' % name).get()
         row = graph_data.Row.query(graph_data.Row.parent_test ==
                                    utils.OldStyleTestKey(test.key)).get()
         stoppage_alert.CreateStoppageAlert(test, row).put()
     self.testapp.get('/send_stoppage_alert_emails')
     # Exactly one email is sent, covering all three alerts.
     messages = self.mail_stub.get_sent_messages()
     self.assertEqual(1, len(messages))
     self.assertEqual('*****@*****.**', messages[0].sender)
     self.assertEqual('*****@*****.**', messages[0].to)
     # The subject carries the alert count.
     self.assertIn('3', messages[0].subject)
     body = str(messages[0].body)
     self.assertIn('foo', body)
     self.assertIn('bar', body)
     self.assertIn('baz', body)
     # The body links to the relevant builds and logdog stdout.
     self.assertIn(
         'http://build.chromium.org/p/chromium.perf/builders/Mac/builds/187',
         body)
     self.assertIn(
         'http://build.chromium.org/p/chromium.perf/builders/Win/builds/184',
         body)
     self.assertIn(
         'https://luci-logdog.appspot.com/v/?s=chrome%2Fbb%2Fchromium.perf%2F'
         'Win%2F184%2F%2B%2Frecipes%2Fsteps%2Fmedia.mse_cases%2F0%2Fstdout',
         body)
     # Every alert is marked as mailed afterwards.
     stoppage_alerts = stoppage_alert.StoppageAlert.query().fetch()
     for alert in stoppage_alerts:
         self.assertTrue(alert.mail_sent)
# Example 3
 def _AssertNotExists(self, test_paths):
   """Asserts that neither Rows nor TestMetadata remain for any given path."""
   for path in test_paths:
     key = utils.TestKey(path)
     row_count = graph_data.Row.query(
         graph_data.Row.parent_test == utils.OldStyleTestKey(key)).count()
     self.assertEqual(0, row_count)
     self.assertIsNone(key.get())
# Example 4
def _FetchRowsByStat(test_key, stat, last_alert_future, max_num_rows):
    """Fetches up to |max_num_rows| newest Rows as (revision, row, value).

    The value element is chosen according to |stat|: the row's value for
    'avg', its error for 'std', and the d_<stat> attribute otherwise.
    Presumably an ndb tasklet (yields futures) — decorator not visible here.
    """
    # Only 'avg' can use a cheap projection query; the other stats read
    # non-projected properties off the Row entity.
    if stat == 'avg':
        query = graph_data.Row.query(
            projection=['revision', 'timestamp', 'value'])
    else:
        query = graph_data.Row.query()

    query = query.filter(
        graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))

    # Skip everything at or before the last alerted revision, if known.
    if last_alert_future:
        last_alert = yield last_alert_future
        if last_alert:
            query = query.filter(
                graph_data.Row.revision > last_alert.end_revision)

    # Newest first so that the fetch limit keeps the most recent points.
    query = query.order(-graph_data.Row.revision)
    rows = yield query.fetch_async(limit=max_num_rows)

    def _StatValue(row):
        # Maps the requested stat name to the matching Row attribute.
        if stat == 'avg':
            return row.value
        if stat == 'std':
            return row.error
        return getattr(row, 'd_%s' % stat)

    # Callers consume the points oldest-first.
    vals = [(row.revision, row, _StatValue(row)) for row in reversed(rows)]
    raise ndb.Return(vals)
# Example 5
def _UpdateNewestRevInMilestoneDict(bots, tests, milestone_dict):
    """Updates the most recent rev in the milestone dict.

  The global milestone dicts are declared with 'None' for the end of the
  current milestone range. If we might be using the last milestone, update
  the end of the current milestone range to be the most recent revision.
  """
    if not (bots and tests):
        return
    test_path = bots[0] + '/' + tests[0]
    test_key = utils.TestKey(test_path)
    # Need to allow set this request as privileged in order to bypass
    # datastore hooks. This is okay here because table_config is
    # internal_only protected and will ensure that only the correct users
    # can see internal_only data.
    datastore_hooks.SetSinglePrivilegedRequest()
    newest_row = (graph_data.Row.query()
                  .filter(graph_data.Row.parent_test ==
                          utils.OldStyleTestKey(test_key))
                  .order(-graph_data.Row.revision)
                  .get())
    start_rev = milestone_dict[CURRENT_MILESTONE][0]
    # Fall back to the start revision when the test has no rows at all.
    end_rev = newest_row.revision if newest_row else start_rev
    milestone_dict[CURRENT_MILESTONE] = (start_rev, end_rev)
# Example 6
def _UpdateCache(test_key):
    """Queries Rows for a test then updates the cache.

  Args:
    test_key: ndb.Key for a TestMetadata entity.

  Returns:
    The list of triplets that was just fetched and set in the cache.
  """
    test = test_key.get()
    if not test:
        return []
    assert utils.IsInternalUser() or not test.internal_only
    datastore_hooks.SetSinglePrivilegedRequest()

    # A projection query queries just for the values of particular properties;
    # this is faster than querying for whole entities.
    query = graph_data.Row.query(projection=['revision', 'value', 'timestamp'])
    query = query.filter(
        graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))

    # Materialize the result as a real list: under Python 3, map() returns a
    # lazy iterator, which SetCache would exhaust, leaving the returned value
    # empty. Using a large batch_size speeds up queries with > 1000 Rows.
    rows = [_MakeTriplet(row) for row in query.iter(batch_size=1000)]
    # Note: Unit tests do not call datastore_hooks with the above query, but
    # it is called in production and with more recent SDK.
    datastore_hooks.CancelSinglePrivilegedRequest()
    SetCache(utils.TestPath(test_key), rows)
    return rows
# Example 7
    def _CreateTestKeys(self):
        """Builds the full list of old- and new-style test keys to query.

        Populates self._statistic_columns, self._unsuffixed_test_metadata_keys
        and self._test_keys from the descriptor and the requested columns.
        """
        desc = self._descriptor.Clone()

        # Statistics requested via columns, plus the descriptor's own
        # statistic if it was not already among them.
        self._statistic_columns = [
            col for col in self._columns if col in descriptor.STATISTICS
        ]
        if desc.statistic and desc.statistic not in self._statistic_columns:
            self._statistic_columns.append(desc.statistic)

        # Unsuffixed paths: the descriptor with no statistic set.
        desc.statistic = None
        unsuffixed_test_paths = desc.ToTestPathsSync()
        self._unsuffixed_test_metadata_keys = [
            utils.TestMetadataKey(path) for path in unsuffixed_test_paths
        ]

        # Suffixed paths: one batch per statistic column.
        suffixed_test_paths = []
        for stat in self._statistic_columns:
            desc.statistic = stat
            suffixed_test_paths.extend(desc.ToTestPathsSync())

        metadata_keys = [
            utils.TestMetadataKey(path) for path in suffixed_test_paths
        ]
        metadata_keys.extend(self._unsuffixed_test_metadata_keys)

        # Old-style keys cover both suffixed and unsuffixed paths; old-style
        # keys come first in the combined list.
        all_paths = suffixed_test_paths + unsuffixed_test_paths
        old_style_keys = [utils.OldStyleTestKey(path) for path in all_paths]
        self._test_keys = old_style_keys + metadata_keys
# Example 8
def GetLatestRowsForTest(test_key, num_points, keys_only=False):
    """Gets the latest num_points Row entities for a test."""
    key = utils.OldStyleTestKey(test_key)
    # Newest first, so the fetch limit keeps the most recent points.
    newest_first = Row.query(Row.parent_test == key).order(-Row.revision)
    return newest_first.fetch(
        limit=num_points, batch_size=100, keys_only=keys_only)
    def testProcessTest_UsesLastAlert_Avg(self, mock_process_stat):
        """Only rows newer than the last alert's end_revision are analyzed.

        `mock_process_stat` is presumably injected by a @mock.patch decorator
        outside this view — TODO confirm.
        """
        mock_process_stat.side_effect = _MockTasklet

        self._AddDataForTests()
        test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
        test = utils.TestKey(test_path).get()

        # Existing anomaly ending at revision 10062; processing should resume
        # after it.
        a = anomaly.Anomaly(test=test.key,
                            start_revision=10061,
                            end_revision=10062,
                            statistic='avg')
        a.put()

        test.UpdateSheriff()
        test.put()

        with mock.patch.object(SheriffConfigClient, 'Match',
                               mock.MagicMock(return_value=([], None))):
            find_anomalies.ProcessTests([test.key])
        self.ExecuteDeferredTasks('default')

        # Rebuild the row list that ProcessTests should have passed along:
        # only rows strictly after the last alert's end revision.
        query = graph_data.Row.query(
            projection=['revision', 'timestamp', 'value'])
        query = query.filter(graph_data.Row.revision > 10062)
        query = query.filter(
            graph_data.Row.parent_test == utils.OldStyleTestKey(test.key))
        row_data = query.fetch()
        rows = [(r.revision, r, r.value) for r in row_data]
        mock_process_stat.assert_called_with(mock.ANY, mock.ANY, mock.ANY,
                                             rows, None)

        # No additional anomaly was created beyond the pre-existing one.
        anomalies = anomaly.Anomaly.query().fetch()
        self.assertEqual(len(anomalies), 1)
# Example 10
def GetRowsForTestBeforeAfterRev(test_key,
                                 rev,
                                 num_rows_before,
                                 num_rows_after,
                                 privileged=False):
    """Gets Row entities for a test centered on a revision.

    Fetches up to |num_rows_before| Rows at or below |rev| and up to
    |num_rows_after| Rows above it, returned in ascending revision order.

    Args:
      test_key: Key of the test (converted to an old-style Test key below).
      rev: Center revision number.
      num_rows_before: Max number of Rows with revision <= rev.
      num_rows_after: Max number of Rows with revision > rev.
      privileged: If True, bypass datastore hooks for each query.
    """
    test_key = utils.OldStyleTestKey(test_key)

    if privileged:
        datastore_hooks.SetSinglePrivilegedRequest()
    # Descending order so the limit keeps the rows closest to |rev|; then
    # reversed back into ascending order.
    query_up_to_rev = Row.query(Row.parent_test == test_key,
                                Row.revision <= rev)
    query_up_to_rev = query_up_to_rev.order(-Row.revision)
    rows_up_to_rev = list(
        reversed(query_up_to_rev.fetch(limit=num_rows_before, batch_size=100)))

    # Privilege is re-set before the second query — presumably it only covers
    # a single query; confirm against datastore_hooks.
    if privileged:
        datastore_hooks.SetSinglePrivilegedRequest()
    query_after_rev = Row.query(Row.parent_test == test_key,
                                Row.revision > rev)
    query_after_rev = query_after_rev.order(Row.revision)
    rows_after_rev = query_after_rev.fetch(limit=num_rows_after,
                                           batch_size=100)

    return rows_up_to_rev + rows_after_rev
# Example 11
 def GetAlertsForTestAsync(cls, test_key, limit=None):
     """Fetches alerts whose test matches either key form of |test_key|.

     Queries against both the new-style TestMetadata key and the old-style
     Test key for the same test path. Presumably an @ndb.tasklet classmethod
     — decorators are outside this view; TODO confirm.

     Args:
       test_key: A test key or path accepted by utils.TestMetadataKey and
         utils.OldStyleTestKey.
       limit: Optional maximum number of results passed to fetch_async.
     """
     result = yield cls.query(
         cls.test.IN([
             utils.TestMetadataKey(test_key),
             utils.OldStyleTestKey(test_key)
         ])).fetch_async(limit=limit)
     raise ndb.Return(result)
# Example 12
def GetRevisions(test_key, revision):
  """Returns the r_* supplemental revision columns of one Row, or {}."""
  parent = utils.OldStyleTestKey(test_key)
  row = graph_data.Row.query(
      graph_data.Row.parent_test == parent,
      graph_data.Row.revision == revision).get()
  if row is None:
    return {}
  # Only the supplemental revision properties (prefixed with 'r_') are kept.
  return {name: value
          for name, value in row.to_dict().items() if name.startswith('r_')}
# Example 13
 def testOldStyleTestKey_String(self):
     """A test path string maps to a nested old-style 'Test' key."""
     key = utils.OldStyleTestKey('m/b/suite/metric')
     self.assertEqual('Test', key.kind())
     self.assertEqual('metric', key.id())
     expected_flat = ('Master', 'm', 'Bot', 'b', 'Test', 'suite',
                      'Test', 'metric')
     self.assertEqual(expected_flat, key.flat())
# Example 14
def GetRowsForTestInRange(test_key, start_rev, end_rev):
  """Gets all the Row entities for a test between a given start and end."""
  key = utils.OldStyleTestKey(test_key)
  query = Row.query(Row.parent_test == key)
  query = query.filter(Row.revision >= start_rev)
  query = query.filter(Row.revision <= end_rev)
  return query.fetch(batch_size=100)
# Example 15
def GetRowsForTestAroundRev(test_key, rev, num_points):
    """Gets up to |num_points| Row entities for a test centered on a revision."""
    key = utils.OldStyleTestKey(test_key)
    # One extra row on the "before" side so the window includes |rev| itself.
    before = int(num_points / 2) + 1
    after = int(num_points / 2)
    return GetRowsForTestBeforeAfterRev(key, rev, before, after)
# Example 16
 def parent_test(self):  # pylint: disable=invalid-name
   """Returns the old-style Test key for the test this Row belongs to.

   Presumably exposed as a computed/derived property on Row — the decorator
   is outside this view; TODO confirm.
   """
   # The Test entity that a Row belongs to isn't actually its parent in
   # the datastore. Rather, the parent key of each Row contains a test path,
   # which contains the information necessary to get the actual Test
   # key. The Test key will need to be converted back to a new style
   # TestMetadata key to get information back out. This is because we have
   # over 3 trillion Rows in the datastore and cannot convert them all :(
   return utils.OldStyleTestKey(utils.TestKey(self.key.parent().string_id()))
# Example 17
def GetRowsForTestInRange(test_key, start_rev, end_rev, privileged=False):
    """Gets all the Row entities for a test between a given start and end."""
    key = utils.OldStyleTestKey(test_key)
    if privileged:
        # Bypass datastore hooks for this single query.
        datastore_hooks.SetSinglePrivilegedRequest()
    query = Row.query(Row.parent_test == key)
    query = query.filter(Row.revision >= start_rev)
    query = query.filter(Row.revision <= end_rev)
    return query.fetch(batch_size=100)
# Example 18
def _HighestRevision(test_key):
    """Gets the revision number of the Row with the highest ID for a test.

    Presumably an ndb tasklet (yields a future) — decorator not visible here.
    """
    parent = utils.OldStyleTestKey(test_key)
    query = graph_data.Row.query(graph_data.Row.parent_test == parent)
    query = query.order(-graph_data.Row.revision)
    # A keys-only query is enough: the Row's ID is its revision.
    highest_row_key = yield query.get_async(keys_only=True)
    if not highest_row_key:
        raise ndb.Return(None)
    raise ndb.Return(highest_row_key.id())
# Example 19
def DeprecateTestsMapper(entity):
  """Marks a TestMetadata entity as deprecated if the last row is too old.

  What is considered "too old" is defined by _DEPRECATION_REVISION_DELTA. Also,
  if all of the subtests in a test have been marked as deprecated, then that
  parent test will be marked as deprecated.

  This mapper doesn't un-deprecate tests if new data has been added; that
  happens in add_point.py.

  Args:
    entity: The TestMetadata entity to check.

  Yields:
    Zero or more datastore mutation operations.
  """
  # Fetch the last row.
  datastore_hooks.SetPrivilegedRequest()
  query = graph_data.Row.query(
      graph_data.Row.parent_test == utils.OldStyleTestKey(entity.key))
  query = query.order(-graph_data.Row.timestamp)
  last_row = query.get()

  # Check if the test should be deleted entirely.
  now = datetime.datetime.now()
  logging.info('checking %s', entity.test_path)
  if not last_row or last_row.timestamp < now - _REMOVAL_REVISON_DELTA:
    # Deletion is only queued for leaf tests: a test with descendants (other
    # than itself) is skipped here.
    descendants = list_tests.GetTestDescendants(entity.key, keys_only=True)
    if entity.key in descendants:
      descendants.remove(entity.key)
    if not descendants:
      logging.info('removing')
      if last_row:
        logging.info('last row timestamp: %s', last_row.timestamp)
      else:
        logging.info('no last row, no descendants')
      taskqueue.add(
          url='/delete_test_data',
          params={
              'test_path': utils.TestPath(entity.key),  # For manual inspection.
              'test_key': entity.key.urlsafe(),
              'notify': 'false',
          },
          queue_name=_DELETE_TASK_QUEUE_NAME)
      return


  # Nothing further to do if already deprecated or there is no data at all.
  if entity.deprecated or not last_row:
    return

  if last_row.timestamp < now - _DEPRECATION_REVISION_DELTA:
    for operation in _MarkDeprecated(entity):
      yield operation

  # Stoppage alerts are considered regardless of whether the test was just
  # deprecated above.
  for operation in _CreateStoppageAlerts(entity, last_row):
    yield operation
# Example 20
def TestKeysForReportTemplate(template_id):
    """Yields new- and old-style test keys for every path in a ReportTemplate."""
    template = ndb.Key('ReportTemplate', int(template_id)).get()
    if template is None:
        return

    # Each table row may describe several tests; every path is emitted under
    # both key styles.
    for table_row in template.template['rows']:
        for desc in report_query.TableRowDescriptors(table_row):
            for path in desc.ToTestPathsSync():
                yield utils.TestMetadataKey(path)
                yield utils.OldStyleTestKey(path)
# Example 21
def GetLatestRowsForTest(
    test_key, num_points, keys_only=False, privileged=False):
  """Gets the latest num_points Row entities for a test."""
  key = utils.OldStyleTestKey(test_key)
  if privileged:
    # Bypass datastore hooks for this single query.
    datastore_hooks.SetSinglePrivilegedRequest()
  # Newest first, so the fetch limit keeps the most recent points.
  newest_first = Row.query(Row.parent_test == key).order(-Row.revision)
  return newest_first.fetch(
      limit=num_points, batch_size=100, keys_only=keys_only)
# Example 22
 def _GetDataRow(self, test_path, rev):
   """Returns the single Row at |rev| for |test_path|, or None.

   Looks the Row up under both the new-style TestMetadata key and the
   old-style Test key; exactly one match is expected, and more than one is
   treated as an error. Presumably an @ndb.tasklet — decorator not visible
   here; TODO confirm.
   """
   entities = yield [
       self._GetDataRowForKey(utils.TestMetadataKey(test_path), rev),
       self._GetDataRowForKey(utils.OldStyleTestKey(test_path), rev)]
   entities = [e for e in entities if e]
   if not entities:
     raise ndb.Return(None)
   if len(entities) > 1:
     # A Row should exist under only one key style for a given test path.
     logging.warn('Found too many Row entities: %r %r', rev, test_path)
     raise ndb.Return(None)
   raise ndb.Return(entities[0])
# Example 23
 def testGetAlertsForTest(self):
     """Alerts stored under either key style are found for a test."""
     test1_old = utils.OldStyleTestKey('master/bot/test1/metric')
     test1_new = utils.TestMetadataKey('master/bot/test1/metric')
     test2_old = utils.OldStyleTestKey('master/bot/test2/metric')
     test2_new = utils.TestMetadataKey('master/bot/test2/metric')
     for alert_id, test in (('old_1', test1_old), ('old_1a', test1_old),
                            ('old_2', test2_old), ('new_1', test1_new),
                            ('new_2', test2_new), ('new_2a', test2_new)):
         anomaly.Anomaly(id=alert_id, test=test).put()
     # Lookup by the new-style key also finds the old-style alerts.
     key1_alerts = anomaly.Anomaly.GetAlertsForTest(test1_new)
     self.assertEqual(['new_1', 'old_1', 'old_1a'],
                      [a.key.id() for a in key1_alerts])
     # Lookup by the old-style key also finds the new-style alerts.
     key2_alerts = anomaly.Anomaly.GetAlertsForTest(test2_old)
     self.assertEqual(['new_2', 'new_2a', 'old_2'],
                      [a.key.id() for a in key2_alerts])
     # The limit parameter caps the number of results.
     limited = anomaly.Anomaly.GetAlertsForTest(test2_old, limit=2)
     self.assertEqual(['new_2', 'new_2a'], [a.key.id() for a in limited])
# Example 24
def GetRowsToAnalyzeAsync(test, max_num_rows):
    """Fetches up to |max_num_rows| Rows newer than the last alerted revision.

    Rows are returned in ascending revision order. Presumably an ndb tasklet
    (yields a future) — decorator not visible here.
    """
    query = graph_data.Row.query(projection=['revision', 'value'])
    query = query.filter(
        graph_data.Row.parent_test == utils.OldStyleTestKey(test.key))
    query = query.filter(graph_data.Row.revision > test.last_alerted_revision)

    # Descending order so the fetch limit keeps the newest points...
    query = query.order(-graph_data.Row.revision)
    rows = yield query.fetch_async(limit=max_num_rows)

    # ...but analysis wants them oldest-first.
    raise ndb.Return(rows[::-1])
# Example 25
 def _FetchRowsAsync(self, test_keys, num_points):
     """Fetches recent Row asynchronously across all 'test_keys'."""
     # Kick off one query per test; all run concurrently.
     futures = []
     for key in test_keys:
         query = graph_data.Row.query(
             graph_data.Row.parent_test == utils.OldStyleTestKey(key))
         query = query.order(-graph_data.Row.revision)
         futures.append(query.fetch_async(limit=num_points))
     ndb.Future.wait_all(futures)
     # Flatten the per-test results into a single list.
     rows = []
     for future in futures:
         rows.extend(future.get_result())
     return rows
# Example 26
    def AuthorizedPost(self, *args):
        """Returns timeseries data in response to API requests.

    Argument:
      test_path: Full path of test timeseries

    Outputs:
      JSON timeseries data for the test_path, see README.md.

    Raises:
      BadRequestError: If num_days is non-numeric or non-positive, or if
        test_path does not name an existing test.
    """
        try:
            days = int(self.request.get('num_days', 30))
        except ValueError:
            raise api_request_handler.BadRequestError(
                'Invalid num_days parameter %s' % self.request.get('num_days'))
        # NOTE(review): zero is also rejected here although the message says
        # "negative".
        if days <= 0:
            raise api_request_handler.BadRequestError(
                'num_days cannot be negative (%s)' % days)
        before = datetime.datetime.now() - datetime.timedelta(days=days)

        test_path = args[0]
        test_key = utils.TestKey(test_path)
        test = test_key.get()
        if not test:
            raise api_request_handler.BadRequestError('Invalid test_path %s' %
                                                      test_path)

        # Internal-only data requires an unaltered (privileged) query; then
        # allow this single request to bypass the datastore hooks.
        assert (datastore_hooks.IsUnalteredQueryPermitted()
                or not test.internal_only)
        datastore_hooks.SetSinglePrivilegedRequest()

        # Fetch all rows for the test newer than the cutoff timestamp.
        q = graph_data.Row.query()
        q = q.filter(
            graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
        q = q.filter(graph_data.Row.timestamp > before)

        rows = q.fetch()
        if not rows:
            return []
        # Supplemental revision columns (r_*) from the first row become extra
        # header columns; presumably all rows share the same r_* set — TODO
        # confirm.
        revisions = [rev for rev in rows[0].to_dict() if rev.startswith('r_')]
        header = ['revision', 'value', 'timestamp'] + revisions
        timeseries = [header]
        for row in sorted(rows, key=lambda r: r.revision):
            timeseries.append([self._GetValue(row, a) for a in header])

        return {
            'timeseries': timeseries,
            'test_path': test_path,
            'revision_logs': namespaced_stored_object.Get('revision_info'),
            'improvement_direction': test.improvement_direction,
        }
 def _AssertNotExists(self, test_paths):
     """Asserts that no Rows, Histograms, SparseDiagnostics, or TestMetadata
     remain for any of the given test paths."""
     for path in test_paths:
         key = utils.TestKey(path)
         old_key = utils.OldStyleTestKey(key)
         self.assertEqual(
             0, graph_data.Row.query(
                 graph_data.Row.parent_test == old_key).count())
         self.assertEqual(
             0, histogram.Histogram.query(
                 histogram.Histogram.test == key).count())
         self.assertEqual(
             0, histogram.SparseDiagnostic.query(
                 histogram.SparseDiagnostic.test == key).count())
         self.assertIsNone(key.get())
# Example 28
  def testPost_NanFiltered(self):
    """Rows whose value is NaN are dropped from the graph JSON response."""
    self._AddTestColumns(start_rev=15700, end_rev=16000, step=1)

    # Corrupt a single Row (revision 15900) with a NaN value.
    test_key = utils.OldStyleTestKey('ChromiumGPU/win7/dromaeo/jslib')
    row_key = utils.GetRowKey(test_key, 15900)
    row = row_key.get()
    row.value = float('nan')
    row.put()

    graphs = {'test_path_dict': {'ChromiumGPU/win7/dromaeo/jslib': [],}}
    # If the request is valid, a valid response will be returned.
    response = self.testapp.post('/graph_json', {'graphs': json.dumps(graphs)})
    flot_json_str = response.body
    rows = json.loads(flot_json_str)['data']['0']['data']
    # One fewer point than would otherwise be returned — the NaN row is
    # filtered out (149 points remain; presumably 150 would be served
    # without the NaN — TODO confirm against the handler's point limit).
    self.assertEqual(149, len(rows))
# Example 29
def _CreateTestBench(test_key):
    """Fetches and stores test and row data that would be used to run bench."""
    # Grab the newest _NUM_ROWS_TO_BENCH points, then restore ascending order.
    query = graph_data.Row.query(projection=['revision', 'value'])
    query = query.filter(
        graph_data.Row.parent_test == utils.OldStyleTestKey(test_key))
    query = query.order(-graph_data.Row.revision)
    recent_rows = query.fetch(limit=_NUM_ROWS_TO_BENCH)
    data_series = [(row.revision, row.value)
                   for row in reversed(recent_rows)]

    # Build, annotate and persist the TestBench entity.
    bench = TestBench(test=test_key, data_series=data_series)
    _UpdateInvalidAndConfirmedAnomalyRevs(bench)
    _RunBaseAlertProcessing(bench)
    bench.put()
# Example 30
  def testDeprecateTestsMapper_AllSubtestsDeprecated_UpdatesSuite(self):
    """When every subtest is deprecated, the parent suite is too."""
    (trace_a, trace_b, suite) = self._AddMockDataForDeprecatedTests()
    # Age trace_b's newest row (revision 4) so it also qualifies as stale.
    last_b = graph_data.Row.query(
        graph_data.Row.parent_test == utils.OldStyleTestKey(trace_b.key),
        graph_data.Row.revision == 4).get()
    last_b.timestamp = datetime.datetime.now() - datetime.timedelta(days=20)
    last_b.put()

    # Run the mapper over both traces, applying its yielded operations.
    for operation in mr.DeprecateTestsMapper(trace_a):
      self._ExecOperation(operation)
    for operation in mr.DeprecateTestsMapper(trace_b):
      self._ExecOperation(operation)

    # Both traces — and therefore the suite — end up deprecated.
    self.assertTrue(trace_a.deprecated)
    self.assertTrue(trace_b.deprecated)
    self.assertTrue(suite.deprecated)