def testPinpointParams_ComparisonMagnitude_Added(self):
        test_key = utils.TestKey('ChromiumPerf/mac/cc_perftests/foo')
        anomaly_entity = anomaly.Anomaly(start_revision=1,
                                         end_revision=2,
                                         test=test_key,
                                         median_before_anomaly=1,
                                         median_after_anomaly=10)
        anomaly_entity.put()

        params = {
            'test_path': 'ChromiumPerf/mac/cc_perftests/foo',
            'start_commit': 'abcd1234',
            'end_commit': 'efgh5678',
            'bug_id': 1,
            'bisect_mode': 'performance',
            'story_filter': '',
            'pin': '',
            'alerts': json.dumps([anomaly_entity.key.urlsafe()])
        }
        results = pinpoint_request.PinpointParamsFromBisectParams(params)

        self.assertEqual(9, results['comparison_magnitude'])
        self.assertEqual(anomaly_entity.key.urlsafe(),
                         json.loads(results['tags'])['alert'])
Example #2
def GuessStoryFilter(test_path):
  """Returns a suitable "story filter" to use in the bisect config.

  Args:
    test_path: The slash-separated test path used by the dashboard.

  Returns:
    A regex pattern that matches the story referred to by the test_path, or
    an empty string if the test_path does not refer to a story and no story
    filter should be used.
  """
  test_path_parts = test_path.split('/')
  suite_name, story_name = test_path_parts[2], test_path_parts[-1]
  if any([
      _IsNonTelemetrySuiteName(suite_name),
      suite_name in _DISABLE_STORY_FILTER_SUITE_LIST,
      suite_name.startswith('media.') and '.html?' not in story_name,
      suite_name.startswith('webrtc.'),
      story_name in _DISABLE_STORY_FILTER_STORY_LIST]):
    return ''
  test_key = utils.TestKey(test_path)
  subtest_keys = list_tests.GetTestDescendants(test_key)
  try:
    subtest_keys.remove(test_key)
  except ValueError:
    pass
  if subtest_keys:  # Stories do not have subtests.
    return ''

  # During import, some chars in story names got replaced by "_" so they
  # could be safely included in the test_path. At this point we don't know
  # what the original characters were. Additionally, some special characters
  # and argument quoting are not interpreted correctly, e.g. by bisect
  # scripts (crbug.com/662472). We thus keep only a small set of "safe chars"
  # and replace all others with match-any-character regex dots.
  return re.sub(r'[^a-zA-Z0-9]', '.', story_name)
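As a quick illustration of the substitution in the last line above, only alphanumerics survive and every other character becomes a match-any-character dot (the story name below is made up):

import re

# Hypothetical story name whose special characters were mangled on import.
story_name = 'load:news:cnn_2018?warm'

# The same "safe chars" substitution used at the end of GuessStoryFilter.
print(re.sub(r'[^a-zA-Z0-9]', '.', story_name))  # -> load.news.cnn.2018.warm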
Example #3
    def _AddDataForTests(self, stats=None, masters=None):
        if not masters:
            masters = ['ChromiumGPU']
        testing_common.AddTests(masters, ['linux-release'], {
            'scrolling_benchmark': {
                'ref': {},
            },
        })

        for m in masters:
            ref = utils.TestKey('%s/linux-release/scrolling_benchmark/ref' %
                                m).get()
            ref.units = 'ms'
            for i in range(9000, 10070, 5):
                # Internal-only data should be found.
                test_container_key = utils.GetTestContainerKey(ref.key)
                r = graph_data.Row(id=i + 1,
                                   value=float(i * 3),
                                   parent=test_container_key,
                                   internal_only=True)
                if stats:
                    for s in stats:
                        setattr(r, s, i)
                r.put()
    def testAddRowsToCache(self):
        self._AddMockData()
        rows = []

        stored_object.Set(
            'externally_visible__num_revisions_ChromiumPerf/win7/dromaeo/dom',
            [[10, 2, 3], [15, 4, 5], [100, 6, 7]])

        test_key = utils.TestKey('ChromiumPerf/win7/dromaeo/dom')
        test_container_key = utils.GetTestContainerKey(test_key)
        ts1 = datetime.datetime(2013, 1, 1)
        row1 = graph_data.Row(parent=test_container_key,
                              id=1,
                              value=9,
                              timestamp=ts1)
        rows.append(row1)
        ts2 = datetime.datetime(2013, 1, 2)
        row2 = graph_data.Row(parent=test_container_key,
                              id=12,
                              value=90,
                              timestamp=ts2)
        rows.append(row2)
        ts3 = datetime.datetime(2013, 1, 3)
        row3 = graph_data.Row(parent=test_container_key,
                              id=102,
                              value=99,
                              timestamp=ts3)
        rows.append(row3)
        graph_revisions.AddRowsToCache(rows)

        self.assertEqual(
            [[1, 9, utils.TimestampMilliseconds(ts1)], [10, 2, 3],
             [12, 90, utils.TimestampMilliseconds(ts2)], [15, 4, 5],
             [100, 6, 7], [102, 99, utils.TimestampMilliseconds(ts3)]],
            stored_object.Get('externally_visible__num_revisions_'
                              'ChromiumPerf/win7/dromaeo/dom'))
 def _CreateRows(self):
     test_path = 'Chromium/win7/suite/metric'
     test_key = utils.TestKey(test_path)
     stat_names_to_test_keys = {
         'avg': utils.TestKey('Chromium/win7/suite/metric_avg'),
         'std': utils.TestKey('Chromium/win7/suite/metric_std'),
         'count': utils.TestKey('Chromium/win7/suite/metric_count'),
         'max': utils.TestKey('Chromium/win7/suite/metric_max'),
         'min': utils.TestKey('Chromium/win7/suite/metric_min'),
         'sum': utils.TestKey('Chromium/win7/suite/metric_sum')
     }
     histograms = add_histograms_queue_test.TEST_HISTOGRAM
     histograms['diagnostics'][
         reserved_infos.CHROMIUM_COMMIT_POSITIONS.name]['values'] = [99]
     ndb.put_multi(
         add_histograms_queue.CreateRowEntities(histograms, test_key,
                                                stat_names_to_test_keys,
                                                99))
     histograms['diagnostics'][
         reserved_infos.CHROMIUM_COMMIT_POSITIONS.name]['values'] = [200]
     ndb.put_multi(
         add_histograms_queue.CreateRowEntities(
             add_histograms_queue_test.TEST_HISTOGRAM, test_key,
             stat_names_to_test_keys, 200))
Example #6
    def testPost_DeletesMasterAndBot(self):
        testing_common.AddTests(*_TESTS_MULTIPLE_MASTERS_AND_BOTS)
        utils.TestKey('ChromiumPerf/mac/SunSpider/Total/t').delete()
        utils.TestKey('ChromiumPerf/mac/SunSpider/Total').delete()
        utils.TestKey('ChromiumPerf/mac/SunSpider').delete()
        utils.TestKey('ChromiumPerf/linux/SunSpider/Total/t').delete()
        utils.TestKey('ChromiumPerf/linux/SunSpider/Total').delete()
        utils.TestKey('ChromiumPerf/linux/SunSpider').delete()

        for m in _TESTS_MULTIPLE_MASTERS_AND_BOTS[0]:
            for b in _TESTS_MULTIPLE_MASTERS_AND_BOTS[1]:
                master_key = ndb.Key('Master', m)
                bot_key = ndb.Key('Bot', b, parent=master_key)
                self.assertIsNotNone(bot_key.get())
                self.assertIsNotNone(master_key.get())

        self.testapp.get('/deprecate_tests')

        self.ExecuteDeferredTasks(
            deprecate_tests._DEPRECATE_TESTS_TASK_QUEUE_NAME)

        expected_deleted_bots = [
            ndb.Key('Master', 'ChromiumPerf', 'Bot', 'mac'),
            ndb.Key('Master', 'ChromiumPerf', 'Bot', 'linux')
        ]
        expected_deleted_masters = [ndb.Key('Master', 'ChromiumPerf')]
        for m in _TESTS_MULTIPLE_MASTERS_AND_BOTS[0]:
            master_key = ndb.Key('Master', m)
            if master_key in expected_deleted_masters:
                self.assertIsNone(master_key.get())
            else:
                self.assertIsNotNone(master_key.get())

            for b in _TESTS_MULTIPLE_MASTERS_AND_BOTS[1]:
                bot_key = ndb.Key('Bot', b, parent=master_key)
                if bot_key in expected_deleted_bots:
                    self.assertIsNone(bot_key.get())
                else:
                    self.assertIsNotNone(bot_key.get())
Example #7
def _MakeAnomalyEntity(change_point, test, stat, rows):
    """Creates an Anomaly entity.

  Args:
    change_point: A find_change_points.ChangePoint object.
    test: The TestMetadata entity that the anomalies were found on.
    stat: The TestMetadata stat that the anomaly was found on.
    rows: List of Row entities that the anomalies were found on.

  Returns:
    An Anomaly entity, which is not yet put in the datastore.
  """
    end_rev = change_point.extended_end
    start_rev = _GetImmediatelyPreviousRevisionNumber(
        change_point.extended_start, rows) + 1
    print(change_point.extended_start, change_point.extended_end)
    display_start = display_end = None
    if test.master_name == 'ClankInternal':
        display_start, display_end = _GetDisplayRange(change_point.x_value,
                                                      rows)
    median_before = change_point.median_before
    median_after = change_point.median_after

    suite_key = test.key.id().split('/')[:3]
    suite_key = '/'.join(suite_key)
    suite_key = utils.TestKey(suite_key)

    queried_diagnostics = yield (
        histogram.SparseDiagnostic.GetMostRecentDataByNamesAsync(
            suite_key,
            set([
                reserved_infos.BUG_COMPONENTS.name,
                reserved_infos.OWNERS.name,
                reserved_infos.INFO_BLURB.name,
                reserved_infos.ALERT_GROUPING.name,
            ])))

    bug_components = queried_diagnostics.get(
        reserved_infos.BUG_COMPONENTS.name, {}).get('values')
    if bug_components:
        bug_components = bug_components[0]
        # TODO(902796): Remove this typecheck.
        if isinstance(bug_components, list):
            bug_components = bug_components[0]

    ownership_information = {
        'emails':
        queried_diagnostics.get(reserved_infos.OWNERS.name, {}).get('values'),
        'component':
        bug_components,

        # Info blurbs should be a single string, so we only take the first
        # element of the list of values.
        'info_blurb':
        queried_diagnostics.get(reserved_infos.INFO_BLURB.name,
                                {}).get('values', [None])[0],
    }

    alert_grouping = queried_diagnostics.get(
        reserved_infos.ALERT_GROUPING.name, {}).get('values', [])

    # Compute additional anomaly metadata.
    def MinMax(iterable):
        min_ = max_ = None
        for val in iterable:
            if min_ is None:
                min_ = max_ = val
            else:
                min_ = min(min_, val)
                max_ = max(max_, val)
        return min_, max_

    earliest_input_timestamp, latest_input_timestamp = MinMax(
        r.timestamp for unused_rev, r, unused_val in rows)

    new_anomaly = anomaly.Anomaly(
        start_revision=start_rev,
        end_revision=end_rev,
        median_before_anomaly=median_before,
        median_after_anomaly=median_after,
        segment_size_before=change_point.size_before,
        segment_size_after=change_point.size_after,
        window_end_revision=change_point.window_end,
        std_dev_before_anomaly=change_point.std_dev_before,
        t_statistic=change_point.t_statistic,
        degrees_of_freedom=change_point.degrees_of_freedom,
        p_value=change_point.p_value,
        is_improvement=_IsImprovement(test, median_before, median_after),
        ref_test=_GetRefBuildKeyForTest(test),
        test=test.key,
        statistic=stat,
        internal_only=test.internal_only,
        units=test.units,
        display_start=display_start,
        display_end=display_end,
        ownership=ownership_information,
        alert_grouping=alert_grouping,
        earliest_input_timestamp=earliest_input_timestamp,
        latest_input_timestamp=latest_input_timestamp)
    raise ndb.Return(new_anomaly)
Example #8
    def _AddAlertsToDataStore(self):
        """Adds sample data, including triaged and non-triaged alerts."""
        key_map = {}

        subscription = Subscription(
            name='Chromium Perf Sheriff',
            notification_email='*****@*****.**',
        )
        testing_common.AddTests(
            ['ChromiumGPU'], ['linux-release'], {
                'scrolling-benchmark': {
                    'first_paint': {},
                    'first_paint_ref': {},
                    'mean_frame_time': {},
                    'mean_frame_time_ref': {},
                }
            })
        first_paint = utils.TestKey(
            'ChromiumGPU/linux-release/scrolling-benchmark/first_paint')
        mean_frame_time = utils.TestKey(
            'ChromiumGPU/linux-release/scrolling-benchmark/mean_frame_time')

        # By default, all TestMetadata entities have an improvement_direction of
        # UNKNOWN, meaning that neither direction is considered an improvement.
        # Here we set the improvement direction so that some anomalies are
        # considered improvements.
        for test_key in [first_paint, mean_frame_time]:
            test = test_key.get()
            test.improvement_direction = anomaly.DOWN
            test.put()

        # Add some (12) non-triaged alerts.
        for end_rev in range(10000, 10120, 10):
            test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
            ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
            anomaly_entity = anomaly.Anomaly(
                start_revision=end_rev - 5,
                end_revision=end_rev,
                test=test_key,
                median_before_anomaly=100,
                median_after_anomaly=200,
                ref_test=ref_test_key,
                subscriptions=[subscription],
                subscription_names=[subscription.name],
            )
            anomaly_entity.SetIsImprovement()
            anomaly_key = anomaly_entity.put()
            key_map[end_rev] = anomaly_key.urlsafe()

        # Add some (2) already-triaged alerts.
        for end_rev in range(10120, 10140, 10):
            test_key = first_paint if end_rev % 20 == 0 else mean_frame_time
            ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
            bug_id = -1 if end_rev % 20 == 0 else 12345
            anomaly_entity = anomaly.Anomaly(
                start_revision=end_rev - 5,
                end_revision=end_rev,
                test=test_key,
                median_before_anomaly=100,
                median_after_anomaly=200,
                ref_test=ref_test_key,
                bug_id=bug_id,
                subscriptions=[subscription],
                subscription_names=[subscription.name],
            )
            anomaly_entity.SetIsImprovement()
            anomaly_key = anomaly_entity.put()
            key_map[end_rev] = anomaly_key.urlsafe()
            if bug_id > 0:
                bug_data.Bug.New(project='chromium', bug_id=bug_id).put()

        # Add some (6) non-triaged improvements.
        for end_rev in range(10140, 10200, 10):
            test_key = mean_frame_time
            ref_test_key = utils.TestKey('%s_ref' % utils.TestPath(test_key))
            anomaly_entity = anomaly.Anomaly(
                start_revision=end_rev - 5,
                end_revision=end_rev,
                test=test_key,
                median_before_anomaly=200,
                median_after_anomaly=100,
                ref_test=ref_test_key,
                subscriptions=[subscription],
                subscription_names=[subscription.name],
            )
            anomaly_entity.SetIsImprovement()
            anomaly_key = anomaly_entity.put()
            self.assertTrue(anomaly_entity.is_improvement)
            key_map[end_rev] = anomaly_key.urlsafe()
        return key_map
Example #9
    def testCreateRowEntities(self):
        test_path = 'Chromium/win7/suite/metric'
        test_key = utils.TestKey(test_path)
        stat_names_to_test_keys = {
            'avg': utils.TestKey('Chromium/win7/suite/metric_avg'),
            'std': utils.TestKey('Chromium/win7/suite/metric_std'),
            'count': utils.TestKey('Chromium/win7/suite/metric_count'),
            'max': utils.TestKey('Chromium/win7/suite/metric_max'),
            'min': utils.TestKey('Chromium/win7/suite/metric_min'),
            'sum': utils.TestKey('Chromium/win7/suite/metric_sum')
        }
        rows_to_put = add_histograms_queue.CreateRowEntities(
            TEST_HISTOGRAM, test_key, stat_names_to_test_keys, 123)
        ndb.put_multi(rows_to_put)

        rows = graph_data.Row.query().fetch()
        rows_by_path = {}
        for row in rows:
            rows_by_path[row.key.parent().id()] = row

        avg_row = rows_by_path.pop('Chromium/win7/suite/metric_avg')
        self.assertAlmostEqual(2.0, avg_row.value)
        self.assertAlmostEqual(1.0, avg_row.error)
        std_row = rows_by_path.pop('Chromium/win7/suite/metric_std')
        self.assertAlmostEqual(1.0, std_row.value)
        self.assertEqual(0.0, std_row.error)
        count_row = rows_by_path.pop('Chromium/win7/suite/metric_count')
        self.assertEqual(3, count_row.value)
        self.assertEqual(0.0, count_row.error)
        max_row = rows_by_path.pop('Chromium/win7/suite/metric_max')
        self.assertAlmostEqual(3.0, max_row.value)
        self.assertEqual(0.0, max_row.error)
        min_row = rows_by_path.pop('Chromium/win7/suite/metric_min')
        self.assertAlmostEqual(1.0, min_row.value)
        self.assertEqual(0.0, min_row.error)
        sum_row = rows_by_path.pop('Chromium/win7/suite/metric_sum')
        self.assertAlmostEqual(6.0, sum_row.value)
        self.assertEqual(0.0, sum_row.error)

        row = rows_by_path.pop('Chromium/win7/suite/metric')
        self.assertEqual(0, len(rows_by_path))
        fields = row.to_dict().iterkeys()
        d_fields = []
        r_fields = []
        a_fields = []
        for field in fields:
            if field.startswith('d_'):
                d_fields.append(field)
            elif field.startswith('r_'):
                r_fields.append(field)
            elif field.startswith('a_'):
                a_fields.append(field)

        self.assertAlmostEqual(2.0, row.value)
        self.assertAlmostEqual(1.0, row.error)

        self.assertEqual(4, len(d_fields))
        self.assertEqual(3, row.d_count)
        self.assertAlmostEqual(3.0, row.d_max)
        self.assertAlmostEqual(1.0, row.d_min)
        self.assertAlmostEqual(6.0, row.d_sum)

        self.assertEqual(2, len(r_fields))
        self.assertEqual('4cd34ad3320db114ad3a2bd2acc02aba004d0cb4',
                         row.r_v8_rev)
        self.assertEqual('123', row.r_commit_pos)

        self.assertEqual('[Buildbot stdio](http://log.url/)', row.a_stdio_uri)
Example #10
    def testGetSubTests_FetchAndCacheBehavior(self):
        self._AddSampleData()

        # Set the has_rows flag to true on two of the TestMetadata entities.
        for test_path in [
                'Chromium/win7/really/nested/very/deeply/subtest',
                'Chromium/win7/really/nested/very_very'
        ]:
            test = utils.TestKey(test_path).get()
            test.has_rows = True
            test.put()

        # A tree-structured dict of dicts is constructed, and the 'has_rows'
        # flag is set to true for two of these tests. These two tests and
        # their parents are all included in the result.
        response = self.testapp.post(
            '/list_tests', {
                'type': 'sub_tests',
                'suite': 'really',
                'bots': 'Chromium/win7,Chromium/mac'
            })
        self.assertEqual('*',
                         response.headers.get('Access-Control-Allow-Origin'))
        expected = {
            'nested': {
                'has_rows': False,
                'sub_tests': {
                    'very': {
                        'has_rows': False,
                        'sub_tests': {
                            'deeply': {
                                'has_rows': False,
                                'sub_tests': {
                                    'subtest': {
                                        'has_rows': True,
                                        'sub_tests': {}
                                    }
                                }
                            }
                        }
                    },
                    'very_very': {
                        'has_rows': True,
                        'sub_tests': {}
                    }
                }
            }
        }
        # The response should be as expected.
        self.assertEqual(expected, json.loads(response.body))

        # The cache should be set for the win7 bot with the expected response.
        self.assertEqual(
            expected,
            json.loads(
                layered_cache.Get(graph_data.LIST_TESTS_SUBTEST_CACHE_KEY %
                                  ('Chromium', 'win7', 'really'))))

        # Change mac subtests in cache. Should be merged with win7.
        mac_subtests = {
            'mactest': {
                'has_rows': False,
                'sub_tests': {
                    'graph': {
                        'has_rows': True,
                        'sub_tests': {}
                    }
                }
            }
        }
        layered_cache.Set(
            graph_data.LIST_TESTS_SUBTEST_CACHE_KEY %
            ('Chromium', 'mac', 'really'), json.dumps(mac_subtests))
        response = self.testapp.post(
            '/list_tests', {
                'type': 'sub_tests',
                'suite': 'really',
                'bots': 'Chromium/win7,Chromium/mac'
            })
        self.assertEqual('*',
                         response.headers.get('Access-Control-Allow-Origin'))
        expected.update(mac_subtests)
        self.assertEqual(expected, json.loads(response.body))
Example #11
 def _AssertDoesntMatch(self, test_path, pattern):
     """Asserts that a test path doesn't match a pattern with MatchesPattern."""
     test_key = utils.TestKey(test_path)
     self.assertFalse(utils.TestMatchesPattern(test_key, pattern))
Example #12
 def testTestSuiteName_Basic(self):
     key = utils.TestKey('Master/bot/suite-foo/sub/x/y/z')
     self.assertEqual('suite-foo', utils.TestSuiteName(key))
Example #13
    def testGet_ComponentsChosenPerTest(self):
        ownership_samples = [
            {
                'type': 'Ownership',
                'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
                'component': 'Abc>Def'
            },
            {
                'type': 'Ownership',
                'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
                'component': '123>456'
            },
        ]

        sheriff_key = sheriff.Sheriff(
            id='Sheriff',
            labels=['Performance-Sheriff', 'Cr-Blink-Javascript']).put()

        test_paths = [
            'ChromiumPerf/linux/scrolling/first_paint',
            'ChromiumPerf/linux/scrolling/mean_frame_time'
        ]
        test_keys = [utils.TestKey(test_path) for test_path in test_paths]

        now_datetime = datetime.datetime.now()

        alert_test_key_0 = anomaly.Anomaly(start_revision=1476193320,
                                           end_revision=1476201870,
                                           test=test_keys[0],
                                           median_before_anomaly=100,
                                           median_after_anomaly=200,
                                           sheriff=sheriff_key,
                                           ownership=ownership_samples[0],
                                           timestamp=now_datetime).put()

        alert_test_key_1 = anomaly.Anomaly(start_revision=1476193320,
                                           end_revision=1476201870,
                                           test=test_keys[1],
                                           median_before_anomaly=100,
                                           median_after_anomaly=200,
                                           sheriff=sheriff_key,
                                           ownership=ownership_samples[1],
                                           timestamp=now_datetime +
                                           datetime.timedelta(10)).put()

        response = self.testapp.post('/file_bug', [
            ('keys', '%s,%s' %
             (alert_test_key_0.urlsafe(), alert_test_key_1.urlsafe())),
            ('summary', 's'),
            ('description', 'd\n'),
            ('label', 'one'),
            ('label', 'two'),
            ('component', 'Foo>Bar'),
        ])

        self.assertIn(
            '<input type="checkbox" checked name="component" value="Abc&gt;Def">',
            response.body)

        self.assertIn(
            '<input type="checkbox" checked name="component" value="123&gt;456">',
            response.body)
Example #14
    def testGet_WithAllOwnershipComponents(self):
        ownership_samples = [{
            'type': 'Ownership',
            'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
            'component': 'Abc>Xyz'
        }, {
            'type': 'Ownership',
            'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
            'component': 'Def>123'
        }]

        test_paths = [
            'ChromiumPerf/linux/scrolling/first_paint',
            'ChromiumPerf/linux/scrolling/mean_frame_time'
        ]
        test_keys = [utils.TestKey(test_path) for test_path in test_paths]

        sheriff_key = sheriff.Sheriff(
            id='Sheriff',
            labels=['Performance-Sheriff', 'Cr-Blink-Javascript']).put()

        anomaly_1 = anomaly.Anomaly(start_revision=1476193324,
                                    end_revision=1476201840,
                                    test=test_keys[0],
                                    median_before_anomaly=100,
                                    median_after_anomaly=200,
                                    sheriff=sheriff_key,
                                    ownership=ownership_samples[0]).put()

        anomaly_2 = anomaly.Anomaly(start_revision=1476193320,
                                    end_revision=1476201870,
                                    test=test_keys[1],
                                    median_before_anomaly=100,
                                    median_after_anomaly=200,
                                    sheriff=sheriff_key,
                                    ownership=ownership_samples[1]).put()

        response = self.testapp.post('/file_bug', [
            ('keys', '%s' % (anomaly_1.urlsafe())),
            ('summary', 's'),
            ('description', 'd\n'),
            ('label', 'one'),
            ('label', 'two'),
            ('component', 'Foo>Bar'),
        ])

        self.assertIn(
            '<input type="checkbox" checked name="component" value="Abc&gt;Xyz">',
            response.body)

        response_with_both_anomalies = self.testapp.post(
            '/file_bug', [
                ('keys', '%s,%s' % (anomaly_1.urlsafe(), anomaly_2.urlsafe())),
                ('summary', 's'),
                ('description', 'd\n'),
                ('label', 'one'),
                ('label', 'two'),
                ('component', 'Foo>Bar'),
            ])

        self.assertIn(
            '<input type="checkbox" checked name="component" value="Abc&gt;Xyz">',
            response_with_both_anomalies.body)

        self.assertIn(
            '<input type="checkbox" checked name="component" value="Def&gt;123">',
            response_with_both_anomalies.body)
Example #15
def PinpointParamsFromBisectParams(params):
    """Takes parameters from Dashboard's pinpoint-job-dialog and returns
  a dict with parameters for a new Pinpoint job.

  Args:
    params: A dict in the following format:
    {
        'test_path': Test path for the metric being bisected.
        'start_commit': The earlier commit, resolved to a git hash.
        'end_commit': The later commit, resolved to a git hash.
        'bug_id': Associated bug.
    }

  Returns:
    A dict of params for passing to Pinpoint to start a job, or a dict with an
    'error' field.
  """
    if not utils.IsValidSheriffUser():
        user = utils.GetEmail()
        raise InvalidParamsError('User "%s" not authorized.' % user)

    test_path = params['test_path']
    test_path_parts = test_path.split('/')
    bot_name = test_path_parts[1]
    suite = test_path_parts[2]
    story_filter = params['story_filter']
    pin = params.get('pin')

    # If a functional bisect is specified, Pinpoint expects these parameters
    # to be empty.
    bisect_mode = params['bisect_mode']
    if bisect_mode != 'performance' and bisect_mode != 'functional':
        raise InvalidParamsError('Invalid bisect mode %s specified.' %
                                 bisect_mode)

    tir_label = ''
    chart_name = ''
    trace_name = ''
    if bisect_mode == 'performance':
        tir_label, chart_name, trace_name = ParseTIRLabelChartNameAndTraceName(
            test_path_parts)

    start_commit = params['start_commit']
    end_commit = params['end_commit']
    start_git_hash = ResolveToGitHash(start_commit, suite)
    end_git_hash = ResolveToGitHash(end_commit, suite)

    # Pinpoint also requires that you specify which isolate target to run for
    # the test, so we derive that from the suite name. Eventually, this would
    # ideally be stored in a SparseDiagnostic, but for now we can guess.
    target = _GetIsolateTarget(bot_name, suite, start_commit, end_commit)

    email = utils.GetEmail()
    job_name = '%s bisect on %s/%s' % (bisect_mode.capitalize(), bot_name,
                                       suite)

    # Histogram names don't include the statistic, so split it out here.
    chart_name, statistic_name = ParseStatisticNameFromChart(chart_name)

    alert_key = ''
    if params.get('alerts'):
        alert_keys = json.loads(params.get('alerts'))
        if alert_keys:
            alert_key = alert_keys[0]

    alert_magnitude = None
    if alert_key:
        alert = ndb.Key(urlsafe=alert_key).get()
        alert_magnitude = alert.median_after_anomaly - alert.median_before_anomaly

    if not alert_magnitude:
        alert_magnitude = FindMagnitudeBetweenCommits(utils.TestKey(test_path),
                                                      start_commit, end_commit)

    pinpoint_params = {
        'configuration': bot_name,
        'benchmark': suite,
        'chart': chart_name,
        'start_git_hash': start_git_hash,
        'end_git_hash': end_git_hash,
        'bug_id': params['bug_id'],
        'comparison_mode': bisect_mode,
        'target': target,
        'user': email,
        'name': job_name,
        'tags': json.dumps({
            'test_path': test_path,
            'alert': alert_key
        }),
    }

    if alert_magnitude:
        pinpoint_params['comparison_magnitude'] = alert_magnitude
    if pin:
        pinpoint_params['pin'] = pin
    if statistic_name:
        pinpoint_params['statistic'] = statistic_name
    if story_filter:
        pinpoint_params['story'] = story_filter
    if tir_label:
        pinpoint_params['tir_label'] = tir_label
    if trace_name:
        pinpoint_params['trace'] = trace_name

    return pinpoint_params
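For reference, when an alert key is passed in, the comparison magnitude computed above is just the difference between the anomaly's medians, which is what the first test in this section asserts. A minimal sketch with illustrative numbers:

# Illustrative values only; in the handler they come from the Anomaly entity
# fetched via ndb.Key(urlsafe=alert_key).get().
median_before_anomaly = 1
median_after_anomaly = 10

alert_magnitude = median_after_anomaly - median_before_anomaly
print(alert_magnitude)  # -> 9, surfaced as pinpoint_params['comparison_magnitude']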
 def testProcessTest_NoSheriff_ErrorLogged(self, mock_logging_error):
   self._AddDataForTests()
   ref = utils.TestKey(
       'ChromiumGPU/linux-release/scrolling_benchmark/ref').get()
   find_anomalies.ProcessTests([ref.key])
   mock_logging_error.assert_called_with('No sheriff for %s', ref.key)
Example #17
    def testGetGraphJson_WithAnomalies_ReturnsCorrectAnomalyAnnotations(self):
        self._AddTestColumns()

        anomaly1 = anomaly.Anomaly(
            start_revision=14999,
            end_revision=15000,
            test=utils.TestKey('ChromiumGPU/win7/dromaeo/dom'),
            median_before_anomaly=100,
            median_after_anomaly=200)
        anomaly1.SetIsImprovement()
        key1 = anomaly1.put()

        anomaly2 = anomaly.Anomaly(
            start_revision=15004,
            end_revision=15006,
            test=utils.TestKey('ChromiumGPU/win7/dromaeo/dom'),
            median_before_anomaly=200,
            median_after_anomaly=100,
            bug_id=12345)
        anomaly2.SetIsImprovement()
        key2 = anomaly2.put()

        old_style_test_key = ndb.Key('Master', 'ChromiumGPU', 'Bot', 'win7',
                                     'Test', 'dromaeo', 'Test', 'dom')
        anomaly3 = anomaly.Anomaly(start_revision=15008,
                                   end_revision=15009,
                                   test=old_style_test_key,
                                   median_before_anomaly=100,
                                   median_after_anomaly=200)
        key3 = anomaly3.put()

        test = utils.TestKey('ChromiumGPU/win7/dromaeo/dom').get()
        test.description = 'About this test'
        test.units = 'ms'
        test.buildername = 'Windows 7 (1)'
        test.UpdateSheriff()
        test.put()

        flot_json_str = graph_json.GetGraphJson(
            {
                'ChromiumGPU/win7/dromaeo/dom': [],
            }, rev=15000, num_points=8)

        flot = json.loads(flot_json_str)
        annotations = flot['annotations']
        self.assertEqual(5, len(annotations['0']))

        # Verify key fields of the annotation dictionary for the first anomaly.
        anomaly_one_annotation = annotations['0']['0']['g_anomaly']
        self.assertEqual(14999, anomaly_one_annotation['start_revision'])
        self.assertEqual(15000, anomaly_one_annotation['end_revision'])
        self.assertEqual('100.0%', anomaly_one_annotation['percent_changed'])
        self.assertIsNone(anomaly_one_annotation['bug_id'])
        self.assertEqual(key1.urlsafe(), anomaly_one_annotation['key'])
        self.assertTrue(anomaly_one_annotation['improvement'])

        # Verify key fields of the annotation dictionary for the second anomaly.
        anomaly_two_annotation = annotations['0']['2']['g_anomaly']
        self.assertEqual(15004, anomaly_two_annotation['start_revision'])
        self.assertEqual(15006, anomaly_two_annotation['end_revision'])
        self.assertEqual('50.0%', anomaly_two_annotation['percent_changed'])
        self.assertEqual(12345, anomaly_two_annotation['bug_id'])
        self.assertEqual(key2.urlsafe(), anomaly_two_annotation['key'])
        self.assertFalse(anomaly_two_annotation['improvement'])

        # Verify the key for the third anomaly.
        anomaly_three_annotation = annotations['0']['3']['g_anomaly']
        self.assertEqual(key3.urlsafe(), anomaly_three_annotation['key'])

        # Verify the tracing link annotations
        self.assertEqual('http://trace/15000',
                         annotations['0']['0']['a_tracing_uri'])
        self.assertEqual('http://trace/15012',
                         annotations['0']['4']['a_tracing_uri'])

        # Verify the series annotations.
        self.assertEqual(
            {
                '0': {
                    'name': 'dom',
                    'path': 'ChromiumGPU/win7/dromaeo/dom',
                    'units': 'ms',
                    'better': 'Higher',
                    'description': 'About this test',
                    'can_bisect': True,
                }
            }, annotations['series'])
Example #18
def _HasChildTest(test_path):
  key = utils.TestKey(test_path)
  child = graph_data.TestMetadata.query(
      graph_data.TestMetadata.parent_test == key).get()
  return bool(child)
Example #19
 def _AssertMatches(self, test_path, pattern):
     """Asserts that a test path matches a pattern with MatchesPattern."""
     test_key = utils.TestKey(test_path)
     self.assertTrue(utils.TestMatchesPattern(test_key, pattern))
Example #20
  def testPostHistogram_AddsNewSparseDiagnostic(self):
    diag_dict = {
        'buildNumber': 0,
        'buildbotMasterName': '',
        'buildbotName': 'buildbotmaster0',
        'displayBotName': 'bot',
        'displayMasterName': 'master',
        'guid': '6ce177ab-3fdb-44cb-aa8d-9ed49765d810',
        'logUri': '',
        'type': 'BuildbotInfo'
    }
    diag = histogram.SparseDiagnostic(
        data=json.dumps(diag_dict), start_revision=1, end_revision=sys.maxint,
        test=utils.TestKey('master/bot/benchmark'))
    diag.put()
    data = json.dumps([
        {
            'benchmarkName': 'benchmark',
            'canonicalUrl': '',
            'guid': '0bc1021b-8107-4db7-bc8c-49d7cf53c5ae',
            'label': '',
            'legacyTIRLabel': '',
            'storyDisplayName': 'story',
            'type': 'TelemetryInfo'
        }, {
            'angle': [],
            'catapult': [],
            'chromium': [],
            'chromiumCommitPosition': 424242,
            'guid': '25f0a111-9bb4-4cea-b0c1-af2609623160',
            'skia': [],
            'type': 'RevisionInfo',
            'v8': [],
            'webrtc': []
        }, {
            'buildNumber': 0,
            'buildbotMasterName': '',
            'buildbotName': 'buildbotmaster1',
            'displayBotName': 'bot',
            'displayMasterName': 'master',
            'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
            'logUri': '',
            'type': 'BuildbotInfo'
        }, {
            'binBoundaries': [1, [1, 1000, 20]],
            'diagnostics': {
                'buildbot': 'e9c2891d-2b04-413f-8cf4-099827e67626',
                'revisions': '25f0a111-9bb4-4cea-b0c1-af2609623160',
                'telemetry': '0bc1021b-8107-4db7-bc8c-49d7cf53c5ae'
            },
            'guid': '4989617a-14d6-4f80-8f75-dafda2ff13b0',
            'name': 'foo',
            'unit': 'count'}
    ])
    self.testapp.post('/add_histograms', {'data': data})

    diagnostics = histogram.SparseDiagnostic.query().fetch()
    params_by_guid = self.TaskParamsByGuid()
    params = params_by_guid['4989617a-14d6-4f80-8f75-dafda2ff13b0']
    hist = json.loads(params['data'][0])
    buildbot_info = hist['diagnostics']['buildbot']

    self.assertEqual(2, len(diagnostics))
    self.assertEqual('e9c2891d-2b04-413f-8cf4-099827e67626', buildbot_info)
Example #21
 def testGetTestMetadataKey_TestMetadata(self):
     a = anomaly.Anomaly(test=utils.TestKey('a/b/c/d'))
     k = a.GetTestMetadataKey()
     self.assertEqual('TestMetadata', k.kind())
     self.assertEqual('a/b/c/d', k.id())
     self.assertEqual('a/b/c/d', utils.TestPath(k))
Example #22
def ProcessHistogramSet(histogram_dicts):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')

    bot_whitelist_future = stored_object.GetAsync(
        add_point_queue.BOT_WHITELIST_KEY)

    histograms = histogram_set.HistogramSet()
    histograms.ImportDicts(histogram_dicts)
    histograms.ResolveRelatedHistograms()
    histograms.DeduplicateDiagnostics()

    if len(histograms) == 0:
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must contain at least one histogram.')

    _LogDebugInfo(histograms)

    InlineDenseSharedDiagnostics(histograms)

    # TODO(eakuefner): Get rid of this.
    # https://github.com/catapult-project/catapult/issues/4242
    _PurgeHistogramBinData(histograms)

    master = _GetDiagnosticValue(reserved_infos.MASTERS.name,
                                 histograms.GetFirstHistogram())
    bot = _GetDiagnosticValue(reserved_infos.BOTS.name,
                              histograms.GetFirstHistogram())
    benchmark = _GetDiagnosticValue(reserved_infos.BENCHMARKS.name,
                                    histograms.GetFirstHistogram())
    benchmark_description = _GetDiagnosticValue(
        reserved_infos.BENCHMARK_DESCRIPTIONS.name,
        histograms.GetFirstHistogram(),
        optional=True)

    _ValidateMasterBotBenchmarkName(master, bot, benchmark)

    suite_key = utils.TestKey('%s/%s/%s' % (master, bot, benchmark))

    logging.info('Suite: %s', suite_key.id())

    revision = ComputeRevision(histograms)

    bot_whitelist = bot_whitelist_future.get_result()
    internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
        histograms, suite_key, revision, internal_only)

    # TODO(eakuefner): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    new_guids_to_old_diagnostics = DeduplicateAndPut(
        suite_level_sparse_diagnostic_entities, suite_key, revision)
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
        histograms.ReplaceSharedDiagnostic(
            new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    tasks = _BatchHistogramsIntoTasks(suite_key.id(), histograms, revision,
                                      benchmark_description)

    _QueueHistogramTasks(tasks)
Example #23
def _MakeAnomalyEntity(change_point, test, stat, rows):
    """Creates an Anomaly entity.

  Args:
    change_point: A find_change_points.ChangePoint object.
    test: The TestMetadata entity that the anomalies were found on.
    stat: The TestMetadata stat that the anomaly was found on.
    rows: List of Row entities that the anomalies were found on.

  Returns:
    An Anomaly entity, which is not yet put in the datastore.
  """
    end_rev = change_point.x_value
    start_rev = _GetImmediatelyPreviousRevisionNumber(end_rev, rows) + 1
    display_start = display_end = None
    if test.master_name == 'ClankInternal':
        display_start, display_end = _GetDisplayRange(change_point.x_value,
                                                      rows)
    median_before = change_point.median_before
    median_after = change_point.median_after

    suite_key = test.key.id().split('/')[:3]
    suite_key = '/'.join(suite_key)
    suite_key = utils.TestKey(suite_key)

    queried_diagnostics = yield (
        histogram.SparseDiagnostic.GetMostRecentDataByNamesAsync(
            suite_key,
            set([
                reserved_infos.BUG_COMPONENTS.name, reserved_infos.OWNERS.name
            ])))

    bug_components = queried_diagnostics.get(
        reserved_infos.BUG_COMPONENTS.name, {}).get('values')
    if bug_components:
        bug_components = bug_components[0]
        # TODO(902796): Remove this typecheck.
        if isinstance(bug_components, list):
            bug_components = bug_components[0]

    ownership_information = {
        'emails':
        queried_diagnostics.get(reserved_infos.OWNERS.name, {}).get('values'),
        'component':
        bug_components
    }

    new_anomaly = anomaly.Anomaly(
        start_revision=start_rev,
        end_revision=end_rev,
        median_before_anomaly=median_before,
        median_after_anomaly=median_after,
        segment_size_before=change_point.size_before,
        segment_size_after=change_point.size_after,
        window_end_revision=change_point.window_end,
        std_dev_before_anomaly=change_point.std_dev_before,
        t_statistic=change_point.t_statistic,
        degrees_of_freedom=change_point.degrees_of_freedom,
        p_value=change_point.p_value,
        is_improvement=_IsImprovement(test, median_before, median_after),
        ref_test=_GetRefBuildKeyForTest(test),
        test=test.key,
        statistic=stat,
        sheriff=test.sheriff,
        internal_only=test.internal_only,
        units=test.units,
        display_start=display_start,
        display_end=display_end,
        ownership=ownership_information)
    raise ndb.Return(new_anomaly)
Example #24
 def testGet_UsesOnlyMostRecentComponents(self):
     ownership_samples = [
         {
             'type': 'Ownership',
             'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
             'component': 'Abc>Def'
         },
         {
             'type': 'Ownership',
             'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
             'component': '123>456'
         },
     ]
     subscription = Subscription(
         name='Sheriff',
         bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
     test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
     now_datetime = datetime.datetime.now()
     older_alert = anomaly.Anomaly(start_revision=1476193320,
                                   end_revision=1476201870,
                                   test=test_key,
                                   median_before_anomaly=100,
                                   median_after_anomaly=200,
                                   subscriptions=[subscription],
                                   subscription_names=[subscription.name],
                                   ownership=ownership_samples[0],
                                   timestamp=now_datetime).put()
     newer_alert = anomaly.Anomaly(start_revision=1476193320,
                                   end_revision=1476201870,
                                   test=test_key,
                                   median_before_anomaly=100,
                                   median_after_anomaly=200,
                                   subscriptions=[subscription],
                                   subscription_names=[subscription.name],
                                   ownership=ownership_samples[1],
                                   timestamp=now_datetime +
                                   datetime.timedelta(10)).put()
     response = self.testapp.post('/file_bug', [
         ('keys', '%s,%s' % (older_alert.urlsafe(), newer_alert.urlsafe())),
         ('summary', 's'),
         ('description', 'd\n'),
         ('label', 'one'),
         ('label', 'two'),
         ('component', 'Foo>Bar'),
     ])
     self.assertNotIn(
         '<input type="checkbox" checked name="component" value="Abc&gt;Def">',
         response.body)
     self.assertIn(
         '<input type="checkbox" checked name="component" value="123&gt;456">',
         response.body)
     response_inverted_order = self.testapp.post('/file_bug', [
         ('keys', '%s,%s' % (newer_alert.urlsafe(), older_alert.urlsafe())),
         ('summary', 's'),
         ('description', 'd\n'),
         ('label', 'one'),
         ('label', 'two'),
         ('component', 'Foo>Bar'),
     ])
     self.assertNotIn(
         '<input type="checkbox" checked name="component" value="Abc&gt;Def">',
         response_inverted_order.body)
     self.assertIn(
         '<input type="checkbox" checked name="component" value="123&gt;456">',
         response_inverted_order.body)
Example #25
def _FindOrInsertNamedDiagnosticsOutOfOrder(new_diagnostic, old_diagnostics,
                                            rev):
    logging.info(
        'Inserting diagnostic out of order. Diagnostic: %r,'
        ' revision: %d', new_diagnostic, rev)

    new_guid = new_diagnostic.key.id()
    guid_mapping = {}

    # This happens when all of the old diagnostics were invalid and so none
    # were added to the list.
    if len(old_diagnostics) == 0:
        guid_mapping[new_guid] = new_diagnostic.data
        yield new_diagnostic.put_async()
        raise ndb.Return(guid_mapping)

    for i in itertools.islice(itertools.count(0), len(old_diagnostics)):
        cur = old_diagnostics[i]

        suite_key = utils.TestKey('/'.join(cur.test.id().split('/')[:3]))

        next_diagnostic = None if i == 0 else old_diagnostics[i - 1]

        # Overall there are 2 major cases to handle. Either you're clobbering an
        # existing diagnostic by uploading right to the start of that diagnostic's
        # range, or you're splitting the range.
        #
        # We treat insertions by assuming that the new diagnostic is valid until the
        # next uploaded commit, since that commit will have had a diagnostic on it
        # which will have been diffed and inserted appropriately at the time.

        # Case 1, clobber the existing diagnostic.
        if rev == cur.start_revision:
            if not cur.IsDifferent(new_diagnostic):
                raise ndb.Return(guid_mapping)

            next_revision = yield HistogramRevisionRecord.FindNextRevision(
                suite_key, rev)

            futures = []

            # There's either a next diagnostic or there isn't, check each separately.
            if not next_diagnostic:
                # If this is the last diagnostic in the range, there are only 2 cases
                # to consider.
                #  1. There are no commits after this diagnostic.
                #  2. There are commits, in which case we need to split the range.

                # 1. There are no commits.
                if next_revision == sys.maxsize:
                    cur.data = new_diagnostic.data
                    cur.data['guid'] = cur.key.id()

                    guid_mapping[new_guid] = cur.data
                    new_diagnostic = None

                # 2. There are commits, in which case we need to split the range.
                else:
                    new_diagnostic.start_revision = cur.start_revision
                    new_diagnostic.end_revision = next_revision

                    # Nudge the old diagnostic range forward, that way you don't have to
                    # resave the histograms.
                    cur.start_revision = next_revision + 1

            # There is another diagnostic range after this one.
            else:
                # If there is another diagnostic range after this, we need to check:
                #  1. Are there any commits between this revision and the next
                #     diagnostic
                #   a. If there are, we need to split the range
                #   b. If there aren't, we just overwrite the diagnostic.

                # 1a. There are commits after this revision before the start of the next
                #     diagnostic.
                if next_revision != next_diagnostic.start_revision - 1:
                    new_diagnostic.start_revision = cur.start_revision
                    new_diagnostic.end_revision = next_revision

                    # Nudge the old diagnostic range forward, that way you don't have to
                    # resave the histograms.
                    cur.start_revision = next_revision + 1

                # There are no commits between this revision and the next
                # diagnostic, so just overwrite it.
                else:
                    # A. They're not the same.
                    if new_diagnostic.IsDifferent(next_diagnostic):
                        cur.data = new_diagnostic.data
                        cur.data['guid'] = cur.key.id()

                        guid_mapping[new_guid] = cur.data
                        new_diagnostic = None

                    # B. They're the same, in which case we just want to extend the next
                    #    diagnostic's range backwards.
                    else:
                        guid_mapping[new_guid] = next_diagnostic.data
                        next_diagnostic.start_revision = cur.start_revision
                        new_diagnostic = None
                        futures.append(cur.key.delete_async())
                        cur = next_diagnostic

            # Finally, check if there was a diagnostic range before this one,
            # and whether it's different from the new one.
            prev_diagnostic = None
            if i + 1 < len(old_diagnostics):
                prev_diagnostic = old_diagnostics[i + 1]

            cur_diagnostic = cur
            if new_diagnostic:
                cur_diagnostic = new_diagnostic

            # There is no previous diagnostic range, or it's different, so
            # just save the current one.
            if not prev_diagnostic or cur_diagnostic.IsDifferent(
                    prev_diagnostic):
                futures.append(cur.put_async())
                if new_diagnostic:
                    futures.append(new_diagnostic.put_async())

            # Previous range is the same, so merge.
            else:
                guid_mapping[new_guid] = prev_diagnostic.data
                prev_diagnostic.end_revision = cur_diagnostic.end_revision

                futures.append(prev_diagnostic.put_async())
                if new_diagnostic:
                    new_diagnostic = None
                    futures.append(cur.put_async())
                else:
                    futures.append(cur.key.delete_async())

            yield futures
            raise ndb.Return(guid_mapping)

        # Case 2, split the range.
        elif rev > cur.start_revision and rev <= cur.end_revision:
            if not cur.IsDifferent(new_diagnostic):
                raise ndb.Return(guid_mapping)

            next_revision = yield HistogramRevisionRecord.FindNextRevision(
                suite_key, rev)

            cur.end_revision = rev - 1
            new_diagnostic.start_revision = rev
            new_diagnostic.end_revision = next_revision

            futures = [cur.put_async()]

            # There's either a next diagnostic or there isn't, check each separately.
            if not next_diagnostic:
                # There's no commit after this revision, which means we can extend this
                # diagnostic range to infinity.
                if next_revision == sys.maxsize:
                    new_diagnostic.end_revision = next_revision
                else:
                    new_diagnostic.end_revision = next_revision

                    clone_of_cur = SparseDiagnostic(
                        data=cur.data,
                        test=cur.test,
                        start_revision=next_revision + 1,
                        end_revision=sys.maxsize,
                        name=cur.name,
                        internal_only=cur.internal_only)
                    futures.append(clone_of_cur.put_async())

                futures.append(new_diagnostic.put_async())
            else:
                # If there is another diagnostic range after this, we need to check:
                #  1. Are there any commits between this revision and the next
                #     diagnostic
                #   a. If there are, we need to split the range
                #   b. If there aren't, we need to check if the next diagnostic is
                #      any different than the current one, because we may just merge
                #      them together.

                # 1a. There are commits after this revision before the start of the next
                #     diagnostic.
                if next_revision != next_diagnostic.start_revision - 1:
                    new_diagnostic.end_revision = next_revision

                    clone_of_cur = SparseDiagnostic(
                        data=cur.data,
                        test=cur.test,
                        start_revision=next_revision + 1,
                        end_revision=next_diagnostic.start_revision - 1,
                        name=cur.name,
                        internal_only=cur.internal_only)

                    futures.append(clone_of_cur.put_async())
                    futures.append(new_diagnostic.put_async())

                # 1b. There aren't commits between this revision and the start of the
                #     next diagnostic range. In this case there are 2 possible outcomes.
                #   A. They're not the same, so just split the range as normal.
                #   B. That the new diagnostic we're inserting and the next one are the
                #      same, in which case they can be merged.
                else:
                    # A. They're not the same.
                    if new_diagnostic.IsDifferent(next_diagnostic):
                        new_diagnostic.end_revision = next_diagnostic.start_revision - 1
                        futures.append(new_diagnostic.put_async())

                    # B. They're the same, in which case we just want to extend the next
                    #    diagnostic's range backwards.
                    else:
                        guid_mapping[new_guid] = next_diagnostic.data
                        next_diagnostic.start_revision = new_diagnostic.start_revision
                        new_diagnostic = None
                        futures.append(next_diagnostic.put_async())

            yield futures
            raise ndb.Return(guid_mapping)

    # Can't find a spot to put it, which indicates that it should go before any
    # existing diagnostic.
    next_diagnostic = old_diagnostics[-1]

    if not next_diagnostic.IsDifferent(new_diagnostic):
        next_diagnostic.start_revision = rev
        guid_mapping[new_guid] = next_diagnostic.data
        yield next_diagnostic.put_async()
        raise ndb.Return(guid_mapping)

    new_diagnostic.start_revision = rev
    new_diagnostic.end_revision = next_diagnostic.start_revision - 1
    yield new_diagnostic.put_async()
    raise ndb.Return(guid_mapping)
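
Aside: the split/merge bookkeeping above is dense, so here is a minimal, self-contained sketch of the decision it implements, modeling diagnostics as plain (start, end, data) tuples instead of SparseDiagnostic entities. The name insert_range and the simplified split rule are illustrative only and deliberately omit the tail-cloning and async puts of the real code.

import sys


def insert_range(ranges, rev, data):
    # `ranges` is a sorted, non-overlapping list of (start, end, data) tuples
    # over inclusive revision ranges. Inserting at `rev` either splits the
    # covering range (different data) or merges with an identical neighbor.
    out = []
    for start, end, existing in ranges:
        if start <= rev <= end and existing != data:
            # Split: keep the prefix with the old data, then let the new data
            # cover the rest. (The real code also clones the old data for the
            # tail when later commits exist; this sketch omits that.)
            if start < rev:
                out.append((start, rev - 1, existing))
            out.append((rev, end, data))
        elif start == rev + 1 and existing == data:
            # Merge: extend an identical neighboring range backwards instead
            # of inserting a duplicate.
            out.append((rev, end, existing))
        else:
            out.append((start, end, existing))
    return out

# Inserting different data at revision 15 splits the open-ended range:
#   insert_range([(10, sys.maxsize, 'v1')], 15, 'v2')
#   -> [(10, 14, 'v1'), (15, sys.maxsize, 'v2')]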
Example #26
 def testGet_UsesFirstDefinedComponent(self):
     ownership_samples = [{
         'type': 'Ownership',
         'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb827',
     }, {
         'type': 'Ownership',
         'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
         'component': ''
     }, {
         'type': 'Ownership',
         'guid': 'eb212e80-db58-4cbd-b331-c2245ecbb826',
         'component': 'Abc>Def'
     }]
     now_datetime = datetime.datetime.now()
     test_key = utils.TestKey('ChromiumPerf/linux/scrolling/first_paint')
     subscription = Subscription(
         name='Sheriff',
         bug_labels=['Performance-Sheriff', 'Cr-Blink-Javascript'])
     alert_without_ownership = anomaly.Anomaly(
         start_revision=1476193320,
         end_revision=1476201870,
         test=test_key,
         median_before_anomaly=100,
         median_after_anomaly=200,
         subscriptions=[subscription],
         subscription_names=[subscription.name],
         timestamp=now_datetime).put()
     alert_without_component = anomaly.Anomaly(
         start_revision=1476193320,
         end_revision=1476201870,
         test=test_key,
         median_before_anomaly=100,
         median_after_anomaly=200,
         subscriptions=[subscription],
         subscription_names=[subscription.name],
         ownership=ownership_samples[0],
         timestamp=now_datetime + datetime.timedelta(10)).put()
     alert_with_empty_component = anomaly.Anomaly(
         start_revision=1476193320,
         end_revision=1476201870,
         test=test_key,
         median_before_anomaly=100,
         median_after_anomaly=200,
         subscriptions=[subscription],
         subscription_names=[subscription.name],
         ownership=ownership_samples[1],
         timestamp=now_datetime + datetime.timedelta(20)).put()
     alert_with_component = anomaly.Anomaly(
         start_revision=1476193320,
         end_revision=1476201870,
         test=test_key,
         median_before_anomaly=100,
         median_after_anomaly=200,
         subscriptions=[subscription],
         subscription_names=[subscription.name],
         ownership=ownership_samples[2],
         timestamp=now_datetime + datetime.timedelta(30)).put()
     response = self.testapp.post('/file_bug', [
         ('keys', '%s,%s,%s,%s' % (alert_without_ownership.urlsafe(),
                                   alert_without_component.urlsafe(),
                                   alert_with_empty_component.urlsafe(),
                                   alert_with_component.urlsafe())),
         ('summary', 's'),
         ('description', 'd\n'),
         ('label', 'one'),
         ('label', 'two'),
         ('component', 'Foo>Bar'),
     ])
     self.assertIn(
         '<input type="checkbox" checked name="component" value="Abc&gt;Def">',
         response.body)
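
The assertion above expects the alerts' only non-empty ownership component ('Abc>Def') to come back pre-checked in the bug-filing form. Below is a minimal sketch of the selection rule the test name describes, assuming the handler walks the alerts in the order they were submitted; the helper name is hypothetical and this is not the /file_bug handler's actual code.

def first_defined_component(alerts):
    # Returns the first non-empty ownership component, or '' if none is set.
    for alert in alerts:
        ownership = getattr(alert, 'ownership', None) or {}
        component = ownership.get('component')
        if component:
            return component
    return ''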
Example #27
def ProcessHistogramSet(histogram_dicts, completion_token=None):
    if not isinstance(histogram_dicts, list):
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must be a list of dicts')

    histograms = histogram_set.HistogramSet()

    with timing.WallTimeLogger('hs.ImportDicts'):
        histograms.ImportDicts(histogram_dicts)

    with timing.WallTimeLogger('hs.DeduplicateDiagnostics'):
        histograms.DeduplicateDiagnostics()

    if len(histograms) == 0:
        raise api_request_handler.BadRequestError(
            'HistogramSet JSON must contain at least one histogram.')

    with timing.WallTimeLogger('hs._LogDebugInfo'):
        _LogDebugInfo(histograms)

    with timing.WallTimeLogger('InlineDenseSharedDiagnostics'):
        InlineDenseSharedDiagnostics(histograms)

    # TODO(#4242): Get rid of this.
    # https://github.com/catapult-project/catapult/issues/4242
    with timing.WallTimeLogger('_PurgeHistogramBinData'):
        _PurgeHistogramBinData(histograms)

    with timing.WallTimeLogger('_GetDiagnosticValue calls'):
        master = _GetDiagnosticValue(reserved_infos.MASTERS.name,
                                     histograms.GetFirstHistogram())
        bot = _GetDiagnosticValue(reserved_infos.BOTS.name,
                                  histograms.GetFirstHistogram())
        benchmark = _GetDiagnosticValue(reserved_infos.BENCHMARKS.name,
                                        histograms.GetFirstHistogram())
        benchmark_description = _GetDiagnosticValue(
            reserved_infos.BENCHMARK_DESCRIPTIONS.name,
            histograms.GetFirstHistogram(),
            optional=True)

    with timing.WallTimeLogger('_ValidateMasterBotBenchmarkName'):
        _ValidateMasterBotBenchmarkName(master, bot, benchmark)

    with timing.WallTimeLogger('ComputeRevision'):
        suite_key = utils.TestKey('%s/%s/%s' % (master, bot, benchmark))
        logging.info('Suite: %s', suite_key.id())

        revision = ComputeRevision(histograms)
        logging.info('Revision: %s', revision)

        internal_only = graph_data.Bot.GetInternalOnlySync(master, bot)

    revision_record = histogram.HistogramRevisionRecord.GetOrCreate(
        suite_key, revision)
    revision_record.put()

    last_added = histogram.HistogramRevisionRecord.GetLatest(
        suite_key).get_result()

    # On first upload, a query immediately following a put may return nothing.
    if not last_added:
        last_added = revision_record

    _CheckRequest(last_added, 'No last revision')

    # We'll skip the histogram-level sparse diagnostics because we need to
    # handle those with the histograms, below, so that we can properly assign
    # test paths.
    with timing.WallTimeLogger('FindSuiteLevelSparseDiagnostics'):
        suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
            histograms, suite_key, revision, internal_only)

    # TODO(896856): Refactor master/bot computation to happen above this line
    # so that we can replace with a DiagnosticRef rather than a full diagnostic.
    with timing.WallTimeLogger('DeduplicateAndPut'):
        new_guids_to_old_diagnostics = (
            histogram.SparseDiagnostic.FindOrInsertDiagnostics(
                suite_level_sparse_diagnostic_entities, suite_key, revision,
                last_added.revision).get_result())

    with timing.WallTimeLogger('ReplaceSharedDiagnostic calls'):
        for new_guid, old_diagnostic in new_guids_to_old_diagnostics.items():
            histograms.ReplaceSharedDiagnostic(
                new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

    with timing.WallTimeLogger('_CreateHistogramTasks'):
        tasks = _CreateHistogramTasks(suite_key.id(), histograms, revision,
                                      benchmark_description, completion_token)

    with timing.WallTimeLogger('_QueueHistogramTasks'):
        _QueueHistogramTasks(tasks)
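
Every stage of ProcessHistogramSet is wrapped in timing.WallTimeLogger so the wall-clock cost of each ingestion step shows up in the logs. The following is a hypothetical stand-in for such a context manager, shown only to illustrate the with-statement pattern used above; the dashboard's real implementation may differ.

import contextlib
import logging
import time


@contextlib.contextmanager
def WallTimeLogger(label):
    # Logs how long the wrapped block took, even if it raises.
    start = time.time()
    try:
        yield
    finally:
        logging.info('%s took %.3f seconds', label, time.time() - start)

# Usage mirrors the calls above, e.g.:
#   with WallTimeLogger('hs.ImportDicts'):
#       histograms.ImportDicts(histogram_dicts)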
Example #28
@ndb.tasklet
def _QueryCaseTags(test_path, case):
    data_by_name = yield histogram.SparseDiagnostic.GetMostRecentDataByNamesAsync(
        utils.TestKey(test_path), [reserved_infos.STORY_TAGS.name])
    data = data_by_name.get(reserved_infos.STORY_TAGS.name)
    tags = list(generic_set.GenericSet.FromDict(data)) if data else []
    raise ndb.Return((case, tags))
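
Because _QueryCaseTags is an ndb tasklet (it yields a datastore future and finishes with ndb.Return), callers can fan it out over many cases and let ndb resolve the futures concurrently. A minimal usage sketch, assuming the standard google.appengine.ext.ndb API; the wrapper name and input shape are illustrative.

from google.appengine.ext import ndb


def tags_by_case(test_paths_and_cases):
    # Kick off one tasklet per (test_path, case) pair, then wait for all of
    # the futures together; each resolves to the (case, tags) tuple above.
    futures = [_QueryCaseTags(test_path, case)
               for test_path, case in test_paths_and_cases]
    ndb.Future.wait_all(futures)
    return dict(f.get_result() for f in futures)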
    def testPostHistogram_DeduplicatesSameSparseDiagnostic(self):
        diag_dict = {
            'buildNumber': 0,
            'buildbotMasterName': '',
            'buildbotName': 'buildbotmaster',
            'displayBotName': 'bot',
            'displayMasterName': 'master',
            'guid': '6ce177ab-3fdb-44cb-aa8d-9ed49765d810',
            'logUri': '',
            'type': 'BuildbotInfo'
        }
        diag = histogram.SparseDiagnostic(
            id='e9c2891d-2b04-413f-8cf4-099827e67626',
            data=diag_dict,
            start_revision=1,
            end_revision=sys.maxsize,
            test=utils.TestKey('master/bot/benchmark'))
        diag.put()
        data = json.dumps([{
            'benchmarkName': 'benchmark',
            'canonicalUrl': '',
            'guid': '0bc1021b-8107-4db7-bc8c-49d7cf53c5ae',
            'label': '',
            'legacyTIRLabel': '',
            'storyDisplayName': 'story',
            'type': 'TelemetryInfo'
        }, {
            'values': [424242],
            'guid': '25f0a111-9bb4-4cea-b0c1-af2609623160',
            'type': 'GenericSet',
        }, {
            'buildNumber': 0,
            'buildbotMasterName': '',
            'buildbotName': 'buildbotmaster',
            'displayBotName': 'bot',
            'displayMasterName': 'master',
            'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
            'logUri': '',
            'type': 'BuildbotInfo'
        }, {
            'binBoundaries': [1, [1, 1000, 20]],
            'diagnostics': {
                'buildbot': 'e9c2891d-2b04-413f-8cf4-099827e67626',
                reserved_infos.CHROMIUM_COMMIT_POSITIONS.name:
                '25f0a111-9bb4-4cea-b0c1-af2609623160',
                'telemetry': '0bc1021b-8107-4db7-bc8c-49d7cf53c5ae'
            },
            'guid': '4989617a-14d6-4f80-8f75-dafda2ff13b0',
            'name': 'foo',
            'unit': 'count'
        }])
        self.testapp.post('/add_histograms', {'data': data})

        diagnostics = histogram.SparseDiagnostic.query().fetch()
        params_by_guid = self.TaskParamsByGuid()
        params = params_by_guid['4989617a-14d6-4f80-8f75-dafda2ff13b0']
        hist = json.loads(params['data'][0])
        buildbot_info = hist['diagnostics']['buildbot']

        self.assertEqual(1, len(diagnostics))
        self.assertEqual('6ce177ab-3fdb-44cb-aa8d-9ed49765d810', buildbot_info)
  def testProcessTest(self, mock_email_sheriff):
    self._AddDataForTests()
    test_path = 'ChromiumGPU/linux-release/scrolling_benchmark/ref'
    test = utils.TestKey(test_path).get()
    sheriff.Sheriff(
        email='*****@*****.**', id='sheriff', patterns=[test_path]).put()
    test.put()

    find_anomalies.ProcessTests([test.key])

    expected_calls = [
        mock.call(ModelMatcher('sheriff'),
                  ModelMatcher(
                      'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
                  EndRevisionMatcher(10011)),
        mock.call(ModelMatcher('sheriff'),
                  ModelMatcher(
                      'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
                  EndRevisionMatcher(10041)),
        mock.call(ModelMatcher('sheriff'),
                  ModelMatcher(
                      'ChromiumGPU/linux-release/scrolling_benchmark/ref'),
                  EndRevisionMatcher(10061))]
    self.assertEqual(expected_calls, mock_email_sheriff.call_args_list)

    anomalies = anomaly.Anomaly.query().fetch()
    self.assertEqual(len(anomalies), 3)

    def AnomalyExists(
        anomalies, test, percent_changed, direction,
        start_revision, end_revision, sheriff_name, internal_only, units,
        absolute_delta):
      for a in anomalies:
        if (a.test == test and
            a.percent_changed == percent_changed and
            a.direction == direction and
            a.start_revision == start_revision and
            a.end_revision == end_revision and
            a.sheriff.string_id() == sheriff_name and
            a.internal_only == internal_only and
            a.units == units and
            a.absolute_delta == absolute_delta):
          return True
      return False

    self.assertTrue(
        AnomalyExists(
            anomalies, test.key, percent_changed=100, direction=anomaly.UP,
            start_revision=10007, end_revision=10011, sheriff_name='sheriff',
            internal_only=False, units='ms', absolute_delta=50))

    self.assertTrue(
        AnomalyExists(
            anomalies, test.key, percent_changed=-50, direction=anomaly.DOWN,
            start_revision=10037, end_revision=10041, sheriff_name='sheriff',
            internal_only=False, units='ms', absolute_delta=-100))

    self.assertTrue(
        AnomalyExists(
            anomalies, test.key, percent_changed=sys.float_info.max,
            direction=anomaly.UP, start_revision=10057, end_revision=10061,
            sheriff_name='sheriff', internal_only=False, units='ms',
            absolute_delta=100))

    # This is here just to verify that AnomalyExists returns False sometimes.
    self.assertFalse(
        AnomalyExists(
            anomalies, test.key, percent_changed=100, direction=anomaly.DOWN,
            start_revision=10037, end_revision=10041, sheriff_name='sheriff',
            internal_only=False, units='ms', absolute_delta=500))