Example #1
def _AddHistogramFromData(params, revision, test_key, internal_only):
    data_dict = params['data']
    diagnostics = params.get('diagnostics')
    new_guids_to_existing_diagnostics = yield ProcessDiagnostics(
        diagnostics, revision, test_key, internal_only)

    hs = histogram_set.HistogramSet()
    hs.ImportDicts([data_dict])
    for new_guid, existing_diagnostic in (iter(
            new_guids_to_existing_diagnostics.items())):
        hs.ReplaceSharedDiagnostic(
            new_guid,
            diagnostic_ref.DiagnosticRef(existing_diagnostic['guid']))
    data = hs.GetFirstHistogram().AsDict()

    entity = histogram.Histogram(id=str(uuid.uuid4()),
                                 data=data,
                                 test=test_key,
                                 revision=revision,
                                 internal_only=internal_only)
    yield entity.put_async()

    measurement = upload_completion_token.Measurement.GetById(
        params.get('test_path'), params.get('token'))
    if measurement is not None:
        measurement.histogram = entity.key
        measurement.put_async()
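
The yields on ProcessDiagnostics and put_async only make sense inside an ndb tasklet, so the module presumably decorates _AddHistogramFromData with @ndb.tasklet. A minimal sketch of driving it synchronously from ordinary code; the wrapper name is made up and the decoration is an assumption, not something shown in the snippet:

from google.appengine.ext import ndb

@ndb.synctasklet
def _AddHistogramFromDataSync(params, revision, test_key, internal_only):
    # Blocks until the nested tasklet finishes (including the yielded put_async).
    yield _AddHistogramFromData(params, revision, test_key, internal_only)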
Example #2
    def _AddMockData(self):
        """Adds sample TestMetadata, Row, and Anomaly entities."""
        testing_common.AddTests(*_MOCK_DATA)

        # Add 50 Row entities to one of the tests.
        # Also add 2 Anomaly entities.
        test_path = 'ChromiumPerf/mac/SunSpider/Total/t'
        test_key = utils.TestKey(test_path)
        test_container_key = utils.GetTestContainerKey(test_key)
        for rev in range(15000, 15100, 2):
            graph_data.Row(id=rev, parent=test_container_key,
                           value=(rev * 2)).put()
            if rev % 50 == 0:
                data = generic_set.GenericSet(['foo_%s' % rev])
                data = data.AsDict()
                anomaly.Anomaly(start_revision=(rev - 2),
                                end_revision=rev,
                                median_before_anomaly=100,
                                median_after_anomaly=50,
                                test=test_key).put()
                histogram.SparseDiagnostic(test=test_key,
                                           start_revision=rev - 50,
                                           end_revision=rev - 1,
                                           data=data).put()
                histogram.Histogram(test=test_key).put()
Example #3
    def post(self):
        """Adds a single histogram or sparse shared diagnostic to the datastore.

        The |data| request parameter can be either a histogram or a sparse shared
        diagnostic; the set of diagnostics that are considered sparse (meaning that
        they don't normally change on every upload for a given benchmark from a
        given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

        See https://goo.gl/lHzea6 for detailed information on the JSON format for
        histograms and diagnostics.

        Request parameters:
          data: JSON encoding of a histogram or shared diagnostic.
          revision: a revision, given as an int.
          test_path: the test path to which this diagnostic or histogram should be
              attached.
        """
        datastore_hooks.SetPrivilegedRequest()

        data = self.request.get('data')
        data_dict = json.loads(data)
        revision = int(self.request.get('revision'))
        test_key = utils.TestKey(self.request.get('test_path'))

        if data_dict.get('type') in add_histograms.SPARSE_DIAGNOSTIC_TYPES:
            entity = histogram.SparseDiagnostic(data=data,
                                                test=test_key,
                                                start_revision=revision,
                                                end_revision=revision)
        else:
            entity = histogram.Histogram(data=data,
                                         test=test_key,
                                         revision=revision)

        entity.put()
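
A hypothetical request payload for this handler, for orientation only: the test path and values are invented, the generic_set import path is assumed from the catapult tracing package, and the routing comment assumes 'GenericSet' appears in add_histograms.SPARSE_DIAGNOSTIC_TYPES (consistent with the diagnostics used elsewhere on this page).

import json

from tracing.value.diagnostics import generic_set

params = {
    # A GenericSet dict carries 'type': 'GenericSet', so it would take the
    # SparseDiagnostic branch above; other payloads are stored as Histograms.
    'data': json.dumps(generic_set.GenericSet(['alice@chromium.org']).AsDict()),
    'revision': '12345',  # the handler parses this with int()
    'test_path': 'ChromiumPerf/linux/sunspider/Total',
}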
Example #4
 def GetTestHistogram(self,
                      owners_diagnostic=None,
                      commit_position_diagnostic=None):
     if not commit_position_diagnostic:
         commit_position_diagnostic = generic_set.GenericSet([123])
     if not owners_diagnostic:
         owners_diagnostic = generic_set.GenericSet(['owner_name'])
     return histogram.Histogram(
         id=str(uuid.uuid4()),
         data={
             'allBins': {
                 '1': [1],
                 '3': [1],
                 '4': [1]
             },
             'binBoundaries': [1, [1, 1000, 20]],
             'diagnostics': {
                 reserved_infos.CHROMIUM_COMMIT_POSITIONS.name:
                 commit_position_diagnostic.AsDict(),
                 reserved_infos.OWNERS.name:
                 owners_diagnostic.guid,
                 'irrelevant_diagnostic':
                 generic_set.GenericSet([42]).AsDict(),
             },
             'name': 'foo',
             'running': [3, 3, 0.5972531564093516, 2, 1, 6, 2],
             'sampleValues': [1, 2, 3],
             'unit': 'count_biggerIsBetter'
         },
         test=None,
         revision=123,
         internal_only=True)
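
Because data here is plain HistogramSet JSON, it can be loaded back with the same histogram_set calls used in Examples #1 and #8. A short sketch, assuming self is the test case that defines GetTestHistogram:

entity = self.GetTestHistogram()
hs = histogram_set.HistogramSet()
hs.ImportDicts([entity.data])
hist = hs.GetFirstHistogram()
# hist.name == 'foo', hist.unit == 'count_biggerIsBetter'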
Example #5
  def _MockData(self, path='master/bot/suite/measure/case',
                internal_only=False):
    test = graph_data.TestMetadata(
        has_rows=True,
        id=path,
        improvement_direction=anomaly.DOWN,
        internal_only=internal_only,
        units='units')
    test.UpdateSheriff()
    test.put()

    for i in range(1, 21, 2):
      graph_data.Row(
          error=(i / 2.0),
          id=i,
          parent=test.key,
          r_i2=(i * 2),
          timestamp=datetime.datetime.utcfromtimestamp(i),
          value=float(i)).put()
      histogram.Histogram(
          data=_TEST_HISTOGRAM_DATA,
          id=str(uuid.uuid4()),
          internal_only=internal_only,
          revision=i,
          test=test.key).put()

    anomaly.Anomaly(
        end_revision=11,
        internal_only=internal_only,
        is_improvement=False,
        median_after_anomaly=6,
        median_before_anomaly=4,
        subscriptions=[Subscription(
            name='Taylor',
            notification_email=testing_common.INTERNAL_USER.email(),
        )],
        subscription_names=['Taylor'],
        start_revision=10,
        test=test.key).put()

    histogram.SparseDiagnostic(
        data={'type': 'GenericSet', 'guid': str(uuid.uuid4()), 'values': [1]},
        end_revision=11,
        id=str(uuid.uuid4()),
        internal_only=internal_only,
        name=reserved_infos.DEVICE_IDS.name,
        start_revision=1,
        test=test.key).put()

    histogram.SparseDiagnostic(
        data={'type': 'GenericSet', 'guid': str(uuid.uuid4()), 'values': [2]},
        end_revision=None,
        id=str(uuid.uuid4()),
        internal_only=internal_only,
        name=reserved_infos.DEVICE_IDS.name,
        start_revision=11,
        test=test.key).put()
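
The two SparseDiagnostic entities above split the revision axis into a closed range (1 through 11) and an open-ended one (start_revision=11, end_revision=None). A sketch of finding the diagnostic active at a given revision; the helper is hypothetical and uses only the stock ndb query API:

def _DiagnosticAtRevision(test_key, name, revision):
  # Fetch every range stored for this test/name pair and pick the one covering
  # `revision`; end_revision=None marks the still-open current range.
  candidates = histogram.SparseDiagnostic.query(
      histogram.SparseDiagnostic.test == test_key,
      histogram.SparseDiagnostic.name == name).fetch()
  for diag in candidates:
    if diag.start_revision <= revision and (
        diag.end_revision is None or revision <= diag.end_revision):
      return diag
  return None

# _DiagnosticAtRevision(test.key, reserved_infos.DEVICE_IDS.name, 5)   -> values [1]
# _DiagnosticAtRevision(test.key, reserved_infos.DEVICE_IDS.name, 15)  -> values [2]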
Example #6
    def _AddMockData(self):
        """Adds sample TestMetadata and Row entities."""
        testing_common.AddTests(*_MOCK_DATA)

        # Add 50 Row entities to some of the tests.
        for test_path in _TESTS_WITH_ROWS:
            testing_common.AddRows(test_path, range(15000, 15100, 2))

            histogram.SparseDiagnostic(test=utils.TestKey(test_path)).put()
            histogram.Histogram(test=utils.TestKey(test_path)).put()
Example #7
    def post(self):
        """Adds a single histogram or sparse shared diagnostic to the datastore.

        The |data| request parameter can be either a histogram or a sparse shared
        diagnostic; the set of diagnostics that are considered sparse (meaning that
        they don't normally change on every upload for a given benchmark from a
        given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

        See https://goo.gl/lHzea6 for detailed information on the JSON format for
        histograms and diagnostics.

        Request parameters:
          data: JSON encoding of a histogram or shared diagnostic.
          revision: a revision, given as an int.
          test_path: the test path to which this diagnostic or histogram should be
              attached.
        """
        datastore_hooks.SetPrivilegedRequest()

        data = self.request.get('data')
        revision = int(self.request.get('revision'))
        test_path = self.request.get('test_path')

        data_dict = json.loads(data)
        guid = data_dict['guid']
        is_diagnostic = 'type' in data_dict

        test_path_parts = test_path.split('/')
        master = test_path_parts[0]
        bot = test_path_parts[1]
        test_name = '/'.join(test_path_parts[2:])
        bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
        internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)
        extra_args = {} if is_diagnostic else GetUnitArgs(data_dict['unit'])
        # TODO(eakuefner): Populate benchmark_description once it appears in
        # diagnostics.
        test_key = add_point_queue.GetOrCreateAncestors(
            master, bot, test_name, internal_only, **extra_args).key

        if is_diagnostic:
            entity = histogram.SparseDiagnostic(id=guid,
                                                data=data,
                                                test=test_key,
                                                start_revision=revision,
                                                end_revision=revision,
                                                internal_only=internal_only)
        else:
            entity = histogram.Histogram(id=guid,
                                         data=data,
                                         test=test_key,
                                         revision=revision,
                                         internal_only=internal_only)
            AddRow(data_dict, test_key, revision, test_path, internal_only)

        entity.put()
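
For reference, the test_path handling above is a plain split on '/'; with a made-up path:

test_path = 'ChromiumPerf/win7/sunspider/Total'
parts = test_path.split('/')
master, bot = parts[0], parts[1]   # 'ChromiumPerf', 'win7'
test_name = '/'.join(parts[2:])    # 'sunspider/Total'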
Example #8
def _AddHistogramFromData(params, revision, test_key, internal_only):
  data_dict = params['data']
  diagnostics = params.get('diagnostics')
  new_guids_to_existing_diagnostics = yield ProcessDiagnostics(
      diagnostics, revision, test_key, internal_only)

  hs = histogram_set.HistogramSet()
  hs.ImportDicts([data_dict])
  for new_guid, existing_diagnostic in (
      iter(new_guids_to_existing_diagnostics.items())):
    hs.ReplaceSharedDiagnostic(
        new_guid, diagnostic_ref.DiagnosticRef(
            existing_diagnostic['guid']))
  data = hs.GetFirstHistogram().AsDict()

  entity = histogram.Histogram(
      id=str(uuid.uuid4()), data=data, test=test_key, revision=revision,
      internal_only=internal_only)
  yield entity.put_async()
Example #9
def _AddHistogramFromData(params, revision, test_key, internal_only):
  data_dict = params['data']
  guid = data_dict['guid']
  diagnostics = params.get('diagnostics')
  new_guids_to_existing_diagnostics = yield ProcessDiagnostics(
      diagnostics, revision, test_key, internal_only)

  # TODO(eakuefner): Move per-histogram monkeypatching logic to Histogram.
  hs = histogram_set.HistogramSet()
  hs.ImportDicts([data_dict])
  # TODO(eakuefner): Share code for replacement logic with add_histograms
  for new_guid, existing_diagnostic in (
      new_guids_to_existing_diagnostics.iteritems()):
    hs.ReplaceSharedDiagnostic(
        new_guid, diagnostic_ref.DiagnosticRef(
            existing_diagnostic['guid']))
  data = hs.GetFirstHistogram().AsDict()

  entity = histogram.Histogram(
      id=guid, data=data, test=test_key, revision=revision,
      internal_only=internal_only)
  yield entity.put_async()
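
The two variants of _AddHistogramFromData above differ mainly in how the entity id is chosen: Example #8 mints a fresh uuid4 for the datastore key, while Example #9 reuses the guid already embedded in the uploaded histogram dict (and still iterates with the Python 2 iteritems()).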
Example #10
    def testGet_SuccessWithMeasurementsAndAssociatedHistogram(self):
        owners_diagnostic = generic_set.GenericSet(['owner_name'])
        commit_position_diagnostic = generic_set.GenericSet([123])
        irrelevant_diagnostic = generic_set.GenericSet([42])
        owners_diagnostic.guid = str(uuid.uuid4())
        commit_position_diagnostic.guid = str(uuid.uuid4())

        histogram.SparseDiagnostic(id=owners_diagnostic.guid,
                                   data=owners_diagnostic.AsDict(),
                                   name=reserved_infos.OWNERS.name,
                                   test=None,
                                   start_revision=1,
                                   end_revision=999).put().get()

        hs = histogram.Histogram(
            id=str(uuid.uuid4()),
            data={
                'allBins': {
                    '1': [1],
                    '3': [1],
                    '4': [1]
                },
                'binBoundaries': [1, [1, 1000, 20]],
                'diagnostics': {
                    reserved_infos.CHROMIUM_COMMIT_POSITIONS.name:
                    commit_position_diagnostic.AsDict(),
                    reserved_infos.OWNERS.name:
                    owners_diagnostic.guid,
                    'irrelevant_diagnostic':
                    irrelevant_diagnostic.AsDict(),
                },
                'name': 'foo',
                'running': [3, 3, 0.5972531564093516, 2, 1, 6, 2],
                'sampleValues': [1, 2, 3],
                'unit': 'count_biggerIsBetter'
            },
            test=None,
            revision=123,
            internal_only=True).put().get()

        token_id = str(uuid.uuid4())
        test_path = 'Chromium/win7/suite/metric1'
        token = upload_completion_token.Token(id=token_id).put().get()
        measurement = token.AddMeasurement(test_path, True).get_result()
        measurement.histogram = hs.key
        measurement.put()

        expected = {
            'token':
            token_id,
            'file':
            None,
            'created':
            str(token.creation_time),
            'lastUpdated':
            str(token.update_time),
            'state':
            'PROCESSING',
            'measurements': [
                {
                    'name':
                    test_path,
                    'state':
                    'PROCESSING',
                    'monitored':
                    True,
                    'lastUpdated':
                    str(measurement.update_time),
                    'dimensions': [
                        {
                            'name': reserved_infos.OWNERS.name,
                            'value': list(owners_diagnostic),
                        },
                        {
                            'name':
                            reserved_infos.CHROMIUM_COMMIT_POSITIONS.name,
                            'value': list(commit_position_diagnostic),
                        },
                    ]
                },
            ]
        }
        response = self.GetFullInfoRequest(token_id)
        # Sort with an explicit key: the dimension entries are dicts, which are
        # not orderable in Python 3.
        expected['measurements'][0]['dimensions'].sort(key=lambda d: d['name'])
        response['measurements'][0]['dimensions'].sort(key=lambda d: d['name'])
        self.assertEqual(response, expected)
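
The two sort() calls exist only to make the dimension comparison order-independent. An equivalent check, assuming a Python 3 unittest.TestCase, would be:

self.assertCountEqual(response['measurements'][0]['dimensions'],
                      expected['measurements'][0]['dimensions'])
# ...followed by comparing the rest of the two payloads with dimensions removed.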
Example #11
 def _AddMockData(self, histogram_data, internal_only=False):
     histogram.Histogram(id=histogram_data['guid'],
                         test=ndb.Key('TestMetadata', 'M/B/S'),
                         revision=10,
                         data=histogram_data,
                         internal_only=internal_only).put()
Example #12
    def post(self):
        """Adds a single histogram or sparse shared diagnostic to the datastore.

        The |data| request parameter can be either a histogram or a sparse shared
        diagnostic; the set of diagnostics that are considered sparse (meaning that
        they don't normally change on every upload for a given benchmark from a
        given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

        See https://goo.gl/lHzea6 for detailed information on the JSON format for
        histograms and diagnostics.

        Request parameters:
          data: JSON encoding of a histogram or shared diagnostic.
          revision: a revision, given as an int.
          test_path: the test path to which this diagnostic or histogram should be
              attached.
        """
        datastore_hooks.SetPrivilegedRequest()

        data = self.request.get('data')
        revision = int(self.request.get('revision'))
        test_path = self.request.get('test_path')

        data_dict = json.loads(data)
        guid = data_dict['guid']
        is_diagnostic = 'type' in data_dict

        test_path_parts = test_path.split('/')
        master = test_path_parts[0]
        bot = test_path_parts[1]
        test_name = '/'.join(test_path_parts[2:])
        bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
        internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)
        extra_args = {} if is_diagnostic else GetUnitArgs(data_dict['unit'])
        # TODO(eakuefner): Populate benchmark_description once it appears in
        # diagnostics.
        parent_test = add_point_queue.GetOrCreateAncestors(
            master, bot, test_name, internal_only, **extra_args)
        test_key = parent_test.key

        added_rows = []
        monitored_test_keys = []

        if is_diagnostic:
            entity = histogram.SparseDiagnostic(id=guid,
                                                data=data,
                                                test=test_key,
                                                start_revision=revision,
                                                end_revision=revision,
                                                internal_only=internal_only)
        else:
            diagnostics = self.request.get('diagnostics')
            if diagnostics:
                diagnostic_data = json.loads(diagnostics)
                diagnostic_entities = []
                for diagnostic_datum in diagnostic_data:
                    # TODO(eakuefner): Pass map of guid to dict to avoid overhead
                    guid = diagnostic_datum['guid']
                    diagnostic_entities.append(
                        histogram.SparseDiagnostic(
                            id=guid,
                            data=diagnostic_datum,
                            test=test_key,
                            start_revision=revision,
                            end_revision=sys.maxint,
                            internal_only=internal_only))
                new_guids_to_existing_diagnostics = add_histograms.DeduplicateAndPut(
                    diagnostic_entities, test_key, revision).iteritems()
                # TODO(eakuefner): Move per-histogram monkeypatching logic to Histogram.
                hs = histogram_set.HistogramSet()
                hs.ImportDicts([data_dict])
                # TODO(eakuefner): Share code for replacement logic with add_histograms
                for new_guid, existing_diagnostic in new_guids_to_existing_diagnostics:
                    hs.ReplaceSharedDiagnostic(
                        new_guid,
                        diagnostic_ref.DiagnosticRef(
                            existing_diagnostic['guid']))
                data = hs.GetFirstHistogram().AsDict()

            entity = histogram.Histogram(id=guid,
                                         data=data,
                                         test=test_key,
                                         revision=revision,
                                         internal_only=internal_only)
            row = AddRow(data_dict, test_key, revision, test_path,
                         internal_only)
            added_rows.append(row)

            is_monitored = parent_test.sheriff and parent_test.has_rows
            if is_monitored:
                monitored_test_keys.append(parent_test.key)

        entity.put()

        tests_keys = [
            k for k in monitored_test_keys if not add_point_queue.IsRefBuild(k)
        ]

        # Updating of the cached graph revisions should happen after put because
        # it requires the new row to have a timestamp, which happens upon put.
        futures = [
            graph_revisions.AddRowsToCacheAsync(added_rows),
            find_anomalies.ProcessTestsAsync(tests_keys)
        ]
        ndb.Future.wait_all(futures)
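
For completeness, the diagnostics request parameter consumed in the else-branch above is a JSON-encoded list of diagnostic dicts, each carrying its own guid. A hypothetical value (the email is invented):

import json
import uuid

diagnostics_param = json.dumps([{
    'type': 'GenericSet',
    'guid': str(uuid.uuid4()),  # each diagnostic supplies its own guid
    'values': ['alice@chromium.org'],
}])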