  def testDeduplicateAndPut_Different(self):
    d = {
        'buildNumber': 0,
        'buildbotMasterName': '',
        'buildbotName': '',
        'displayBotName': 'bot',
        'displayMasterName': 'master',
        'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
        'logUri': '',
        'type': 'BuildbotInfo'
    }
    test_key = utils.TestKey('Chromium/win7/foo')
    entity = histogram.SparseDiagnostic(
        data=d, test=test_key, start_revision=1,
        end_revision=sys.maxint, id='abc')
    entity.put()
    d2 = d.copy()
    d2['guid'] = 'def'
    d2['displayBotName'] = 'mac'
    entity2 = histogram.SparseDiagnostic(
        data=d2, test=test_key, start_revision=1,
        end_revision=sys.maxint, id='def')
    add_histograms.DeduplicateAndPut([entity2], test_key, 2)
    sparse = histogram.SparseDiagnostic.query().fetch()
    self.assertEqual(2, len(sparse))
Example #2
  def testDeduplicateAndPut_New(self):
   d = {
       'guid': 'abc',
       'osName': 'linux',
       'type': 'DeviceInfo'
   }
   test_key = utils.TestKey('Chromium/win7/foo')
   entity = histogram.SparseDiagnostic(
       data=json.dumps(d), test=test_key, start_revision=1,
       end_revision=sys.maxint, id='abc')
   entity.put()
   add_histograms.DeduplicateAndPut([entity], test_key, 1)
   sparse = histogram.SparseDiagnostic.query().fetch()
   self.assertEqual(1, len(sparse))
Example #3
  def testDeduplicateAndPut_New(self):
   d = {
       'values': ['master'],
       'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
       'type': 'GenericSet'
   }
   test_key = utils.TestKey('Chromium/win7/foo')
   entity = histogram.SparseDiagnostic(
       data=d, test=test_key, start_revision=1,
       end_revision=sys.maxint, id='abc')
   entity.put()
   add_histograms.DeduplicateAndPut([entity], test_key, 1)
   sparse = histogram.SparseDiagnostic.query().fetch()
   self.assertEqual(1, len(sparse))
Example #4
  def testDeduplicateAndPut_Same(self):
   d = {
       'guid': 'abc',
       'osName': 'linux',
       'type': 'DeviceInfo'
   }
   test_key = utils.TestKey('Chromium/win7/foo')
   entity = histogram.SparseDiagnostic(
       data=d, test=test_key, start_revision=1,
       end_revision=sys.maxint, id='abc')
   entity.put()
   d2 = d.copy()
   d2['guid'] = 'def'
   entity2 = histogram.SparseDiagnostic(
       data=d2, test=test_key,
       start_revision=2, end_revision=sys.maxint, id='def')
   add_histograms.DeduplicateAndPut([entity2], test_key, 2)
   sparse = histogram.SparseDiagnostic.query().fetch()
   self.assertEqual(1, len(sparse))
Example #5
  def testDeduplicateAndPut_Different(self):
   d = {
       'values': ['master'],
       'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
       'type': 'GenericSet'
   }
   test_key = utils.TestKey('Chromium/win7/foo')
   entity = histogram.SparseDiagnostic(
       data=d, name='masters', test=test_key, start_revision=1,
       end_revision=sys.maxint, id='abc')
   entity.put()
   d2 = d.copy()
   d2['guid'] = 'def'
   d2['displayBotName'] = 'mac'
   entity2 = histogram.SparseDiagnostic(
       data=d2, test=test_key,
       start_revision=1, end_revision=sys.maxint, id='def')
   add_histograms.DeduplicateAndPut([entity2], test_key, 2)
   sparse = histogram.SparseDiagnostic.query().fetch()
   self.assertEqual(2, len(sparse))
Example #6
def ProcessDiagnostics(diagnostics, revision, test_key, internal_only):
    if not diagnostics:
        return {}

    diagnostic_data = json.loads(diagnostics)
    diagnostic_entities = []
    for name, diagnostic_datum in diagnostic_data.iteritems():
        # TODO(eakuefner): Pass map of guid to dict to avoid overhead
        guid = diagnostic_datum['guid']
        diagnostic_entities.append(
            histogram.SparseDiagnostic(id=guid,
                                       name=name,
                                       data=diagnostic_datum,
                                       test=test_key,
                                       start_revision=revision,
                                       end_revision=sys.maxint,
                                       internal_only=internal_only))
    new_guids_to_existing_diagnostics = add_histograms.DeduplicateAndPut(
        diagnostic_entities, test_key, revision).iteritems()
    return new_guids_to_existing_diagnostics
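
ProcessDiagnostics expects its |diagnostics| argument to be a JSON object mapping each diagnostic name to its dict (that is what the diagnostic_data.iteritems() loop assumes), and it returns the iterator produced by calling .iteritems() on the DeduplicateAndPut result. A minimal usage sketch follows; the 'masters' diagnostic name, the revision value, and running inside a unit test with the datastore testbed set up (as the tests above do) are assumptions for illustration, and json and utils come from the surrounding module's imports.

diagnostics_json = json.dumps({
    'masters': {
        'values': ['master'],
        'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
        'type': 'GenericSet',
    },
})
test_key = utils.TestKey('Chromium/win7/foo')  # same test path as the tests above
# Yields (new_guid, existing_diagnostic) pairs for any diagnostics that were
# deduplicated against ones already stored for this test.
guid_pairs = ProcessDiagnostics(
    diagnostics_json, revision=123, test_key=test_key, internal_only=False)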

def post(self):
    """Adds a single histogram or sparse shared diagnostic to the datastore.

    The |data| request parameter can be either a histogram or a sparse shared
    diagnostic; the set of diagnostics that are considered sparse (meaning that
    they don't normally change on every upload for a given benchmark from a
    given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

    See https://goo.gl/lHzea6 for detailed information on the JSON format for
    histograms and diagnostics.

    Request parameters:
      data: JSON encoding of a histogram or shared diagnostic.
      revision: a revision, given as an int.
      test_path: the test path to which this diagnostic or histogram should be
          attached.
    """
    datastore_hooks.SetPrivilegedRequest()

    data = self.request.get('data')
    revision = int(self.request.get('revision'))
    test_path = self.request.get('test_path')

    data_dict = json.loads(data)
    guid = data_dict['guid']
    is_diagnostic = 'type' in data_dict

    test_path_parts = test_path.split('/')
    master = test_path_parts[0]
    bot = test_path_parts[1]
    test_name = '/'.join(test_path_parts[2:])
    bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
    internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)
    extra_args = {} if is_diagnostic else GetUnitArgs(data_dict['unit'])
    # TODO(eakuefner): Populate benchmark_description once it appears in
    # diagnostics.
    parent_test = add_point_queue.GetOrCreateAncestors(
        master, bot, test_name, internal_only, **extra_args)
    test_key = parent_test.key

    added_rows = []
    monitored_test_keys = []

    # A bare sparse diagnostic is stored directly; a histogram may carry a
    # list of diagnostics that are deduplicated against existing ones first.
    if is_diagnostic:
        entity = histogram.SparseDiagnostic(
            id=guid, data=data, test=test_key, start_revision=revision,
            end_revision=revision, internal_only=internal_only)
    else:
        diagnostics = self.request.get('diagnostics')
        if diagnostics:
            diagnostic_data = json.loads(diagnostics)
            diagnostic_entities = []
            for diagnostic_datum in diagnostic_data:
                # TODO(eakuefner): Pass map of guid to dict to avoid overhead
                guid = diagnostic_datum['guid']
                diagnostic_entities.append(
                    histogram.SparseDiagnostic(
                        id=guid,
                        data=diagnostic_datum,
                        test=test_key,
                        start_revision=revision,
                        end_revision=sys.maxint,
                        internal_only=internal_only))
            new_guids_to_existing_diagnostics = add_histograms.DeduplicateAndPut(
                diagnostic_entities, test_key, revision).iteritems()
            # TODO(eakuefner): Move per-histogram monkeypatching logic to
            # Histogram.
            hs = histogram_set.HistogramSet()
            hs.ImportDicts([data_dict])
            # TODO(eakuefner): Share code for replacement logic with
            # add_histograms.
            for new_guid, existing_diagnostic in new_guids_to_existing_diagnostics:
                hs.ReplaceSharedDiagnostic(
                    new_guid,
                    diagnostic_ref.DiagnosticRef(existing_diagnostic['guid']))
            data = hs.GetFirstHistogram().AsDict()

        entity = histogram.Histogram(
            id=guid, data=data, test=test_key, revision=revision,
            internal_only=internal_only)
        row = AddRow(data_dict, test_key, revision, test_path, internal_only)
        added_rows.append(row)

        is_monitored = parent_test.sheriff and parent_test.has_rows
        if is_monitored:
            monitored_test_keys.append(parent_test.key)

    entity.put()

    tests_keys = [
        k for k in monitored_test_keys if not add_point_queue.IsRefBuild(k)
    ]

    # Updating of the cached graph revisions should happen after put because
    # it requires the new row to have a timestamp, which happens upon put.
    futures = [
        graph_revisions.AddRowsToCacheAsync(added_rows),
        find_anomalies.ProcessTestsAsync(tests_keys)
    ]
    ndb.Future.wait_all(futures)
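
Everything the post handler needs arrives as request parameters, so a caller only has to supply |data|, |revision|, and |test_path|, plus an optional |diagnostics| list when |data| is a histogram. The sketch below shows what enqueueing a single sparse shared diagnostic might look like; the /add_histograms_queue URL and the use of the App Engine taskqueue API are assumptions about how the handler is wired up, not something shown in the code above.

import json

from google.appengine.api import taskqueue

# A sparse shared diagnostic payload, mirroring the GenericSet used in the
# tests above; the presence of 'type' is what makes the handler treat the
# payload as a diagnostic rather than a histogram.
diagnostic = {
    'values': ['master'],
    'guid': 'e9c2891d-2b04-413f-8cf4-099827e67626',
    'type': 'GenericSet',
}

taskqueue.add(
    url='/add_histograms_queue',  # hypothetical route for this handler
    params={
        'data': json.dumps(diagnostic),  # JSON-encoded histogram or diagnostic
        'revision': 123,  # int revision for this upload
        'test_path': 'Chromium/win7/foo',  # master/bot/test to attach it to
    })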