def testCloneWithRef(self):
  diagnostics = histogram.DiagnosticMap()
  diagnostics['ref'] = diagnostic_ref.DiagnosticRef('abc')

  clone = histogram.DiagnosticMap.FromDict(diagnostics.AsDict())
  self.assertIsInstance(clone.get('ref'), diagnostic_ref.DiagnosticRef)
  self.assertEqual(clone.get('ref').guid, 'abc')

def _AddHistogramFromData(params, revision, test_key, internal_only):
  data_dict = params['data']
  diagnostics = params.get('diagnostics')
  new_guids_to_existing_diagnostics = yield ProcessDiagnostics(
      diagnostics, revision, test_key, internal_only)

  hs = histogram_set.HistogramSet()
  hs.ImportDicts([data_dict])

  for new_guid, existing_diagnostic in (iter(
      new_guids_to_existing_diagnostics.items())):
    hs.ReplaceSharedDiagnostic(
        new_guid, diagnostic_ref.DiagnosticRef(existing_diagnostic['guid']))

  data = hs.GetFirstHistogram().AsDict()

  entity = histogram.Histogram(
      id=str(uuid.uuid4()),
      data=data,
      test=test_key,
      revision=revision,
      internal_only=internal_only)
  yield entity.put_async()

  measurement = upload_completion_token.Measurement.GetById(
      params.get('test_path'), params.get('token'))
  if measurement is not None:
    measurement.histogram = entity.key
    measurement.put_async()

def AddDicts(self, dct):
  for name, diagnostic_dict in dct.items():
    if isinstance(diagnostic_dict, StringTypes):
      self[name] = diagnostic_ref.DiagnosticRef(diagnostic_dict)
    elif diagnostic_dict['type'] not in [
        'RelatedHistogramMap', 'RelatedHistogramBreakdown']:
      # Ignore RelatedHistograms.
      # TODO(benjhayden): Forget about RelatedHistograms in 2019 Q2.
      self[name] = diagnostic.Diagnostic.FromDict(diagnostic_dict)

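# A minimal usage sketch for AddDicts above, not taken from the original
# source. It assumes the catapult tracing package is importable at the module
# paths shown; the diagnostic names and guids are made up. It illustrates the
# two branches: a bare guid string becomes a DiagnosticRef, while a
# RelatedHistogramMap entry is dropped rather than deserialized.
from tracing.value import histogram
from tracing.value.diagnostics import diagnostic_ref

dm = histogram.DiagnosticMap()
dm.AddDicts({
    'shared_ref': 'some-shared-guid',                      # string -> ref
    'related': {'type': 'RelatedHistogramMap', 'values': {}},  # ignored
})
assert isinstance(dm['shared_ref'], diagnostic_ref.DiagnosticRef)
assert 'related' not in dm  # RelatedHistogram* entries are skipped
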
def testReplaceSharedDiagnostic(self):
  hist = histogram.Histogram('', 'unitless')
  hists = histogram_set.HistogramSet([hist])
  diag0 = generic_set.GenericSet(['shared0'])
  diag1 = generic_set.GenericSet(['shared1'])
  hists.AddSharedDiagnostic('generic0', diag0)
  hists.AddSharedDiagnostic('generic1', diag1)

  guid0 = diag0.guid
  guid1 = diag1.guid

  hists.ReplaceSharedDiagnostic(
      guid0, diagnostic_ref.DiagnosticRef('fakeGuid'))

  self.assertEqual(hist.diagnostics['generic0'].guid, 'fakeGuid')
  self.assertEqual(hist.diagnostics['generic1'].guid, guid1)

def _AddHistogramFromData(params, revision, test_key, internal_only):
  data_dict = params['data']
  diagnostics = params.get('diagnostics')
  new_guids_to_existing_diagnostics = yield ProcessDiagnostics(
      diagnostics, revision, test_key, internal_only)

  hs = histogram_set.HistogramSet()
  hs.ImportDicts([data_dict])

  for new_guid, existing_diagnostic in (
      iter(new_guids_to_existing_diagnostics.items())):
    hs.ReplaceSharedDiagnostic(
        new_guid, diagnostic_ref.DiagnosticRef(
            existing_diagnostic['guid']))

  data = hs.GetFirstHistogram().AsDict()

  entity = histogram.Histogram(
      id=str(uuid.uuid4()), data=data, test=test_key, revision=revision,
      internal_only=internal_only)
  yield entity.put_async()

def _AddHistogramFromData(params, revision, test_key, internal_only):
  data_dict = params['data']
  guid = data_dict['guid']
  diagnostics = params.get('diagnostics')
  new_guids_to_existing_diagnostics = yield ProcessDiagnostics(
      diagnostics, revision, test_key, internal_only)

  # TODO(eakuefner): Move per-histogram monkeypatching logic to Histogram.
  hs = histogram_set.HistogramSet()
  hs.ImportDicts([data_dict])

  # TODO(eakuefner): Share code for replacement logic with add_histograms
  for new_guid, existing_diagnostic in (
      new_guids_to_existing_diagnostics.iteritems()):
    hs.ReplaceSharedDiagnostic(
        new_guid, diagnostic_ref.DiagnosticRef(
            existing_diagnostic['guid']))

  data = hs.GetFirstHistogram().AsDict()

  entity = histogram.Histogram(
      id=guid, data=data, test=test_key, revision=revision,
      internal_only=internal_only)
  yield entity.put_async()

def AddDicts(self, dct):
  for name, diagnostic_dict in dct.iteritems():
    if isinstance(diagnostic_dict, basestring):
      self[name] = diagnostic_ref.DiagnosticRef(diagnostic_dict)
    else:
      self[name] = diagnostic.Diagnostic.FromDict(diagnostic_dict)

def RefOrDiagnostic(d):
  if isinstance(d, basestring):
    return diagnostic_ref.DiagnosticRef(d)
  return diagnostic.Diagnostic.FromDict(d)

def RefOrDiagnostic(d):
  if isinstance(d, StringTypes):
    return diagnostic_ref.DiagnosticRef(d)
  return diagnostic.Diagnostic.FromDict(d)

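# A hedged usage sketch for the helper above, assuming RefOrDiagnostic is in
# scope and the catapult tracing diagnostics modules are importable at the
# paths shown; the guid and values are placeholders. A bare guid string yields
# a lightweight DiagnosticRef, while a full diagnostic dict is deserialized
# into the concrete diagnostic class named by its 'type' field.
from tracing.value.diagnostics import diagnostic_ref
from tracing.value.diagnostics import generic_set

ref = RefOrDiagnostic('0a1b2c3d')
assert isinstance(ref, diagnostic_ref.DiagnosticRef)
assert ref.guid == '0a1b2c3d'

full = RefOrDiagnostic({'type': 'GenericSet', 'values': ['story_name']})
assert isinstance(full, generic_set.GenericSet)
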
def post(self):
  """Adds a single histogram or sparse shared diagnostic to the datastore.

  The |data| request parameter can be either a histogram or a sparse shared
  diagnostic; the set of diagnostics that are considered sparse (meaning that
  they don't normally change on every upload for a given benchmark from a
  given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

  See https://goo.gl/lHzea6 for detailed information on the JSON format for
  histograms and diagnostics.

  Request parameters:
    data: JSON encoding of a histogram or shared diagnostic.
    revision: a revision, given as an int.
    test_path: the test path to which this diagnostic or histogram should be
        attached.
  """
  datastore_hooks.SetPrivilegedRequest()

  data = self.request.get('data')
  revision = int(self.request.get('revision'))
  test_path = self.request.get('test_path')

  data_dict = json.loads(data)
  guid = data_dict['guid']
  is_diagnostic = 'type' in data_dict

  test_path_parts = test_path.split('/')
  master = test_path_parts[0]
  bot = test_path_parts[1]
  test_name = '/'.join(test_path_parts[2:])
  bot_whitelist = stored_object.Get(add_point_queue.BOT_WHITELIST_KEY)
  internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)
  extra_args = {} if is_diagnostic else GetUnitArgs(data_dict['unit'])
  # TODO(eakuefner): Populate benchmark_description once it appears in
  # diagnostics.
  parent_test = add_point_queue.GetOrCreateAncestors(
      master, bot, test_name, internal_only, **extra_args)
  test_key = parent_test.key

  added_rows = []
  monitored_test_keys = []

  if is_diagnostic:
    entity = histogram.SparseDiagnostic(
        id=guid, data=data, test=test_key, start_revision=revision,
        end_revision=revision, internal_only=internal_only)
  else:
    diagnostics = self.request.get('diagnostics')
    if diagnostics:
      diagnostic_data = json.loads(diagnostics)
      diagnostic_entities = []
      for diagnostic_datum in diagnostic_data:
        # TODO(eakuefner): Pass map of guid to dict to avoid overhead
        guid = diagnostic_datum['guid']
        diagnostic_entities.append(histogram.SparseDiagnostic(
            id=guid, data=diagnostic_datum, test=test_key,
            start_revision=revision, end_revision=sys.maxint,
            internal_only=internal_only))
      new_guids_to_existing_diagnostics = add_histograms.DeduplicateAndPut(
          diagnostic_entities, test_key, revision).iteritems()

      # TODO(eakuefner): Move per-histogram monkeypatching logic to Histogram.
      hs = histogram_set.HistogramSet()
      hs.ImportDicts([data_dict])
      # TODO(eakuefner): Share code for replacement logic with add_histograms
      for new_guid, existing_diagnostic in new_guids_to_existing_diagnostics:
        hs.ReplaceSharedDiagnostic(
            new_guid, diagnostic_ref.DiagnosticRef(
                existing_diagnostic['guid']))
      data = hs.GetFirstHistogram().AsDict()

    entity = histogram.Histogram(
        id=guid, data=data, test=test_key, revision=revision,
        internal_only=internal_only)
    row = AddRow(data_dict, test_key, revision, test_path, internal_only)
    added_rows.append(row)

    is_monitored = parent_test.sheriff and parent_test.has_rows
    if is_monitored:
      monitored_test_keys.append(parent_test.key)

  entity.put()

  tests_keys = [
      k for k in monitored_test_keys if not add_point_queue.IsRefBuild(k)]

  # Updating of the cached graph revisions should happen after put because
  # it requires the new row to have a timestamp, which happens upon put.
  futures = [
      graph_revisions.AddRowsToCacheAsync(added_rows),
      find_anomalies.ProcessTestsAsync(tests_keys)]
  ndb.Future.wait_all(futures)

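# A minimal, hedged illustration of the dispatch at the top of post() above:
# a payload whose top-level dict has a 'type' key is treated as a sparse
# shared diagnostic, anything else as a histogram. The example dicts and guids
# below are made up for illustration only and are not real upload payloads.
import json

diagnostic_payload = json.dumps(
    {'type': 'GenericSet', 'guid': 'abc-123', 'values': ['owner@example.com']})
histogram_payload = json.dumps(
    {'name': 'timeToFirstPaint', 'guid': 'def-456',
     'unit': 'ms_smallerIsBetter'})

assert 'type' in json.loads(diagnostic_payload)     # stored as SparseDiagnostic
assert 'type' not in json.loads(histogram_payload)  # stored as histogram.Histogram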