def ProcessHistogramSet(histogram_dicts):
  """Validates a HistogramSet JSON payload and queues it for processing.

  Imports the dicts into a HistogramSet, deduplicates diagnostics, resolves
  suite-identifying diagnostics (master/bot/benchmark), persists suite-level
  sparse diagnostics, and finally batches the histograms into task-queue
  tasks.

  Args:
    histogram_dicts: a list of dicts, each a histogram or shared diagnostic
        in HistogramSet JSON format.

  Raises:
    api_request_handler.BadRequestError: if the input is not a list or
        contains no histograms.
  """
  if not isinstance(histogram_dicts, list):
    # Fixed typo in user-facing message: "much be" -> "must be".
    raise api_request_handler.BadRequestError(
        'HistogramSet JSON must be a list of dicts')

  # Kick off the whitelist fetch early so it overlaps with import work below.
  bot_whitelist_future = stored_object.GetAsync(
      add_point_queue.BOT_WHITELIST_KEY)

  histograms = histogram_set.HistogramSet()
  histograms.ImportDicts(histogram_dicts)
  histograms.ResolveRelatedHistograms()
  histograms.DeduplicateDiagnostics()

  if len(histograms) == 0:
    raise api_request_handler.BadRequestError(
        'HistogramSet JSON must contain at least one histogram.')

  _LogDebugInfo(histograms)

  InlineDenseSharedDiagnostics(histograms)

  # TODO(eakuefner): Get rid of this.
  # https://github.com/catapult-project/catapult/issues/4242
  _PurgeHistogramBinData(histograms)

  revision = ComputeRevision(histograms)

  # Suite identity comes from reserved diagnostics on the first histogram.
  master = _GetDiagnosticValue(reserved_infos.MASTERS.name,
                               histograms.GetFirstHistogram())
  bot = _GetDiagnosticValue(reserved_infos.BOTS.name,
                            histograms.GetFirstHistogram())
  benchmark = _GetDiagnosticValue(reserved_infos.BENCHMARKS.name,
                                  histograms.GetFirstHistogram())
  benchmark_description = _GetDiagnosticValue(
      reserved_infos.BENCHMARK_DESCRIPTIONS.name,
      histograms.GetFirstHistogram(), optional=True)

  suite_key = utils.TestKey('%s/%s/%s' % (master, bot, benchmark))

  bot_whitelist = bot_whitelist_future.get_result()
  internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)

  # We'll skip the histogram-level sparse diagnostics because we need to
  # handle those with the histograms, below, so that we can properly assign
  # test paths.
  suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
      histograms, suite_key, revision, internal_only)

  # TODO(eakuefner): Refactor master/bot computation to happen above this line
  # so that we can replace with a DiagnosticRef rather than a full diagnostic.
  new_guids_to_old_diagnostics = DeduplicateAndPut(
      suite_level_sparse_diagnostic_entities, suite_key, revision)
  for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
    histograms.ReplaceSharedDiagnostic(
        new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

  tasks = _BatchHistogramsIntoTasks(
      suite_key.id(), histograms, revision, benchmark_description)

  _QueueHistogramTasks(tasks)
def _PolyMeasurementTestSuites(cls):
  """Returns test suites whose measurements span multiple path components.

  The list is loaded from datastore once and then cached on the class; a
  missing stored object caches an empty tuple.
  """
  if cls.POLY_MEASUREMENT_TEST_SUITES is None:
    # TODO(benjhayden): Revisit with threadsafe:true.
    fetched = yield stored_object.GetAsync(POLY_MEASUREMENT_TEST_SUITES_KEY)
    cls.POLY_MEASUREMENT_TEST_SUITES = fetched or ()
  raise ndb.Return(cls.POLY_MEASUREMENT_TEST_SUITES)
def _PartialTestSuites(cls):
  """Returns test path components that prefix a composite test suite.

  Each returned string must be joined with the subsequent test path
  component to form a full suite name. Some are internal-only, but there's
  no need to store a separate list for external users. Fetched once and
  cached on the class; a missing stored object caches an empty tuple.
  """
  if cls.PARTIAL_TEST_SUITES is None:
    # TODO(benjhayden): Revisit with threadsafe:true.
    fetched = yield stored_object.GetAsync(PARTIAL_TEST_SUITES_KEY)
    cls.PARTIAL_TEST_SUITES = fetched or ()
  raise ndb.Return(cls.PARTIAL_TEST_SUITES)
def _GroupableTestSuitePrefixes(cls):
  """Returns prefixes of test suites that the UI transforms for grouping.

  Some are internal-only, but there's no need to store a separate list for
  external users. Fetched once and cached on the class; a missing stored
  object caches an empty tuple.
  """
  if cls.GROUPABLE_TEST_SUITE_PREFIXES is None:
    # TODO(benjhayden): Revisit with threadsafe:true.
    fetched = yield stored_object.GetAsync(GROUPABLE_TEST_SUITE_PREFIXES_KEY)
    cls.GROUPABLE_TEST_SUITE_PREFIXES = fetched or ()
  raise ndb.Return(cls.GROUPABLE_TEST_SUITE_PREFIXES)
def _CompositeTestSuites(cls):
  """Returns test suites composed of two or more test path components.

  Every composite test suite starts with a partial test suite, though not
  every suite starting with a partial test suite is composite. Some are
  internal-only, but there's no need to store a separate list for external
  users. Fetched once and cached on the class; a missing stored object
  caches an empty tuple.
  """
  if cls.COMPOSITE_TEST_SUITES is None:
    # TODO(benjhayden): Revisit with threadsafe:true.
    fetched = yield stored_object.GetAsync(COMPOSITE_TEST_SUITES_KEY)
    cls.COMPOSITE_TEST_SUITES = fetched or ()
  raise ndb.Return(cls.COMPOSITE_TEST_SUITES)
def post(self):
  """Adds a single histogram or sparse shared diagnostic to the datastore.

  The |data| request parameter can be either a histogram or a sparse shared
  diagnostic; the set of diagnostics that are considered sparse (meaning
  that they don't normally change on every upload for a given benchmark
  from a given bot) is shown in add_histograms.SPARSE_DIAGNOSTIC_TYPES.

  See https://goo.gl/lHzea6 for detailed information on the JSON format for
  histograms and diagnostics.

  Request parameters:
    data: JSON encoding of a histogram or shared diagnostic.
    revision: a revision, given as an int.
    test_path: the test path to which this diagnostic or histogram should be
        attached.
  """
  datastore_hooks.SetPrivilegedRequest()

  # Start the whitelist fetch first so it runs while we parse and prewarm.
  whitelist_future = stored_object.GetAsync(add_point_queue.BOT_WHITELIST_KEY)

  params = json.loads(self.request.body)
  _PrewarmGets(params)
  whitelist = whitelist_future.get_result()

  # Roughly, the processing of histograms and the processing of rows can be
  # done in parallel since there are no dependencies.
  futures = [
      future
      for param in params
      for future in _ProcessRowAndHistogram(param, whitelist)
  ]
  ndb.Future.wait_all(futures)
def _GetConfiguration(cls, key, default=None):
  """Returns the stored-object value for |key|, caching it on the class.

  The first lookup for a given key goes to datastore; a missing or falsy
  stored value caches |default| instead.
  """
  if key in cls.CONFIGURATION:
    raise ndb.Return(cls.CONFIGURATION[key])
  value = (yield stored_object.GetAsync(key)) or default
  cls.CONFIGURATION[key] = value
  raise ndb.Return(value)
def ProcessHistogramSet(histogram_dicts):
  """Validates a HistogramSet JSON payload and queues it for processing.

  Instrumented variant: each processing phase is wrapped in a
  timing.WallTimeLogger so upload latency can be attributed per phase.
  Imports the dicts into a HistogramSet, deduplicates diagnostics, resolves
  suite-identifying diagnostics (master/bot/benchmark), records the upload
  revision, persists suite-level sparse diagnostics, and batches the
  histograms into task-queue tasks.

  Args:
    histogram_dicts: a list of dicts, each a histogram or shared diagnostic
        in HistogramSet JSON format.

  Raises:
    api_request_handler.BadRequestError: if the input is not a list or
        contains no histograms.
  """
  if not isinstance(histogram_dicts, list):
    # Fixed typo in user-facing message: "much be" -> "must be".
    raise api_request_handler.BadRequestError(
        'HistogramSet JSON must be a list of dicts')

  # Kick off the whitelist fetch early so it overlaps with import work below.
  bot_whitelist_future = stored_object.GetAsync(
      add_point_queue.BOT_WHITELIST_KEY)

  histograms = histogram_set.HistogramSet()

  with timing.WallTimeLogger('hs.ImportDicts'):
    histograms.ImportDicts(histogram_dicts)

  with timing.WallTimeLogger('hs.ResolveRelatedHistograms'):
    histograms.ResolveRelatedHistograms()

  with timing.WallTimeLogger('hs.DeduplicateDiagnostics'):
    histograms.DeduplicateDiagnostics()

  if len(histograms) == 0:
    raise api_request_handler.BadRequestError(
        'HistogramSet JSON must contain at least one histogram.')

  with timing.WallTimeLogger('hs._LogDebugInfo'):
    _LogDebugInfo(histograms)

  with timing.WallTimeLogger('InlineDenseSharedDiagnostics'):
    InlineDenseSharedDiagnostics(histograms)

  # TODO(eakuefner): Get rid of this.
  # https://github.com/catapult-project/catapult/issues/4242
  with timing.WallTimeLogger('_PurgeHistogramBinData'):
    _PurgeHistogramBinData(histograms)

  # Suite identity comes from reserved diagnostics on the first histogram.
  with timing.WallTimeLogger('_GetDiagnosticValue calls'):
    master = _GetDiagnosticValue(reserved_infos.MASTERS.name,
                                 histograms.GetFirstHistogram())
    bot = _GetDiagnosticValue(reserved_infos.BOTS.name,
                              histograms.GetFirstHistogram())
    benchmark = _GetDiagnosticValue(reserved_infos.BENCHMARKS.name,
                                    histograms.GetFirstHistogram())
    benchmark_description = _GetDiagnosticValue(
        reserved_infos.BENCHMARK_DESCRIPTIONS.name,
        histograms.GetFirstHistogram(), optional=True)

  with timing.WallTimeLogger('_ValidateMasterBotBenchmarkName'):
    _ValidateMasterBotBenchmarkName(master, bot, benchmark)

  with timing.WallTimeLogger('ComputeRevision'):
    suite_key = utils.TestKey('%s/%s/%s' % (master, bot, benchmark))
    logging.info('Suite: %s', suite_key.id())

    revision = ComputeRevision(histograms)

    bot_whitelist = bot_whitelist_future.get_result()
    internal_only = add_point_queue.BotInternalOnly(bot, bot_whitelist)

    revision_record = histogram.HistogramRevisionRecord.GetOrCreate(
        suite_key, revision)
    revision_record.put()

    last_added = histogram.HistogramRevisionRecord.GetLatest(
        suite_key).get_result()

    # On first upload, a query immediately following a put may return nothing.
    if not last_added:
      last_added = revision_record

    _CheckRequest(last_added, 'No last revision')

  # We'll skip the histogram-level sparse diagnostics because we need to
  # handle those with the histograms, below, so that we can properly assign
  # test paths.
  with timing.WallTimeLogger('FindSuiteLevelSparseDiagnostics'):
    suite_level_sparse_diagnostic_entities = FindSuiteLevelSparseDiagnostics(
        histograms, suite_key, revision, internal_only)

  # TODO(eakuefner): Refactor master/bot computation to happen above this line
  # so that we can replace with a DiagnosticRef rather than a full diagnostic.
  with timing.WallTimeLogger('DeduplicateAndPut'):
    new_guids_to_old_diagnostics = (
        histogram.SparseDiagnostic.FindOrInsertDiagnostics(
            suite_level_sparse_diagnostic_entities, suite_key,
            revision, last_added.revision).get_result())

  with timing.WallTimeLogger('ReplaceSharedDiagnostic calls'):
    for new_guid, old_diagnostic in new_guids_to_old_diagnostics.iteritems():
      histograms.ReplaceSharedDiagnostic(
          new_guid, diagnostic.Diagnostic.FromDict(old_diagnostic))

  with timing.WallTimeLogger('_BatchHistogramsIntoTasks'):
    tasks = _BatchHistogramsIntoTasks(
        suite_key.id(), histograms, revision, benchmark_description)

  with timing.WallTimeLogger('_QueueHistogramTasks'):
    _QueueHistogramTasks(tasks)
def GetExternalAsync(key):
  """Fetches the stored object for |key| under the external namespace."""
  external_key = NamespaceKey(key, datastore_hooks.EXTERNAL)
  value = yield stored_object.GetAsync(external_key)
  raise ndb.Return(value)
def GetAsync(key):
  """Fetches the stored object for |key| under the current namespace."""
  value = yield stored_object.GetAsync(NamespaceKey(key))
  raise ndb.Return(value)