def testHistogramsOutputAppendResults(self):
  """Running the processor twice without reset appends both labeled runs."""
  # Write a minimal histogram dict file for the artifact to point at.
  dicts_path = os.path.join(
      self.output_dir, compute_metrics.HISTOGRAM_DICTS_FILE)
  with open(dicts_path, 'w') as dicts_file:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], dicts_file)

  self.SerializeIntermediateResults(test_results=[
      testing.TestResult(
          'benchmark/story',
          output_artifacts={
              'histogram_dicts.json': testing.Artifact(dicts_path)
          },
      ),
  ])

  # Process the same intermediate results twice with different labels.
  for label in ('label1', 'label2'):
    processor.main([
        '--output-format', 'histograms',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', label,
    ])

  output_path = os.path.join(
      self.output_dir, histograms_output.OUTPUT_FILENAME)
  with open(output_path) as output_file:
    merged = json.load(output_file)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(merged)
  # Both runs should be present, each tagged with its own label diagnostic.
  self.assertEqual(len(out_histograms), 2)
  diag_values = [list(v) for v in out_histograms.shared_diagnostics]
  self.assertIn(['label1'], diag_values)
  self.assertIn(['label2'], diag_values)
def testComputeTestPathWithStory(self):
  """The story name is sanitized and appended to the computed test path."""
  hist = histogram_module.Histogram('hist', 'count')
  histograms = histogram_set.HistogramSet([hist])
  shared = (
      (reserved_infos.MASTERS.name, ['master']),
      (reserved_infos.BOTS.name, ['bot']),
      (reserved_infos.BENCHMARKS.name, ['benchmark']),
      (reserved_infos.STORIES.name, ['http://story']),
  )
  for diag_name, values in shared:
    histograms.AddSharedDiagnostic(
        diag_name, histogram_module.GenericSet(values))
  hist = histograms.GetFirstHistogram()
  test_path = add_histograms.ComputeTestPath(hist.guid, histograms)
  # Non-alphanumeric characters in the story URL become underscores.
  self.assertEqual('master/bot/benchmark/hist/http___story', test_path)
def testComputeTestPathWithoutStory(self):
  """Without a story diagnostic the test path ends at the histogram name."""
  histogram = histogram_module.Histogram('hist', 'count')
  # NOTE(review): this test uses the legacy TelemetryInfo/BuildbotInfo
  # diagnostics and HistogramSet exposed via histogram_module -- presumably
  # an older catapult API surface; confirm these still exist before
  # modernizing to the reserved_infos/generic_set style used elsewhere.
  histograms = histogram_module.HistogramSet([histogram])
  telemetry_info = histogram_module.TelemetryInfo()
  telemetry_info.AddInfo({
      'benchmarkName': 'benchmark'
  })
  histograms.AddSharedDiagnostic('telemetry', telemetry_info)
  buildbot_info = histogram_module.BuildbotInfo({
      'displayMasterName': 'master',
      'displayBotName': 'bot'
  })
  histograms.AddSharedDiagnostic('buildbot', buildbot_info)
  histogram = histograms.GetFirstHistogram()
  test_path = add_histograms.ComputeTestPath(histogram.guid, histograms)
  self.assertEqual('master/bot/benchmark/hist', test_path)
def testReplaceSharedDiagnostic(self):
  """Replacing a shared diagnostic rewires only the matching guid."""
  hist = histogram.Histogram('', 'unitless')
  hists = histogram_set.HistogramSet([hist])
  diag0 = generic_set.GenericSet(['shared0'])
  diag1 = generic_set.GenericSet(['shared1'])
  hists.AddSharedDiagnostic('generic0', diag0)
  hists.AddSharedDiagnostic('generic1', diag1)
  replaced_guid, kept_guid = diag0.guid, diag1.guid
  hists.ReplaceSharedDiagnostic(
      replaced_guid, diagnostic_ref.DiagnosticRef('fakeGuid'))
  # The replaced slot now resolves to the ref's guid ...
  self.assertEqual(hist.diagnostics['generic0'].guid, 'fakeGuid')
  # ... while the untouched diagnostic keeps its original guid.
  self.assertEqual(hist.diagnostics['generic1'].guid, kept_guid)
def testPopulateHistogramSet_UsesHistogramSetData(self):
  """Histograms added during a story run survive PopulateHistogramSet()."""
  with self.CreateResults(benchmark_name='benchmark_name') as results:
    story = self.pages[0]
    results.WillRunPage(story)
    results.AddHistogram(histogram_module.Histogram('foo', 'count'))
    results.DidRunPage(story)
    results.PopulateHistogramSet()

    histogram_dicts = results.AsHistogramDicts()
    # The added histogram plus its accompanying diagnostic dicts -- 8 total.
    self.assertEqual(8, len(histogram_dicts))

    hs = histogram_set.HistogramSet()
    hs.ImportDicts(histogram_dicts)
    hist = hs.GetHistogramNamed('foo')
    self.assertItemsEqual(hist.diagnostics[reserved_infos.BENCHMARKS.name],
                          ['benchmark_name'])
def testImportHistogramDicts_DelayedImport(self):
  """import_immediately=False defers dicts until PopulateHistogramSet()."""
  hs = histogram_set.HistogramSet()
  hs.AddHistogram(histogram_module.Histogram('foo', 'count'))
  hs.AddSharedDiagnosticToAllHistograms(
      'bar', generic_set.GenericSet(['baz']))
  histogram_dicts = hs.AsDicts()

  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  results = self.getPageTestResults(
      benchmark_metadata=benchmark_metadata, start=1501773200)

  story = self.pages[0]
  results.WillRunPage(story)
  results.ImportHistogramDicts(histogram_dicts, import_immediately=False)
  results.DidRunPage(story)
  # Nothing imported yet ...
  self.assertEqual(len(results.AsHistogramDicts()), 0)
  results.PopulateHistogramSet()
  # ... until the histogram set is explicitly populated.
  self.assertEqual(results.AsHistogramDicts(), histogram_dicts)
def testEvaluateFailure_HistogramNoValues(self, isolate_retrieve):
  """Evaluating reads of a sample-less histogram fails on every attempt."""
  # Each of the 10 attempts fetches two things from isolate: the file
  # listing, then a serialized HistogramSet whose single histogram carries
  # no sample values.
  isolate_retrieve.side_effect = itertools.chain(*itertools.repeat(
      [('{"files": {"some_benchmark/perf_results.json": '
        '{"h": "394890891823812873798734a"}}}'),
       json.dumps(
           histogram_set.HistogramSet([
               histogram_module.Histogram('some_benchmark', 'count')
           ]).AsDicts())], 10))
  self.PopulateTaskGraph(
      benchmark='some_benchmark',
      chart='some_chart',
      grouping_label='label',
      story='https://story')
  self.assertNotEqual({},
                      task_module.Evaluate(
                          self.job,
                          event_module.Event(
                              type='initiate', target_task=None, payload={}),
                          self.evaluator))
  # Every attempt's payload should end 'failed' with a ReadValueNotFound
  # error after a single try.
  self.assertEqual(
      {
          'read_value_chromium@aaaaaaa_%s' % (attempt,): {
              'benchmark': 'some_benchmark',
              'change': mock.ANY,
              'mode': 'histogram_sets',
              'results_filename': 'some_benchmark/perf_results.json',
              'histogram_options': {
                  'grouping_label': 'label',
                  'story': 'https://story',
                  'statistic': None,
              },
              'graph_json_options': {
                  'chart': 'some_chart',
                  # NOTE(review): 'some_trace' is not passed to
                  # PopulateTaskGraph above -- presumably its default;
                  # confirm against the helper.
                  'trace': 'some_trace',
              },
              'status': 'failed',
              'errors': [{
                  'reason': 'ReadValueNotFound',
                  'message': mock.ANY,
              }],
              'tries': 1,
          } for attempt in range(10)
      },
      task_module.Evaluate(
          self.job,
          event_module.Event(type='select', target_task=None, payload={}),
          evaluators.Selector(task_type='read_value')))
def AddDurationHistogram(self, duration_in_milliseconds):
  """Records the benchmark's total duration as a single-sample histogram.

  Attaches label, benchmark name, benchmark start, and (if present)
  benchmark description diagnostics from self.telemetry_info.
  """
  duration_hist = histogram.Histogram(
      'benchmark_total_duration', 'ms_smallerIsBetter')
  duration_hist.AddSample(duration_in_milliseconds)
  # TODO(#4244): Do this generally.
  info = self.telemetry_info
  diagnostics = duration_hist.diagnostics
  if info.label:
    diagnostics[reserved_infos.LABELS.name] = generic_set.GenericSet(
        [info.label])
  diagnostics[reserved_infos.BENCHMARKS.name] = generic_set.GenericSet(
      [info.benchmark_name])
  # benchmark_start_epoch is in seconds; DateRange expects milliseconds.
  diagnostics[reserved_infos.BENCHMARK_START.name] = histogram.DateRange(
      info.benchmark_start_epoch * 1000)
  if info.benchmark_descriptions:
    diagnostics[reserved_infos.BENCHMARK_DESCRIPTIONS.name] = (
        generic_set.GenericSet([info.benchmark_descriptions]))
  self._histograms.AddHistogram(duration_hist)
def testPostHistogram_EmptyCreatesNoTestsOrRowsOrHistograms(self):
  """Posting a histogram with no samples persists nothing to the datastore."""
  test_path = 'Chromium/win7/blink_perf.dom/foo'
  empty_hist = histogram_module.Histogram('foo', 'count')
  payload = json.dumps([{
      'data': empty_hist.AsDict(),
      'test_path': test_path,
      'benchmark_description': None,
      'revision': 123
  }])
  self.testapp.post('/add_histograms_queue', payload)
  # No rows, no test metadata, no stored histograms.
  self.assertEqual(len(graph_data.Row.query().fetch()), 0)
  self.assertEqual(len(graph_data.TestMetadata.query().fetch()), 0)
  self.assertEqual(len(histogram.Histogram.query().fetch()), 0)
def testReadHistogramsJsonValueStatisticNoSamples(self):
  """Requesting a statistic of a sample-less histogram is a read error."""
  hist = histogram_module.Histogram('hist', 'count')
  histograms = histogram_set.HistogramSet([hist])
  for diag_name, values in (
      (reserved_infos.STORY_TAGS.name, ['group:tir_label']),
      (reserved_infos.STORIES.name, ['story'])):
    histograms.AddSharedDiagnostic(diag_name, generic_set.GenericSet(values))
  self.SetOutputFileContents(histograms.AsDicts())
  quest = read_value.ReadHistogramsJsonValue(
      hist.name, 'tir_label', 'story', statistic='avg')
  execution = quest.Start(None, 'server', 'output hash')
  execution.Poll()
  self.assertReadValueError(execution)
def _CreateHistogram(self, name, stories=None, tags=None, had_failures=False):
  """Builds a count histogram with optional story/tag/failure diagnostics."""
  h = histogram.Histogram(name, 'count')
  optional_diagnostics = (
      (reserved_infos.STORIES.name, stories),
      (reserved_infos.STORY_TAGS.name, tags),
      (reserved_infos.HAD_FAILURES.name, [True] if had_failures else None),
  )
  for diag_name, values in optional_diagnostics:
    if values:
      h.diagnostics[diag_name] = generic_set.GenericSet(values)
  return h
def testReadHistogramsJsonValueWithNoStory(self):
  """ReadValue matches sample values via the shared stories diagnostic."""
  hist = histogram_module.Histogram('hist', 'count')
  for sample in (0, 1, 2):
    hist.AddSample(sample)
  histograms = histogram_set.HistogramSet([hist])
  histograms.AddSharedDiagnosticToAllHistograms(
      reserved_infos.STORIES.name, generic_set.GenericSet(['story']))
  self.SetOutputFileContents(histograms.AsDicts())
  quest = read_value.ReadValue(
      results_filename='chartjson-output.json',
      metric=hist.name,
      trace_or_story='story')
  execution = quest.Start(None, 'server', 'output hash')
  execution.Poll()
  self.assertReadValueSuccess(execution)
  self.assertEqual(execution.result_values, (0, 1, 2))
  self.assertRetrievedOutputJson()
def testMerge(self):
  """DiagnosticMap.Merge: same-key merge, disjoint keys, and unmergeables."""
  events = histogram.RelatedEventSet()
  events.Add({
      'stableId': '0.0',
      'title': 'foo',
      'start': 0,
      'duration': 1,
  })
  generic = histogram.Generic('generic diagnostic')
  generic2 = histogram.Generic('generic diagnostic 2')
  related_set = histogram.RelatedHistogramSet([
      histogram.Histogram('histogram', 'count'),
  ])

  hist = histogram.Histogram('', 'count')

  # When Histograms are merged, first an empty clone is created with an empty
  # DiagnosticMap.
  hist2 = histogram.Histogram('', 'count')
  hist2.diagnostics['a'] = generic
  hist.diagnostics.Merge(hist2.diagnostics, hist, hist2)
  # Merging into an empty map adopts the diagnostic object itself.
  self.assertIs(generic, hist.diagnostics['a'])

  # Separate keys are not merged.
  hist3 = histogram.Histogram('', 'count')
  hist3.diagnostics['b'] = generic2
  hist.diagnostics.Merge(hist3.diagnostics, hist, hist3)
  self.assertIs(generic, hist.diagnostics['a'])
  self.assertIs(generic2, hist.diagnostics['b'])

  # Merging unmergeable diagnostics should produce an
  # UnmergeableDiagnosticSet.
  hist4 = histogram.Histogram('', 'count')
  hist4.diagnostics['a'] = related_set
  hist.diagnostics.Merge(hist4.diagnostics, hist, hist4)
  self.assertIsInstance(hist.diagnostics['a'],
                        histogram.UnmergeableDiagnosticSet)
  diagnostics = list(hist.diagnostics['a'])
  self.assertIs(generic, diagnostics[0])
  self.assertIs(related_set, diagnostics[1])

  # UnmergeableDiagnosticSets are mergeable: their members are flattened
  # into the existing set, preserving order.
  hist5 = histogram.Histogram('', 'count')
  hist5.diagnostics['a'] = histogram.UnmergeableDiagnosticSet(
      [events, generic2])
  hist.diagnostics.Merge(hist5.diagnostics, hist, hist5)
  self.assertIsInstance(hist.diagnostics['a'],
                        histogram.UnmergeableDiagnosticSet)
  diagnostics = list(hist.diagnostics['a'])
  self.assertIs(generic, diagnostics[0])
  self.assertIs(related_set, diagnostics[1])
  self.assertIs(events, diagnostics[2])
  self.assertIs(generic2, diagnostics[3])
def testHistogramsOutputResetResults(self):
  """--reset-results discards histograms from the previous labeled run."""
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateHistogramsArtifact(
                  histogram.Histogram('a', 'unitless')),
          ],
      ),
  )
  base_args = [
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
  ]
  processor.main(base_args + ['--results-label', 'label1'])
  # The second run resets the output, so only its own results remain.
  processor.main(base_args + ['--results-label', 'label2', '--reset-results'])

  with open(
      os.path.join(self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  self.assertEqual(len(out_histograms), 1)
  hist = out_histograms.GetFirstHistogram()
  self.assertEqual(hist.diagnostics['labels'],
                   generic_set.GenericSet(['label2']))
def testCsvOutput(self):
  """End-to-end CSV export of one histogram plus shared diagnostics."""
  test_hist = histogram.Histogram('a', 'ms')
  test_hist.AddSample(3000)
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateHistogramsArtifact(test_hist),
              self.CreateDiagnosticsArtifact(
                  benchmarks=['benchmark'],
                  osNames=['linux'],
                  documentationUrls=[['documentation', 'url']]),
          ],
          start_time='2009-02-13T23:31:30.987000Z',
      ),
  )
  processor.main([
      '--output-format', 'csv',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])
  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    lines = [line for line in f]
  # Transpose the CSV (header row + one data row) into
  # (column_name, value) pairs so the whole row can be compared at once.
  actual = list(zip(*csv.reader(lines)))
  expected = [('name', 'a'), ('unit', 'ms'), ('avg', '3000'), ('count', '1'),
              ('max', '3000'), ('min', '3000'), ('std', '0'),
              ('sum', '3000'), ('architectures', ''),
              ('benchmarks', 'benchmark'),
              ('benchmarkStart', '2009-02-13 23:31:30'), ('bots', ''),
              ('builds', ''), ('deviceIds', ''), ('displayLabel', 'label'),
              ('masters', ''), ('memoryAmounts', ''), ('osNames', 'linux'),
              ('osVersions', ''), ('productVersions', ''), ('stories', ''),
              ('storysetRepeats', ''), ('traceStart', ''), ('traceUrls', '')]
  self.assertEqual(actual, expected)
def testCsvOutput(self):
  """CSV export of one histogram with diagnostics injected via the serializer."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  test_hist = histogram.Histogram('a', 'ms')
  test_hist.AddSample(3000)
  with open(hist_file, 'w') as f:
    json.dump([test_hist.AsDict()], f)
  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
          ),
      ],
      diagnostics={
          'benchmarks': ['benchmark'],
          'osNames': ['linux'],
          'documentationUrls': [['documentation', 'url']],
      },
  )
  processor.main([
      '--output-format', 'csv',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])
  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    lines = [line for line in f]
  # Transpose the CSV (header row + one data row) into
  # (column_name, value) pairs so the whole row can be compared at once.
  # No start_time was provided above, hence the empty benchmarkStart cell.
  actual = list(zip(*csv.reader(lines)))
  expected = [
      ('name', 'a'), ('unit', 'ms'), ('avg', '3000'), ('count', '1'),
      ('max', '3000'), ('min', '3000'), ('std', '0'), ('sum', '3000'),
      ('architectures', ''), ('benchmarks', 'benchmark'),
      ('benchmarkStart', ''), ('bots', ''), ('builds', ''),
      ('deviceIds', ''), ('displayLabel', 'label'), ('masters', ''),
      ('memoryAmounts', ''), ('osNames', 'linux'), ('osVersions', ''),
      ('productVersions', ''), ('stories', ''), ('storysetRepeats', ''),
      ('traceStart', ''), ('traceUrls', '')
  ]
  self.assertEqual(actual, expected)
def _CreateHistogram(
    self, master=None, bot=None, benchmark=None, commit_position=None,
    device=None, owner=None, stories=None, benchmark_description=None,
    samples=None, max_samples=None):
  """Builds a HistogramSet with one 'hist' histogram and optional shared
  diagnostics for each provided keyword argument."""
  hist = histogram_module.Histogram('hist', 'count')
  if max_samples:
    hist.max_num_sample_values = max_samples
  for sample in samples or ():
    hist.AddSample(sample)
  histograms = histogram_set.HistogramSet([hist])
  # (diagnostic name, values) pairs, added in the original insertion order;
  # entries with falsy values are skipped.
  optional_diagnostics = (
      (reserved_infos.MASTERS.name, [master] if master else None),
      (reserved_infos.BOTS.name, [bot] if bot else None),
      (reserved_infos.CHROMIUM_COMMIT_POSITIONS.name,
       [commit_position] if commit_position else None),
      (reserved_infos.BENCHMARKS.name, [benchmark] if benchmark else None),
      (reserved_infos.BENCHMARK_DESCRIPTIONS.name,
       [benchmark_description] if benchmark_description else None),
      (reserved_infos.OWNERS.name, [owner] if owner else None),
      (reserved_infos.DEVICE_IDS.name, [device] if device else None),
      (reserved_infos.STORIES.name, stories),
  )
  for diag_name, values in optional_diagnostics:
    if values:
      histograms.AddSharedDiagnostic(
          diag_name, generic_set.GenericSet(values))
  return histograms
def testHeapProfiler(self):
  """Sanity-checks heap_profiler.Profile over a small HistogramSet.

  Exact object sizes vary across platforms and Python versions, so only
  value counts and lower bounds on summed sizes are pinned.
  """
  test_data = histogram_set.HistogramSet()
  for i in xrange(10):
    test_hist = histogram.Histogram('test', 'n%')
    test_hist.AddSample(i / 10.0)
    test_data.AddHistogram(test_hist)

  histograms = heap_profiler.Profile(test_data)

  set_size_hist = histograms.GetHistogramNamed('heap:HistogramSet')
  self.assertEquals(set_size_hist.num_values, 1)
  # The exact sizes of python objects can vary between platforms and versions.
  self.assertGreater(set_size_hist.sum, 10000)

  hist_size_hist = histograms.GetHistogramNamed('heap:Histogram')
  self.assertEquals(hist_size_hist.num_values, 10)
  self.assertGreater(hist_size_hist.sum, 10000)

  related_names = hist_size_hist.diagnostics['types']
  self.assertEquals(related_names.Get('HistogramBin'), 'heap:HistogramBin')
  self.assertEquals(related_names.Get('DiagnosticMap'), 'heap:DiagnosticMap')

  # NOTE(review): bin index 33 and the counts asserted below look tied to
  # the profiler's current binning scheme; confirm before changing.
  properties = hist_size_hist.bins[33].diagnostic_maps[0]['properties']
  types = hist_size_hist.bins[33].diagnostic_maps[0]['types']
  self.assertEquals(len(properties), 14)
  self.assertGreater(properties.Get('_bins'), 1000)
  self.assertEquals(len(types), 4)
  self.assertGreater(types.Get('HistogramBin'), 1000)
  self.assertGreater(types.Get('(builtin types)'), 1000)

  bin_size_hist = histograms.GetHistogramNamed('heap:HistogramBin')
  self.assertEquals(bin_size_hist.num_values, 32)
  self.assertGreater(bin_size_hist.sum, 1000)

  diag_map_size_hist = histograms.GetHistogramNamed('heap:DiagnosticMap')
  self.assertEquals(diag_map_size_hist.num_values, 10)
  self.assertGreater(diag_map_size_hist.sum, 1000)

  range_size_hist = histograms.GetHistogramNamed('heap:Range')
  self.assertEquals(range_size_hist.num_values, 22)
  self.assertGreater(range_size_hist.sum, 1000)

  stats_size_hist = histograms.GetHistogramNamed('heap:RunningStatistics')
  self.assertEquals(stats_size_hist.num_values, 10)
  self.assertGreater(stats_size_hist.sum, 1000)
def testHistogramsOutputAppendResults(self):
  """Two labeled runs accumulate into a single histograms output file."""
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateHistogramsArtifact(
                  histogram.Histogram('a', 'unitless')),
          ],
      ),
  )
  # Process the same intermediate results twice with different labels.
  for label in ('label1', 'label2'):
    processor.main([
        '--output-format', 'histograms',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', label,
    ])

  with open(
      os.path.join(self.output_dir, histograms_output.OUTPUT_FILENAME)) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  self.assertEqual(len(out_histograms), 2)
  # Each histogram carries exactly one of the two run labels.
  observed_labels = set(label for hist in out_histograms
                        for label in hist.diagnostics['labels'])
  self.assertEqual(observed_labels, set(['label1', 'label2']))
def testReadHistogramsJsonValueStatisticNoSamples(self):
  """A statistic of a sample-less histogram yields ReadValueNoValues."""
  hist = histogram_module.Histogram('hist', 'count')
  histograms = histogram_set.HistogramSet([hist])
  for diag_name, values in (
      (reserved_infos.STORY_TAGS.name, ['group:label']),
      (reserved_infos.STORIES.name, ['story'])):
    histograms.AddSharedDiagnosticToAllHistograms(
        diag_name, generic_set.GenericSet(values))
  self.SetOutputFileContents(histograms.AsDicts())
  quest = read_value.ReadValue(
      results_filename='chartjson-output.json',
      metric=hist.name,
      grouping_label='label',
      trace_or_story='story',
      statistic='avg')
  execution = quest.Start(None, 'server', 'output hash')
  execution.Poll()
  self.assertReadValueError(execution, 'ReadValueNoValues')
def testComputeTestPathWithIsRefWithoutStory(self):
  """A reference-build histogram without a story gets a '/ref' suffix."""
  hist = histogram_module.Histogram('hist', 'count')
  histograms = histogram_set.HistogramSet([hist])
  shared = (
      (reserved_infos.MASTERS.name, ['master']),
      (reserved_infos.BOTS.name, ['bot']),
      (reserved_infos.BENCHMARKS.name, ['benchmark']),
      (reserved_infos.IS_REFERENCE_BUILD.name, [True]),
  )
  for diag_name, values in shared:
    histograms.AddSharedDiagnostic(
        diag_name, histogram_module.GenericSet(values))
  hist = histograms.GetFirstHistogram()
  test_path = add_histograms.ComputeTestPath(hist.guid, histograms)
  self.assertEqual('master/bot/benchmark/hist/ref', test_path)
def testSerializationSize(self):
  """Pins the exact JSON-serialized size of a histogram as samples accrue.

  NOTE(review): the byte counts below are exact lengths under the current
  serialization format; any format change will shift all of them.
  """
  hist = histogram.Histogram('', 'unitless', self.TEST_BOUNDARIES)
  d = hist.AsDict()
  self.assertEqual(107, len(ToJSON(d)))
  # With no samples, allBins is omitted entirely.
  self.assertIsNone(d.get('allBins'))
  self.assertDeepEqual(d, histogram.Histogram.FromDict(d).AsDict())

  hist.AddSample(100)
  d = hist.AsDict()
  self.assertEqual(198, len(ToJSON(d)))
  # Sparse bins serialize as a dict keyed by bin index.
  self.assertIsInstance(d['allBins'], dict)
  self.assertDeepEqual(d, histogram.Histogram.FromDict(d).AsDict())

  hist.AddSample(100)
  d = hist.AsDict()
  # SAMPLE_VALUES grew by "100,"
  self.assertEqual(202, len(ToJSON(d)))
  self.assertIsInstance(d['allBins'], dict)
  self.assertDeepEqual(d, histogram.Histogram.FromDict(d).AsDict())

  hist.AddSample(271, {'foo': generic_set.GenericSet(['bar'])})
  d = hist.AsDict()
  self.assertEqual(268, len(ToJSON(d)))
  self.assertIsInstance(d['allBins'], dict)
  self.assertDeepEqual(d, histogram.Histogram.FromDict(d).AsDict())

  # Add samples to most bins so that allBinsArray is more efficient than
  # allBinsDict.
  for i in xrange(10, 100):
    hist.AddSample(10 * i)
  d = hist.AsDict()
  self.assertEqual(697, len(ToJSON(d)))
  self.assertIsInstance(d['allBins'], list)
  self.assertDeepEqual(d, histogram.Histogram.FromDict(d).AsDict())

  # Lowering maxNumSampleValues takes a random sub-sample of the existing
  # sampleValues. We have deliberately set all samples to 3-digit numbers so
  # that the serialized size is constant regardless of which samples are
  # retained.
  hist.max_num_sample_values = 10
  d = hist.AsDict()
  self.assertEqual(389, len(ToJSON(d)))
  self.assertIsInstance(d['allBins'], list)
  self.assertDeepEqual(d, histogram.Histogram.FromDict(d).AsDict())
def testHtmlOutput(self):
  """HTML output embeds the histogram and all four shared diagnostics."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
          ),
      ],
      diagnostics={
          'benchmarks': ['benchmark'],
          'osNames': ['linux'],
          'documentationUrls': [['documentation', 'url']],
      },
      start_time='2009-02-13T23:31:30.987000Z',
  )
  processor.main([
      '--output-format', 'html',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])
  with open(os.path.join(
      self.output_dir, html_output.OUTPUT_FILENAME)) as f:
    # The viewer HTML embeds the results JSON; extract it back out.
    results = render_histograms_viewer.ReadExistingResults(f.read())
  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  self.assertEqual(len(out_histograms), 1)
  self.assertEqual(out_histograms.GetFirstHistogram().name, 'a')
  self.assertEqual(out_histograms.GetFirstHistogram().unit, 'unitless')
  # Three injected diagnostics plus the results label.
  diag_values = [list(v) for v in out_histograms.shared_diagnostics]
  self.assertEqual(len(diag_values), 4)
  self.assertIn(['benchmark'], diag_values)
  self.assertIn(['linux'], diag_values)
  self.assertIn([['documentation', 'url']], diag_values)
  self.assertIn(['label'], diag_values)
def testCsvOutputAppendResults(self):
  """A second processor run without reset prepends its CSV row."""
  dicts_path = os.path.join(self.output_dir,
                            compute_metrics.HISTOGRAM_DICTS_FILE)
  with open(dicts_path, 'w') as dicts_file:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], dicts_file)

  self.SerializeIntermediateResults(test_results=[
      testing.TestResult(
          'benchmark/story',
          output_artifacts={
              'histogram_dicts.json': testing.Artifact(dicts_path)
          },
      ),
  ])

  # Process the same intermediate results twice with different labels.
  for label in ('label1', 'label2'):
    processor.main([
        '--output-format', 'csv',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', label,
    ])

  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    lines = list(f)
  # Header plus two data rows; the newest run's row comes first.
  self.assertEqual(len(lines), 3)
  self.assertIn('label2', lines[1])
  self.assertIn('label1', lines[2])
def testBasic(self):
  """Samples land in the correct bins, including the edge bins."""
  hist = histogram.Histogram('', 'unitless', self.TEST_BOUNDARIES)
  bin_for_250 = hist.GetBinForValue(250)
  self.assertEqual(bin_for_250.range.min, 200)
  self.assertEqual(bin_for_250.range.max, 300)
  for sample in (-1, 0, 0, 500, 999, 1000):
    hist.AddSample(sample)
  # -1 lands in the first bin and 1000 in the last one.
  self.assertEqual(hist.bins[0].count, 1)
  self.assertEqual(hist.GetBinForValue(0).count, 2)
  self.assertEqual(hist.GetBinForValue(500).count, 1)
  self.assertEqual(hist.GetBinForValue(999).count, 1)
  self.assertEqual(hist.bins[-1].count, 1)
  self.assertEqual(hist.num_values, 6)
  # (-1 + 0 + 0 + 500 + 999 + 1000) / 6
  self.assertAlmostEqual(hist.average, 416.3333333)
def testReadHistogramsJsonValueStoryWithNoValues(self, retrieve):
  """A histogram with no sample values yields a read-value error."""
  hist = histogram_module.Histogram('hist', 'count')
  histograms = histogram_set.HistogramSet([hist])
  # First fetch returns the isolate file listing, second the histograms.
  isolate_listing = {
      'files': {
          'chartjson-output.json': {
              'h': 'histograms hash'
          }
      }
  }
  retrieve.side_effect = (
      isolate_listing,
      json.dumps(histograms.AsDicts()),
  )
  quest = read_value.ReadHistogramsJsonValue('chart', None, 'story')
  execution = quest.Start(None, 'output hash')
  execution.Poll()
  self.assertReadValueError(execution)
def testReadHistogramsJsonValueWithNoTirLabel(self):
  """Matching by tir_label alone reads all of a histogram's samples."""
  hist = histogram_module.Histogram('hist', 'count')
  for sample in (0, 1, 2):
    hist.AddSample(sample)
  histograms = histogram_set.HistogramSet([hist])
  histograms.AddSharedDiagnostic(
      reserved_infos.STORY_TAGS.name,
      generic_set.GenericSet(['group:tir_label']))
  self.SetOutputFileContents(histograms.AsDicts())
  quest = read_value.ReadHistogramsJsonValue(hist.name, 'tir_label', None)
  execution = quest.Start(None, 'output hash')
  execution.Poll()
  self.assertReadValueSuccess(execution)
  self.assertEqual(execution.result_values, (0, 1, 2))
  self.assertRetrievedOutputJson()
def testCsvOutput(self):
  """TBMv2 sample-metric results computed from a trace export to CSV.

  Fix: removed the unused local `test_hist` (a histogram was built and
  sampled but never attached to any artifact -- dead copy-paste leftover;
  this test exercises the trace + sampleMetric path instead).
  """
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateHtmlTraceArtifact(),
              self.CreateDiagnosticsArtifact(
                  benchmarks=['benchmark'],
                  osNames=['linux'],
                  documentationUrls=[['documentation', 'url']]),
          ],
          tags=['tbmv2:sampleMetric'],
          start_time='2009-02-13T23:31:30.987000Z',
      ),
  )
  processor.main([
      '--is-unittest',
      '--output-format', 'csv',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])
  sample_rows = self.ReadSampleHistogramsFromCsv()
  self.assertEqual(len(sample_rows), 1)
  actual = sample_rows[0]
  # Values produced by the sampleMetric plus the injected diagnostics.
  for column, value in (
      ('name', SAMPLE_HISTOGRAM_NAME),
      ('unit', 'B'),
      ('avg', '50'),
      ('count', '2'),
      ('benchmarks', 'benchmark'),
      ('benchmarkStart', '2009-02-13 23:31:30'),
      ('displayLabel', 'label'),
      ('osNames', 'linux'),
      ('traceStart', '2009-02-13 23:31:30')):
    self.assertEqual(actual[column], value)
def testConvertTwoStories(self):
  """Histograms from repeated story runs are all converted and labeled."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

  # Two stories, each run twice, all pointing at the same histogram dump.
  test_results = [
      testing.TestResult(
          test_name,
          artifacts={
              'histogram_dicts.json': testing.Artifact(hist_file)
          },
      )
      for test_name in ('benchmark/story1', 'benchmark/story2',
                        'benchmark/story1', 'benchmark/story2')
  ]
  in_results = testing.IntermediateResults(test_results=test_results)

  histogram_dicts = histograms_output.Convert(in_results,
                                              results_label='label')
  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(histogram_dicts)
  # One histogram per story run, all carrying the same label diagnostic.
  self.assertEqual(len(out_histograms), 4)
  hist = out_histograms.GetFirstHistogram()
  self.assertEqual(hist.name, 'a')
  self.assertEqual(hist.unit, 'unitless')
  self.assertEqual(list(hist.diagnostics['labels']), ['label'])
def _TestDiagnosticsInternalOnly(self):
  """Uploads a histogram set carrying the standard shared diagnostics and
  drains the add_histograms task queue."""
  histograms = histogram_set.HistogramSet(
      [histogram_module.Histogram('hist', 'count')])
  shared_diagnostics = (
      (reserved_infos.MASTERS.name, ['master']),
      (reserved_infos.BOTS.name, ['bot']),
      (reserved_infos.CHROMIUM_COMMIT_POSITIONS.name, [12345]),
      (reserved_infos.BENCHMARKS.name, ['benchmark']),
  )
  for diag_name, values in shared_diagnostics:
    histograms.AddSharedDiagnostic(
        diag_name, histogram_module.GenericSet(values))
  self.testapp.post(
      '/add_histograms', {'data': json.dumps(histograms.AsDicts())})
  self.ExecuteTaskQueueTasks('/add_histograms_queue',
                             add_histograms.TASK_QUEUE_NAME)