def testComputeTBMv2Metrics(self):
  """TBMv2 metrics run per story and their histograms are aggregated.

  Both stories have a remote HTML trace and a tbmv2 tag; with the metric
  runner mocked out, each story contributes one histogram dict and ends
  with status PASS.
  """
  in_results = testing.IntermediateResults([
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              compute_metrics.HTML_TRACE_NAME:
                  testing.Artifact('/trace1.html', 'gs://trace1.html')
          },
          tags=['tbmv2:metric1'],
      ),
      testing.TestResult(
          'benchmark/story2',
          output_artifacts={
              compute_metrics.HTML_TRACE_NAME:
                  testing.Artifact('/trace2.html', 'gs://trace2.html')
          },
          tags=['tbmv2:metric2'],
      ),
  ])
  expected_dict = histogram.Histogram('a', 'unitless').AsDict()
  fake_result = mre_result.MreResult()
  fake_result.AddPair('histograms', [expected_dict])
  with mock.patch(GETSIZE_METHOD) as size_mock:
    with mock.patch(RUN_METRICS_METHOD) as metrics_mock:
      # Non-zero size so traces are not skipped as empty.
      size_mock.return_value = 1000
      metrics_mock.return_value = fake_result
      histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)
  # One histogram per story, and neither story was marked failed.
  self.assertEqual(histogram_dicts, [expected_dict, expected_dict])
  self.assertEqual(in_results['testResults'][0]['status'], 'PASS')
  self.assertEqual(in_results['testResults'][1]['status'], 'PASS')
def testRemoteArtifacts(self):
  """Artifacts with fetch URLs are listed by their remote locations."""
  converted = self.Convert([
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              'logs.txt': testing.Artifact(
                  'base/artifacts/logs1.txt',
                  fetch_url='gs://artifacts/logs1.txt')
          }),
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              'logs.txt': testing.Artifact(
                  'base/artifacts/logs2.txt',
                  fetch_url='gs://artifacts/logs2.txt'),
              'trace.json': testing.Artifact(
                  'base/artifacts/trace2.json',
                  fetch_url='gs://artifacts/trace2.json')
          }),
  ])
  entry = self.FindTestResult(converted, 'benchmark', 'story1')
  self.assertEqual(entry['actual'], 'PASS')
  self.assertEqual(entry['expected'], 'PASS')
  # Repeated runs append to the same artifact name; each name maps to the
  # list of remote URLs in run order.
  self.assertEqual(
      entry['artifacts'], {
          'logs.txt': ['gs://artifacts/logs1.txt',
                       'gs://artifacts/logs2.txt'],
          'trace.json': ['gs://artifacts/trace2.json']
      })
def testFaliedAndSippedTestCases(self):
  """Unexpected FAIL and SKIP runs mark the test case as unexpected."""
  # NOTE(review): the method name has typos (should read
  # "FailedAndSkipped"); kept as-is so test-runner name filters and
  # history are unaffected.
  converted = self.Convert([
      testing.TestResult('benchmark/story1', status='PASS'),
      testing.TestResult('benchmark/story2', status='PASS'),
      testing.TestResult('benchmark/story1', status='FAIL'),
      testing.TestResult('benchmark/story2', status='SKIP', expected=False),
  ])
  entry = self.FindTestResult(converted, 'benchmark', 'story1')
  self.assertEqual(entry['actual'], 'PASS FAIL')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertTrue(entry['is_unexpected'])
  entry = self.FindTestResult(converted, 'benchmark', 'story2')
  self.assertEqual(entry['actual'], 'PASS SKIP')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertTrue(entry['is_unexpected'])
  self.assertEqual(converted['num_failures_by_type'],
                   {'PASS': 2, 'SKIP': 1, 'FAIL': 1})
def testRepeatedTestCases(self):
  """Repeated runs accumulate 'times'; 'time' is the first run's duration."""
  converted = self.Convert([
      testing.TestResult('benchmark/story1', status='PASS',
                         run_duration='1.2s'),
      testing.TestResult('benchmark/story2', status='SKIP'),
      testing.TestResult('benchmark/story1', status='PASS',
                         run_duration='3.4s'),
      testing.TestResult('benchmark/story2', status='SKIP'),
  ])
  entry = self.FindTestResult(converted, 'benchmark', 'story1')
  self.assertEqual(entry['actual'], 'PASS')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertEqual(entry['times'], [1.2, 3.4])
  self.assertEqual(entry['time'], 1.2)
  entry = self.FindTestResult(converted, 'benchmark', 'story2')
  self.assertEqual(entry['actual'], 'SKIP')
  self.assertEqual(entry['expected'], 'SKIP')
  # Every individual run is tallied, not just unique test cases.
  self.assertEqual(converted['num_failures_by_type'],
                   {'PASS': 2, 'SKIP': 2})
def testJson3Output(self):
  """End-to-end json-test-results output carries times, shard and epoch."""
  self.SerializeIntermediateResults(
      testing.TestResult('benchmark/story', run_duration='1.1s',
                         tags=['shard:7'],
                         start_time='2009-02-13T23:31:30.987000Z'),
      testing.TestResult('benchmark/story', run_duration='1.2s',
                         tags=['shard:7']),
  )
  processor.main([
      '--output-format', 'json-test-results',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir
  ])
  output_file = os.path.join(self.output_dir, json3_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    data = json.load(f)
  self.assertFalse(data['interrupted'])
  self.assertEqual(data['num_failures_by_type'], {'PASS': 2})
  # seconds_since_epoch comes from the earliest start_time above.
  self.assertEqual(data['seconds_since_epoch'], 1234567890.987)
  self.assertEqual(data['version'], 3)
  self.assertIn('benchmark', data['tests'])
  self.assertIn('story', data['tests']['benchmark'])
  entry = data['tests']['benchmark']['story']
  self.assertEqual(entry['actual'], 'PASS')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertEqual(entry['times'], [1.1, 1.2])
  self.assertEqual(entry['time'], 1.1)
  self.assertEqual(entry['shard'], 7)
def testMaxValuesPerTestCase(self):
  """Stories exceeding --max-values-per-test-case are turned into failures."""
  def _Measurements(count):
    # `count` distinct single-sample measurements named n0..n{count-1}.
    return self.CreateMeasurementsArtifact({
        'n%d' % i: {'unit': 'count', 'samples': [i]}
        for i in range(count)})

  self.SerializeIntermediateResults(
      testing.TestResult('benchmark/story1', status='PASS',
                         output_artifacts=[_Measurements(3)]),
      testing.TestResult('benchmark/story2', status='PASS',
                         output_artifacts=[_Measurements(7)]),
  )
  exit_code = processor.main([
      '--output-format', 'json-test-results',
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--max-values-per-test-case', '5'
  ])
  # The over-limit story makes the whole run fail.
  self.assertEqual(exit_code, 1)
  output_file = os.path.join(self.output_dir, json3_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    data = json.load(f)
  # story1 (3 values) is within the limit of 5; story2 (7 values) is not.
  self.assertEqual(data['tests']['benchmark']['story1']['actual'], 'PASS')
  self.assertEqual(data['tests']['benchmark']['story2']['actual'], 'FAIL')
  self.assertTrue(data['tests']['benchmark']['story2']['is_unexpected'])
def testUploadArtifacts(self):
  """All artifacts across all results are uploaded and get a remote URL."""
  in_results = testing.IntermediateResults(test_results=[
      testing.TestResult(
          'benchmark/story',
          output_artifacts={'log': testing.Artifact('/log.log')},
      ),
      testing.TestResult(
          'benchmark/story',
          output_artifacts={
              'trace.html': testing.Artifact('/trace.html'),
              'screenshot': testing.Artifact('/screenshot.png'),
          },
      ),
  ],)
  with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
    cloud_patch.return_value = 'gs://url'
    processor.UploadArtifacts(in_results, 'bucket', None)
    cloud_patch.assert_has_calls(
        [
            mock.call('bucket', mock.ANY, '/log.log'),
            mock.call('bucket', mock.ANY, '/trace.html'),
            mock.call('bucket', mock.ANY, '/screenshot.png'),
        ],
        any_order=True,
    )
  # Fix: dict.itervalues() is Python 2-only and raises AttributeError on
  # Python 3; values() works on both (and matches the sibling
  # testUploadArtifacts that already uses values()).
  for result in in_results['testResults']:
    for artifact in result['outputArtifacts'].values():
      self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testRepeatedTestCaseWithArtifacts(self):
  """Local artifact paths are reported relative to the base directory."""
  self.base_dir = 'base'
  converted = self.Convert([
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              'logs.txt': testing.Artifact('base/artifacts/logs1.txt')
          }),
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              'logs.txt': testing.Artifact('base/artifacts/logs2.txt'),
              'trace.json': testing.Artifact('base/artifacts/trace2.json')
          }),
  ])
  entry = self.FindTestResult(converted, 'benchmark', 'story1')
  self.assertEqual(entry['actual'], 'PASS')
  self.assertEqual(entry['expected'], 'PASS')
  # The 'base/' prefix is stripped; repeats append under the same name.
  self.assertEqual(
      entry['artifacts'], {
          'logs.txt': ['artifacts/logs1.txt', 'artifacts/logs2.txt'],
          'trace.json': ['artifacts/trace2.json']
      })
def testExitCodeSomeSkipped(self):
  """Expected skips do not make the processor exit with an error."""
  self.SerializeIntermediateResults(
      testing.TestResult('benchmark/story', status='SKIP'),
      testing.TestResult('benchmark/story', status='PASS'),
  )
  exit_code = processor.main([
      '--output-format', 'json-test-results',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir
  ])
  self.assertEqual(exit_code, 0)
def testExitCodeHasFailures(self):
  """A failed run makes the processor exit non-zero."""
  # Fix: every sibling test (testExitCodeSomeSkipped, testJson3Output, ...)
  # passes results to SerializeIntermediateResults as varargs; here they
  # were wrapped in a list, which would be treated as a single (invalid)
  # result by a *test_results signature. Pass them unpacked instead.
  self.SerializeIntermediateResults(
      testing.TestResult('benchmark/story', status='PASS'),
      testing.TestResult('benchmark/story', status='FAIL'),
  )
  exit_code = processor.main([
      '--output-format', 'json-test-results',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir
  ])
  self.assertEqual(exit_code, 1)
def testGetTraceUrlLocal(self):
  """A trace with no remote URL is addressed via a file:// URL."""
  local_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={'trace.html': testing.Artifact('trace.html')},
  )
  self.assertEqual(processor.GetTraceUrl(local_result), 'file://trace.html')
def testUrlAsStoryName(self):
  """Percent-encoded story names are decoded in the converted output."""
  converted = self.Convert(
      [testing.TestResult('benchmark/http%3A%2F%2Fexample.com')])
  # %3A%2F%2F decodes to '://'.
  entry = self.FindTestResult(converted, 'benchmark', 'http://example.com')
  self.assertEqual(entry['actual'], 'PASS')
def testUploadArtifacts(self):
  """Artifacts upload under <run>/<benchmark>/<story>/<name> remote paths."""
  test_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={
          'logs': testing.Artifact('/log.log'),
          'trace.html': testing.Artifact('/trace.html'),
          'screenshot': testing.Artifact('/screenshot.png'),
      },
  )
  with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
    cloud_patch.return_value = 'gs://url'
    processor.UploadArtifacts(test_result, 'bucket', 'run1')
    cloud_patch.assert_has_calls(
        [
            mock.call('bucket', 'run1/benchmark/story/logs', '/log.log'),
            mock.call('bucket', 'run1/benchmark/story/trace.html',
                      '/trace.html'),
            mock.call('bucket', 'run1/benchmark/story/screenshot',
                      '/screenshot.png'),
        ],
        any_order=True,
    )
  # Fix: dict.itervalues() is Python 2-only and raises AttributeError on
  # Python 3; values() is equivalent here and works on both.
  for artifact in test_result['outputArtifacts'].values():
    self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testAggregateTraces(self):
  """Multiple trace fragments are serialized into one trace.html artifact."""
  test_result = testing.TestResult(
      'benchmark/story2',
      output_artifacts={
          'trace/1.json': testing.Artifact(
              os.path.join('test_run', 'story2', 'trace', '1.json')),
          'trace/2.json': testing.Artifact(
              os.path.join('test_run', 'story2', 'trace', '2.json')),
      },
  )
  serialize_method = 'tracing.trace_data.trace_data.SerializeAsHtml'
  with mock.patch(serialize_method) as mock_serialize:
    processor.AggregateTraces(test_result)
  self.assertEqual(mock_serialize.call_count, 1)
  trace_files, file_path = mock_serialize.call_args[0][:2]
  # Serialization order is not guaranteed, so compare as sets.
  self.assertEqual(
      set(trace_files),
      set([
          os.path.join('test_run', 'story2', 'trace', '1.json'),
          os.path.join('test_run', 'story2', 'trace', '2.json'),
      ]),
  )
  self.assertEqual(
      file_path,
      os.path.join('test_run', 'story2', 'trace', 'trace.html'),
  )
  artifacts = test_result['outputArtifacts']
  self.assertEqual(len(artifacts), 1)
  # Fix: artifacts.keys()[0] is Python 2-only — on Python 3 keys() returns
  # a non-subscriptable view. next(iter(...)) works on both.
  self.assertEqual(next(iter(artifacts)), 'trace.html')
def testJson3OutputWithArtifacts(self):
  """Only artifacts with remote URLs appear in the json3 output."""
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts={
              'logs': testing.Artifact('/logs.txt', 'gs://logs.txt'),
              'trace/telemetry': testing.Artifact('/telemetry.json'),
              'trace.html':
                  testing.Artifact('/trace.html', 'gs://trace.html'),
          }),
  )
  processor.main([
      '--output-format', 'json-test-results',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir
  ])
  output_file = os.path.join(self.output_dir, json3_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    data = json.load(f)
  self.assertIn('benchmark', data['tests'])
  self.assertIn('story', data['tests']['benchmark'])
  self.assertIn('artifacts', data['tests']['benchmark']['story'])
  artifacts = data['tests']['benchmark']['story']['artifacts']
  # 'trace/telemetry' has no remote URL, so only two artifacts remain.
  self.assertEqual(len(artifacts), 2)
  self.assertEqual(artifacts['logs'], ['gs://logs.txt'])
  self.assertEqual(artifacts['trace.html'], ['gs://trace.html'])
def testHistogramsOutputMeasurements(self):
  """Ad-hoc measurements become histograms with story/benchmark diagnostics."""
  measurements = {
      'a': {'unit': 'ms', 'samples': [4, 6], 'description': 'desc_a'},
      'b': {'unit': 'ms', 'samples': [5], 'description': 'desc_b'},
  }
  start_ts = 1500000000
  start_iso = datetime.datetime.utcfromtimestamp(start_ts).isoformat() + 'Z'
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateMeasurementsArtifact(measurements),
          ],
          tags=['story_tag:test'],
          start_time=start_iso,
      ),
  )
  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
  ])
  output_file = os.path.join(
      self.output_dir, histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)
  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  self.assertEqual(len(out_histograms), 2)
  # Both measurements get the same diagnostics; check each data-driven.
  expectations = [
      ('a', [4, 6], 'desc_a'),
      ('b', [5], 'desc_b'),
  ]
  for name, samples, description in expectations:
    hist = out_histograms.GetHistogramNamed(name)
    self.assertEqual(hist.name, name)
    self.assertEqual(hist.unit, 'ms_smallerIsBetter')
    self.assertEqual(hist.sample_values, samples)
    self.assertEqual(hist.description, description)
    self.assertEqual(hist.diagnostics['benchmarks'],
                     generic_set.GenericSet(['benchmark']))
    self.assertEqual(hist.diagnostics['stories'],
                     generic_set.GenericSet(['story']))
    self.assertEqual(hist.diagnostics['storyTags'],
                     generic_set.GenericSet(['test']))
    self.assertEqual(hist.diagnostics['benchmarkStart'],
                     date_range.DateRange(start_ts * 1e3))
def testCsvOutputAppendResults(self):
  """A second CSV run appends its rows above the previous run's rows."""
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateHtmlTraceArtifact(),
          ],
          tags=['tbmv2:sampleMetric'],
      ),
  )
  # Run the processor twice with distinct labels over the same results.
  for label in ('label1', 'label2'):
    processor.main([
        '--output-format', 'csv',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', label,
    ])
  sample_rows = self.ReadSampleHistogramsFromCsv()
  self.assertEqual(len(sample_rows), 2)
  # Most recent run comes first.
  self.assertEqual(sample_rows[0]['displayLabel'], 'label2')
  self.assertEqual(sample_rows[1]['displayLabel'], 'label1')
def testCsvOutputAppendResults(self):
  """Re-running with csv output prepends new rows after the header."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              artifacts={
                  'histogram_dicts.json': testing.Artifact(hist_file)
              },
          ),
      ],
  )
  # Two processor invocations over the same intermediate results.
  for label in ('label1', 'label2'):
    processor.main([
        '--output-format', 'csv',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', label,
    ])
  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    lines = f.readlines()
  # Header plus one row per run, newest run first.
  self.assertEqual(len(lines), 3)
  self.assertIn('label2', lines[1])
  self.assertIn('label1', lines[2])
def testUploadArtifacts(self):
  """Uploads go to retry-scoped paths and record fetch/view URLs."""
  test_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={
          'logs': testing.Artifact('/log.log'),
          'trace.html': testing.Artifact('/trace.html'),
          'screenshot': testing.Artifact('/screenshot.png'),
      },
  )
  with mock.patch('py_utils.cloud_storage.Upload') as upload_mock:
    upload_mock.return_value = processor.cloud_storage.CloudFilepath(
        'bucket', 'path')
    processor.UploadArtifacts(test_result, 'bucket', 'run1')
    upload_mock.assert_has_calls(
        [
            mock.call('bucket', 'run1/benchmark/story/retry_0/logs',
                      '/log.log'),
            mock.call('bucket', 'run1/benchmark/story/retry_0/trace.html',
                      '/trace.html'),
            mock.call('bucket', 'run1/benchmark/story/retry_0/screenshot',
                      '/screenshot.png'),
        ],
        any_order=True,
    )
  # Every artifact is annotated with the URLs derived from the upload.
  for artifact in test_result['outputArtifacts'].values():
    self.assertEqual(artifact['fetchUrl'], 'gs://bucket/path')
    self.assertEqual(
        artifact['viewUrl'],
        'https://console.developers.google.com'
        '/m/cloudstorage/b/bucket/o/path')
def testHistogramsOutputNoAggregatedTrace(self):
  """A raw JSON trace alone still lets TBMv2 metrics produce histograms."""
  json_trace = os.path.join(self.output_dir, 'trace.json')
  with open(json_trace, 'w') as f:
    json.dump({'traceEvents': []}, f)
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts={'trace/json': testing.Artifact(json_trace)},
          tags=['tbmv2:sampleMetric'],
      ),
  )
  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
  ])
  output_file = os.path.join(
      self.output_dir, histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)
  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  # sampleMetric records a histogram with the name 'foo'.
  hist = out_histograms.GetHistogramNamed('foo')
  self.assertIsNotNone(hist)
  self.assertIn('traceUrls', hist.diagnostics)
def testAggregateTraces(self):
  """Each story's trace fragments are aggregated into one trace.html."""
  in_results = testing.IntermediateResults(test_results=[
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              'trace/1.json': testing.Artifact(
                  os.path.join('test_run', 'story1', 'trace', '1.json')),
          },
      ),
      testing.TestResult(
          'benchmark/story2',
          output_artifacts={
              'trace/1.json': testing.Artifact(
                  os.path.join('test_run', 'story2', 'trace', '1.json')),
              'trace/2.json': testing.Artifact(
                  os.path.join('test_run', 'story2', 'trace', '2.json')),
          },
      ),
  ],)
  with mock.patch(
      'tracing.trace_data.trace_data.SerializeAsHtml') as patch:
    processor.AggregateTraces(in_results)
  call_list = [list(call[0]) for call in patch.call_args_list]
  self.assertEqual(len(call_list), 2)
  # Fragment order within a call is not guaranteed; compare as sets.
  for call in call_list:
    call[0] = set(call[0])
  self.assertIn([
      set([os.path.join('test_run', 'story1', 'trace', '1.json')]),
      os.path.join('test_run', 'story1', 'trace', 'trace.html'),
  ], call_list)
  self.assertIn([
      set([
          os.path.join('test_run', 'story2', 'trace', '1.json'),
          os.path.join('test_run', 'story2', 'trace', '2.json'),
      ]),
      os.path.join('test_run', 'story2', 'trace', 'trace.html'),
  ], call_list)
  for result in in_results['testResults']:
    artifacts = result['outputArtifacts']
    self.assertEqual(len(artifacts), 1)
    # Fix: artifacts.keys()[0] is Python 2-only — on Python 3 keys()
    # returns a non-subscriptable view. next(iter(...)) works on both.
    self.assertEqual(next(iter(artifacts)), 'trace.html')
def testAmortizeProcessingDuration_UndefinedDuration(self):
  """Results without a runDuration are left untouched by amortization."""
  results = [testing.TestResult('benchmark/story')]
  del results[0]['runDuration']
  # pylint: disable=protected-access
  processor._AmortizeProcessingDuration(1.0, results)
  # pylint: enable=protected-access
  # No duration is synthesized and no result is added or dropped.
  self.assertNotIn('runDuration', results[0])
  self.assertEqual(len(results), 1)
def testTwoTestCases(self):
  """Distinct stories are converted independently, keeping their shards."""
  converted = self.Convert([
      testing.TestResult('benchmark/story1', tags=['shard:7']),
      testing.TestResult('benchmark/story2', tags=['shard:3'])
  ])
  # shard comes from the 'shard:<n>' tag on each result.
  for story, shard in (('story1', 7), ('story2', 3)):
    entry = self.FindTestResult(converted, 'benchmark', story)
    self.assertEqual(entry['actual'], 'PASS')
    self.assertEqual(entry['expected'], 'PASS')
    self.assertEqual(entry['shard'], shard)
  self.assertEqual(converted['num_failures_by_type'], {'PASS': 2})
def testHistogramsOutput(self):
  """Histograms output carries diagnostics, labels and uploaded trace URLs."""
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts=[
              self.CreateHtmlTraceArtifact(),
              self.CreateDiagnosticsArtifact(
                  benchmarks=['benchmark'],
                  osNames=['linux'],
                  documentationUrls=[['documentation', 'url']])
          ],
          tags=['tbmv2:sampleMetric'],
          start_time='2009-02-13T23:31:30.987000Z',
      ),
  )
  with mock.patch('py_utils.cloud_storage.Upload') as upload_mock:
    upload_mock.return_value = processor.cloud_storage.CloudFilepath(
        bucket='bucket', remote_path='trace.html')
    processor.main([
        '--is-unittest',
        '--output-format', 'histograms',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', 'label',
        '--upload-results',
    ])
  output_file = os.path.join(
      self.output_dir, histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)
  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  hist = out_histograms.GetHistogramNamed(SAMPLE_HISTOGRAM_NAME)
  self.assertEqual(hist.unit, SAMPLE_HISTOGRAM_UNIT)
  # Diagnostics from the artifact, plus label and start time.
  self.assertEqual(hist.diagnostics['benchmarks'],
                   generic_set.GenericSet(['benchmark']))
  self.assertEqual(hist.diagnostics['osNames'],
                   generic_set.GenericSet(['linux']))
  self.assertEqual(hist.diagnostics['documentationUrls'],
                   generic_set.GenericSet([['documentation', 'url']]))
  self.assertEqual(hist.diagnostics['labels'],
                   generic_set.GenericSet(['label']))
  self.assertEqual(hist.diagnostics['benchmarkStart'],
                   date_range.DateRange(1234567890987))
  # traceUrls points at the (mocked) cloud-storage upload location.
  self.assertEqual(
      hist.diagnostics['traceUrls'],
      generic_set.GenericSet([
          'https://console.developers.google.com'
          '/m/cloudstorage/b/bucket/o/trace.html'
      ]))
def testAmortizeProcessingDuration_OneResult(self):
  """A single result absorbs the whole processing overhead."""
  results = [testing.TestResult('benchmark/story', run_duration='1.0s')]
  # pylint: disable=protected-access
  processor._AmortizeProcessingDuration(1.0, results)
  # pylint: enable=protected-access
  # 1.0s original duration + 1.0s overhead = 2.0s.
  self.assertEqual(str(results[0]['runDuration']), '2.0s')
  self.assertEqual(len(results), 1)
def testStartTime(self):
  """ISO-8601 start_time is reported as seconds since the Unix epoch."""
  converted = self.Convert([
      testing.TestResult('benchmark/story',
                         start_time='2009-02-13T23:31:30.987000Z')
  ])
  self.assertFalse(converted['interrupted'])
  self.assertEqual(converted['path_delimiter'], '/')
  # 2009-02-13T23:31:30.987Z is 1234567890.987 seconds after the epoch.
  self.assertEqual(converted['seconds_since_epoch'], 1234567890.987)
  self.assertEqual(converted['version'], 3)
def testConvertTwoStories(self):
  """Histogram dicts from repeated runs of two stories are all merged."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)
  # Two runs each of story1 and story2, all sharing the same dicts file.
  in_results = testing.IntermediateResults(test_results=[
      testing.TestResult(
          'benchmark/%s' % story,
          artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
      )
      for story in ('story1', 'story2', 'story1', 'story2')
  ],)
  histogram_dicts = histograms_output.Convert(in_results,
                                              results_label='label')
  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(histogram_dicts)
  # One histogram per run.
  self.assertEqual(len(out_histograms), 4)
  hist = out_histograms.GetFirstHistogram()
  self.assertEqual(hist.name, 'a')
  self.assertEqual(hist.unit, 'unitless')
  self.assertEqual(list(hist.diagnostics['labels']), ['label'])
def testSingleTestCase(self):
  """A single passing run converts to one PASS entry with its duration."""
  converted = self.Convert(
      [testing.TestResult('benchmark/story', run_duration='1.2s')])
  entry = self.FindTestResult(converted, 'benchmark', 'story')
  self.assertEqual(entry['actual'], 'PASS')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertEqual(entry['times'], [1.2])
  self.assertEqual(entry['time'], 1.2)
  # No shard tag was supplied, so no shard field is emitted.
  self.assertNotIn('shard', entry)
  self.assertEqual(converted['num_failures_by_type'], {'PASS': 1})
def testDedupedStatus(self):
  """Repeated identical statuses dedupe for PASS/SKIP but not FAIL."""
  converted = self.Convert([
      testing.TestResult('benchmark/story1', status='PASS'),
      testing.TestResult('benchmark/story2', status='SKIP'),
      testing.TestResult('benchmark/story3', status='FAIL'),
      testing.TestResult('benchmark/story1', status='PASS'),
      testing.TestResult('benchmark/story2', status='SKIP'),
      testing.TestResult('benchmark/story3', status='FAIL'),
  ])
  entry = self.FindTestResult(converted, 'benchmark', 'story1')
  self.assertEqual(entry['actual'], 'PASS')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertFalse(entry['is_unexpected'])
  entry = self.FindTestResult(converted, 'benchmark', 'story2')
  self.assertEqual(entry['actual'], 'SKIP')
  self.assertEqual(entry['expected'], 'SKIP')
  self.assertFalse(entry['is_unexpected'])
  entry = self.FindTestResult(converted, 'benchmark', 'story3')
  # Failures are kept per-run so retries remain visible.
  self.assertEqual(entry['actual'], 'FAIL FAIL')
  self.assertEqual(entry['expected'], 'PASS')
  self.assertTrue(entry['is_unexpected'])
  self.assertEqual(converted['num_failures_by_type'],
                   {'PASS': 2, 'SKIP': 2, 'FAIL': 2})
def testCsvOutput(self):
  """CSV output expands a histogram's stats and diagnostics into columns."""
  hist_file = os.path.join(self.output_dir,
                           compute_metrics.HISTOGRAM_DICTS_FILE)
  test_hist = histogram.Histogram('a', 'ms')
  test_hist.AddSample(3000)
  with open(hist_file, 'w') as f:
    json.dump([test_hist.AsDict()], f)
  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  'histogram_dicts.json': testing.Artifact(hist_file)
              },
          ),
      ],
      diagnostics={
          'benchmarks': ['benchmark'],
          'osNames': ['linux'],
          'documentationUrls': [['documentation', 'url']],
      },
  )
  processor.main([
      '--output-format', 'csv',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])
  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    rows = f.readlines()
  # Transpose (header, data) rows into (column, value) pairs.
  actual = list(zip(*csv.reader(rows)))
  expected = [
      ('name', 'a'), ('unit', 'ms'), ('avg', '3000'), ('count', '1'),
      ('max', '3000'), ('min', '3000'), ('std', '0'), ('sum', '3000'),
      ('architectures', ''), ('benchmarks', 'benchmark'),
      ('benchmarkStart', ''), ('bots', ''), ('builds', ''),
      ('deviceIds', ''), ('displayLabel', 'label'), ('masters', ''),
      ('memoryAmounts', ''), ('osNames', 'linux'), ('osVersions', ''),
      ('productVersions', ''), ('stories', ''), ('storysetRepeats', ''),
      ('traceStart', ''), ('traceUrls', ''),
  ]
  self.assertEqual(actual, expected)