def testRepeatedTestCaseWithArtifacts(self):
  """Artifacts from repeated runs of one story are merged per name."""
  self.base_dir = 'base'
  first_run = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          'logs.txt': testing.Artifact('base/artifacts/logs1.txt'),
      })
  second_run = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          'logs.txt': testing.Artifact('base/artifacts/logs2.txt'),
          'trace.json': testing.Artifact('base/artifacts/trace2.json'),
      })
  results = self.Convert([first_run, second_run])

  test_result = self.FindTestResult(results, 'benchmark', 'story1')
  self.assertEqual(test_result['actual'], 'PASS')
  self.assertEqual(test_result['expected'], 'PASS')
  # Paths are listed relative to base_dir, grouped by artifact name.
  self.assertEqual(
      test_result['artifacts'], {
          'logs.txt': ['artifacts/logs1.txt', 'artifacts/logs2.txt'],
          'trace.json': ['artifacts/trace2.json'],
      })
def testUploadArtifacts(self):
  """UploadArtifacts uploads every artifact and records its remote URL."""
  in_results = testing.IntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={'log': testing.Artifact('/log.log')},
          ),
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  'trace.html': testing.Artifact('/trace.html'),
                  'screenshot': testing.Artifact('/screenshot.png'),
              },
          ),
      ],
  )

  with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
    cloud_patch.return_value = 'gs://url'
    processor.UploadArtifacts(in_results, 'bucket', None)
    cloud_patch.assert_has_calls(
        [
            mock.call('bucket', mock.ANY, '/log.log'),
            mock.call('bucket', mock.ANY, '/trace.html'),
            mock.call('bucket', mock.ANY, '/screenshot.png'),
        ],
        any_order=True,
    )

  for result in in_results['testResults']:
    # dict.itervalues() was removed in Python 3; values() works on both
    # Python 2 and 3.
    for artifact in result['outputArtifacts'].values():
      self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testRemoteArtifacts(self):
  """Converted artifacts list the remote URLs when they are available."""
  first_run = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          'logs.txt': testing.Artifact(
              'base/artifacts/logs1.txt',
              'https://example.org/artifacts/logs1.txt'),
      })
  second_run = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          'logs.txt': testing.Artifact(
              'base/artifacts/logs2.txt',
              'https://example.org/artifacts/logs2.txt'),
          'trace.json': testing.Artifact(
              'base/artifacts/trace2.json',
              'https://example.org/artifacts/trace2.json'),
      })
  results = self.Convert([first_run, second_run])

  test_result = self.FindTestResult(results, 'benchmark', 'story1')
  self.assertEqual(test_result['actual'], 'PASS')
  self.assertEqual(test_result['expected'], 'PASS')
  self.assertEqual(
      test_result['artifacts'], {
          'logs.txt': [
              'https://example.org/artifacts/logs1.txt',
              'https://example.org/artifacts/logs2.txt',
          ],
          'trace.json': ['https://example.org/artifacts/trace2.json'],
      })
def testComputeTBMv2Metrics(self):
  """ComputeTBMv2Metrics gathers histograms from each test result."""
  story1 = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          compute_metrics.HTML_TRACE_NAME:
              testing.Artifact('/trace1.html', 'gs://trace1.html')
      },
      tags=['tbmv2:metric1'],
  )
  story2 = testing.TestResult(
      'benchmark/story2',
      output_artifacts={
          compute_metrics.HTML_TRACE_NAME:
              testing.Artifact('/trace2.html', 'gs://trace2.html')
      },
      tags=['tbmv2:metric2'],
  )
  in_results = testing.IntermediateResults([story1, story2])

  test_dict = histogram.Histogram('a', 'unitless').AsDict()
  metrics_result = mre_result.MreResult()
  metrics_result.AddPair('histograms', [test_dict])

  with mock.patch(GETSIZE_METHOD) as getsize_mock, \
      mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
    # A small trace size keeps the computation below the size cutoff.
    getsize_mock.return_value = 1000
    run_metrics_mock.return_value = metrics_result
    histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)

  # One histogram per story, and both stories pass.
  self.assertEqual(histogram_dicts, [test_dict, test_dict])
  self.assertEqual(in_results['testResults'][0]['status'], 'PASS')
  self.assertEqual(in_results['testResults'][1]['status'], 'PASS')
def testUploadArtifacts(self):
  """Each artifact is uploaded under the run/benchmark/story/retry prefix."""
  test_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={
          'logs': testing.Artifact('/log.log'),
          'trace.html': testing.Artifact('/trace.html'),
          'screenshot': testing.Artifact('/screenshot.png'),
      },
  )

  with mock.patch('py_utils.cloud_storage.Upload') as cloud_patch:
    cloud_patch.return_value = processor.cloud_storage.CloudFilepath(
        'bucket', 'path')
    processor.UploadArtifacts(test_result, 'bucket', 'run1')
    expected_calls = [
        mock.call('bucket', 'run1/benchmark/story/retry_0/logs',
                  '/log.log'),
        mock.call('bucket', 'run1/benchmark/story/retry_0/trace.html',
                  '/trace.html'),
        mock.call('bucket', 'run1/benchmark/story/retry_0/screenshot',
                  '/screenshot.png'),
    ]
    cloud_patch.assert_has_calls(expected_calls, any_order=True)

  # All artifacts record the URLs derived from the CloudFilepath.
  for artifact in test_result['outputArtifacts'].values():
    self.assertEqual(artifact['fetchUrl'], 'gs://bucket/path')
    self.assertEqual(
        artifact['viewUrl'],
        'https://console.developers.google.com'
        '/m/cloudstorage/b/bucket/o/path')
def testAggregateTraces(self):
  """AggregateTraces merges trace chunks into a single trace.html artifact."""
  test_result = testing.TestResult(
      'benchmark/story2',
      output_artifacts={
          'trace/1.json': testing.Artifact(
              os.path.join('test_run', 'story2', 'trace', '1.json')),
          'trace/2.json': testing.Artifact(
              os.path.join('test_run', 'story2', 'trace', '2.json')),
      },
  )

  serialize_method = 'tracing.trace_data.trace_data.SerializeAsHtml'
  with mock.patch(serialize_method) as mock_serialize:
    processor.AggregateTraces(test_result)

  self.assertEqual(mock_serialize.call_count, 1)
  trace_files, file_path = mock_serialize.call_args[0][:2]
  # The order of trace files passed to the serializer is not guaranteed.
  self.assertEqual(
      set(trace_files),
      set([
          os.path.join('test_run', 'story2', 'trace', '1.json'),
          os.path.join('test_run', 'story2', 'trace', '2.json'),
      ]),
  )
  self.assertEqual(
      file_path,
      os.path.join('test_run', 'story2', 'trace', 'trace.html'),
  )

  artifacts = test_result['outputArtifacts']
  self.assertEqual(len(artifacts), 1)
  # dict.keys() is not subscriptable in Python 3 (keys()[0] raises
  # TypeError); take the single remaining key via iteration instead.
  self.assertEqual(next(iter(artifacts)), 'trace.html')
def testUploadArtifacts(self):
  """UploadArtifacts uploads each artifact and stores its remote URL."""
  test_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={
          'logs': testing.Artifact('/log.log'),
          'trace.html': testing.Artifact('/trace.html'),
          'screenshot': testing.Artifact('/screenshot.png'),
      },
  )

  with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch:
    cloud_patch.return_value = 'gs://url'
    processor.UploadArtifacts(test_result, 'bucket', 'run1')
    cloud_patch.assert_has_calls(
        [
            mock.call('bucket', 'run1/benchmark/story/logs', '/log.log'),
            mock.call('bucket', 'run1/benchmark/story/trace.html',
                      '/trace.html'),
            mock.call('bucket', 'run1/benchmark/story/screenshot',
                      '/screenshot.png'),
        ],
        any_order=True,
    )

  # dict.itervalues() was removed in Python 3; values() works on both
  # Python 2 and 3.
  for artifact in test_result['outputArtifacts'].values():
    self.assertEqual(artifact['remoteUrl'], 'gs://url')
def testJson3OutputWithArtifacts(self):
  """json-test-results output lists artifacts that have remote URLs."""
  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts={
              'logs': testing.Artifact('/logs.txt', 'gs://logs.txt'),
              'trace/telemetry': testing.Artifact('/telemetry.json'),
              'trace.html': testing.Artifact('/trace.html',
                                             'gs://trace.html'),
          }),
  )

  processor.main([
      '--output-format', 'json-test-results',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
  ])

  output_file = os.path.join(self.output_dir, json3_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)

  self.assertIn('benchmark', results['tests'])
  self.assertIn('story', results['tests']['benchmark'])
  self.assertIn('artifacts', results['tests']['benchmark']['story'])
  artifacts = results['tests']['benchmark']['story']['artifacts']
  # Only the two artifacts with remote URLs are expected in the output;
  # the local-only 'trace/telemetry' artifact does not appear.
  self.assertEqual(len(artifacts), 2)
  self.assertEqual(artifacts['logs'], ['gs://logs.txt'])
  self.assertEqual(artifacts['trace.html'], ['gs://trace.html'])
def CreateMeasurementsArtifact(self, measurements):
  """Dump the given measurements into a temp file.

  Returns:
    An (artifact_name, testing.Artifact) pair for the written file.
  """
  artifact_file = tempfile.NamedTemporaryFile(
      dir=self.intermediate_dir, delete=False, mode='w')
  with artifact_file:
    json.dump({'measurements': measurements}, artifact_file)
  return processor.MEASUREMENTS_NAME, testing.Artifact(artifact_file.name)
def CreateDiagnosticsArtifact(self, **diagnostics):
  """Write keyword diagnostics to a temp file.

  Returns:
    An (artifact_name, testing.Artifact) pair for the written file.
  """
  artifact_file = tempfile.NamedTemporaryFile(
      dir=self.intermediate_dir, delete=False, mode='w')
  with artifact_file:
    json.dump({'diagnostics': diagnostics}, artifact_file)
  return processor.DIAGNOSTICS_NAME, testing.Artifact(artifact_file.name)
def testGetTraceUrlLocal(self):
  """A trace artifact without a remote URL yields a file:// URL."""
  trace_artifact = testing.Artifact('trace.html')
  test_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={'trace.html': trace_artifact},
  )
  self.assertEqual(processor.GetTraceUrl(test_result), 'file://trace.html')
def testCsvOutputAppendResults(self):
  """A second csv run without --reset-results keeps the earlier rows."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              artifacts={
                  'histogram_dicts.json': testing.Artifact(hist_file)
              },
          ),
      ],
  )

  for label in ('label1', 'label2'):
    processor.main([
        '--output-format', 'csv',
        '--output-dir', self.output_dir,
        '--intermediate-dir', self.intermediate_dir,
        '--results-label', label,
    ])

  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    lines = list(f)

  # Header plus one row per run; the newest run's row comes first.
  self.assertEqual(len(lines), 3)
  self.assertIn('label2', lines[1])
  self.assertIn('label1', lines[2])
def testHistogramsOutputNoAggregatedTrace(self):
  """A raw json trace still produces metric histograms with trace URLs."""
  json_trace = os.path.join(self.output_dir, 'trace.json')
  with open(json_trace, 'w') as f:
    json.dump({'traceEvents': []}, f)

  self.SerializeIntermediateResults(
      testing.TestResult(
          'benchmark/story',
          output_artifacts={'trace/json': testing.Artifact(json_trace)},
          tags=['tbmv2:sampleMetric'],
      ),
  )

  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
  ])

  output_file = os.path.join(self.output_dir,
                             histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  # sampleMetric records a histogram with the name 'foo'.
  hist = out_histograms.GetHistogramNamed('foo')
  self.assertIsNotNone(hist)
  self.assertIn('traceUrls', hist.diagnostics)
def CreateProtoTraceArtifact(self):
  """Create an empty file standing in for a concatenated proto trace."""
  artifact_file = tempfile.NamedTemporaryFile(
      dir=self.intermediate_dir, delete=False)
  artifact_file.close()
  return (compute_metrics.CONCATENATED_PROTO_NAME,
          testing.Artifact(artifact_file.name))
def CreateHtmlTraceArtifact(self):
  """Create an empty file standing in for an html trace."""
  artifact_file = tempfile.NamedTemporaryFile(
      dir=self.intermediate_dir, delete=False)
  artifact_file.close()
  return (compute_metrics.HTML_TRACE_NAME,
          testing.Artifact(artifact_file.name))
def testAggregateTraces(self):
  """Each result's trace chunks are aggregated into one trace.html."""
  in_results = testing.IntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story1',
              output_artifacts={
                  'trace/1.json': testing.Artifact(
                      os.path.join('test_run', 'story1', 'trace',
                                   '1.json')),
              },
          ),
          testing.TestResult(
              'benchmark/story2',
              output_artifacts={
                  'trace/1.json': testing.Artifact(
                      os.path.join('test_run', 'story2', 'trace',
                                   '1.json')),
                  'trace/2.json': testing.Artifact(
                      os.path.join('test_run', 'story2', 'trace',
                                   '2.json')),
              },
          ),
      ],
  )

  with mock.patch(
      'tracing.trace_data.trace_data.SerializeAsHtml') as patch:
    processor.AggregateTraces(in_results)

  call_list = [list(call[0]) for call in patch.call_args_list]
  self.assertEqual(len(call_list), 2)
  for call in call_list:
    # The order of trace files within one serializer call is not
    # guaranteed, so compare them as sets.
    call[0] = set(call[0])
  self.assertIn([
      set([os.path.join('test_run', 'story1', 'trace', '1.json')]),
      os.path.join('test_run', 'story1', 'trace', 'trace.html'),
  ], call_list)
  self.assertIn([
      set([
          os.path.join('test_run', 'story2', 'trace', '1.json'),
          os.path.join('test_run', 'story2', 'trace', '2.json'),
      ]),
      os.path.join('test_run', 'story2', 'trace', 'trace.html'),
  ], call_list)

  for result in in_results['testResults']:
    artifacts = result['outputArtifacts']
    self.assertEqual(len(artifacts), 1)
    # dict.keys() is not subscriptable in Python 3 (keys()[0] raises
    # TypeError); take the single remaining key via iteration instead.
    self.assertEqual(next(iter(artifacts)), 'trace.html')
def CreateHistogramsArtifact(self, hist):
  """Create an artifact with histograms.

  Returns:
    An (artifact_name, testing.Artifact) pair for the written file.
  """
  histogram_dicts = [hist.AsDict()]
  # mode='w' (text) is required: json.dump writes str, while
  # NamedTemporaryFile defaults to binary mode ('w+b'), which raises a
  # TypeError on Python 3. This also matches the sibling helpers
  # CreateMeasurementsArtifact and CreateDiagnosticsArtifact.
  with tempfile.NamedTemporaryFile(dir=self.intermediate_dir,
                                   delete=False,
                                   mode='w') as artifact_file:
    json.dump(histogram_dicts, artifact_file)
  return (compute_metrics.HISTOGRAM_DICTS_FILE,
          testing.Artifact(artifact_file.name))
def testConvertTwoStories(self):
  """Histograms from repeated runs of two stories are all converted."""
  hist_file = os.path.join(self.output_dir,
                           histograms_output.HISTOGRAM_DICTS_NAME)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

  # Two stories, each run twice.
  test_results = [
      testing.TestResult(
          test_path,
          artifacts={'histogram_dicts.json': testing.Artifact(hist_file)},
      )
      for test_path in ('benchmark/story1', 'benchmark/story2',
                        'benchmark/story1', 'benchmark/story2')
  ]
  in_results = testing.IntermediateResults(test_results=test_results)

  histogram_dicts = histograms_output.Convert(in_results,
                                              results_label='label')

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(histogram_dicts)
  self.assertEqual(len(out_histograms), 4)
  hist = out_histograms.GetFirstHistogram()
  self.assertEqual(hist.name, 'a')
  self.assertEqual(hist.unit, 'unitless')
  self.assertEqual(list(hist.diagnostics['labels']), ['label'])
def testCsvOutput(self):
  """CSV output contains stats columns plus diagnostics columns."""
  hist_file = os.path.join(self.output_dir,
                           compute_metrics.HISTOGRAM_DICTS_FILE)
  test_hist = histogram.Histogram('a', 'ms')
  test_hist.AddSample(3000)
  with open(hist_file, 'w') as f:
    json.dump([test_hist.AsDict()], f)

  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  'histogram_dicts.json': testing.Artifact(hist_file)
              },
          ),
      ],
      diagnostics={
          'benchmarks': ['benchmark'],
          'osNames': ['linux'],
          'documentationUrls': [['documentation', 'url']],
      },
  )

  processor.main([
      '--output-format', 'csv',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])

  with open(os.path.join(self.output_dir, csv_output.OUTPUT_FILENAME)) as f:
    lines = list(f)

  # Transpose rows into (header, value) column pairs for comparison.
  actual = list(zip(*csv.reader(lines)))
  expected = [
      ('name', 'a'), ('unit', 'ms'), ('avg', '3000'), ('count', '1'),
      ('max', '3000'), ('min', '3000'), ('std', '0'), ('sum', '3000'),
      ('architectures', ''), ('benchmarks', 'benchmark'),
      ('benchmarkStart', ''), ('bots', ''), ('builds', ''),
      ('deviceIds', ''), ('displayLabel', 'label'), ('masters', ''),
      ('memoryAmounts', ''), ('osNames', 'linux'), ('osVersions', ''),
      ('productVersions', ''), ('stories', ''), ('storysetRepeats', ''),
      ('traceStart', ''), ('traceUrls', ''),
  ]
  self.assertEqual(actual, expected)
def testHistogramsOutput(self):
  """Histogram output carries shared diagnostics and the results label."""
  hist_file = os.path.join(self.output_dir,
                           compute_metrics.HISTOGRAM_DICTS_FILE)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  'histogram_dicts.json': testing.Artifact(hist_file)
              },
          ),
      ],
      diagnostics={
          'benchmarks': ['benchmark'],
          'osNames': ['linux'],
          'documentationUrls': [['documentation', 'url']],
      },
      start_time='2009-02-13T23:31:30.987000Z',
  )

  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label',
  ])

  output_file = os.path.join(self.output_dir,
                             histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  self.assertEqual(len(out_histograms), 1)
  self.assertEqual(out_histograms.GetFirstHistogram().name, 'a')
  self.assertEqual(out_histograms.GetFirstHistogram().unit, 'unitless')

  diag_values = [list(v) for v in out_histograms.shared_diagnostics]
  self.assertEqual(len(diag_values), 4)
  for expected in (['benchmark'], ['linux'],
                   [['documentation', 'url']], ['label']):
    self.assertIn(expected, diag_values)
def testHistogramsOutputResetResults(self):
  """--reset-results drops histograms from a previous processor run."""
  hist_file = os.path.join(self.output_dir,
                           compute_metrics.HISTOGRAM_DICTS_FILE)
  with open(hist_file, 'w') as f:
    json.dump([histogram.Histogram('a', 'unitless').AsDict()], f)

  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  'histogram_dicts.json': testing.Artifact(hist_file)
              },
          ),
      ],
  )

  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label1',
  ])
  # The second run resets the output instead of appending to it.
  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
      '--results-label', 'label2',
      '--reset-results',
  ])

  output_file = os.path.join(self.output_dir,
                             histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  self.assertEqual(len(out_histograms), 1)
  diag_values = [list(v) for v in out_histograms.shared_diagnostics]
  self.assertNotIn(['label1'], diag_values)
  self.assertIn(['label2'], diag_values)
def testComputeTBMv2MetricsSkipped(self):
  """A skipped test result does not trigger metric computation."""
  test_result = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          compute_metrics.HTML_TRACE_NAME:
              testing.Artifact('/trace1.html', 'gs://trace1.html')
      },
      tags=['tbmv2:metric1'],
      status='SKIP',
  )
  test_result['_histograms'] = histogram_set.HistogramSet()

  with mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
    compute_metrics.ComputeTBMv2Metrics(test_result)

  self.assertEqual(run_metrics_mock.call_count, 0)
  histogram_dicts = test_result['_histograms'].AsDicts()
  self.assertEqual(histogram_dicts, [])
  self.assertEqual(test_result['status'], 'SKIP')
def testComputeTBMv2MetricsSkipped(self):
  """No metrics are run when the only test result is skipped."""
  skipped_result = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          compute_metrics.HTML_TRACE_NAME:
              testing.Artifact('/trace1.html', 'gs://trace1.html')
      },
      tags=['tbmv2:metric1'],
      status='SKIP',
  )
  in_results = testing.IntermediateResults([skipped_result])

  with mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
    histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)

  self.assertEqual(run_metrics_mock.call_count, 0)
  self.assertEqual(histogram_dicts, [])
  self.assertEqual(in_results['testResults'][0]['status'], 'SKIP')
def testComputeTBMv2MetricsTraceTooBig(self):
  """An oversized trace fails the result instead of running metrics."""
  test_result = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          compute_metrics.HTML_TRACE_NAME:
              testing.Artifact('/trace1.html', 'gs://trace1.html')
      },
      tags=['tbmv2:metric1'],
  )
  test_result['_histograms'] = histogram_set.HistogramSet()

  with mock.patch(GETSIZE_METHOD) as getsize_mock, \
      mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
    # Report a 1 GB trace to exceed the size limit.
    getsize_mock.return_value = 1e9
    compute_metrics.ComputeTBMv2Metrics(test_result)

  self.assertEqual(run_metrics_mock.call_count, 0)
  histogram_dicts = test_result['_histograms'].AsDicts()
  self.assertEqual(histogram_dicts, [])
  self.assertEqual(test_result['status'], 'FAIL')
  self.assertFalse(test_result['expected'])
def testComputeTBMv2MetricsTraceTooBig(self):
  """Oversized traces produce no histograms and fail the result."""
  in_results = testing.IntermediateResults([
      testing.TestResult(
          'benchmark/story1',
          output_artifacts={
              compute_metrics.HTML_TRACE_NAME:
                  testing.Artifact('/trace1.html', 'gs://trace1.html')
          },
          tags=['tbmv2:metric1'],
      ),
  ])

  with mock.patch(GETSIZE_METHOD) as getsize_mock, \
      mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
    # Report a 1 GB trace to exceed the size limit.
    getsize_mock.return_value = 1e9
    histogram_dicts = compute_metrics.ComputeTBMv2Metrics(in_results)

  self.assertEqual(run_metrics_mock.call_count, 0)
  self.assertEqual(histogram_dicts, [])
  self.assertEqual(in_results['testResults'][0]['status'], 'FAIL')
def testComputeTBMv3Metrics(self):
  """Histograms returned by the TBMv3 metric land in _histograms."""
  test_result = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          compute_metrics.CONCATENATED_PROTO_NAME:
              testing.Artifact('/concatenated.pb')
      },
      tags=['tbmv3:metric'],
  )
  test_result['_histograms'] = histogram_set.HistogramSet()

  metric_result = histogram_set.HistogramSet()
  metric_result.CreateHistogram('a', 'unitless', [0])

  with mock.patch(TRACE_PROCESSOR_METRIC_METHOD) as run_metric_mock:
    run_metric_mock.return_value = metric_result
    compute_metrics.ComputeTBMv3Metrics(test_result, '/path/to/tp')

  histogram_dicts = test_result['_histograms'].AsDicts()
  self.assertEqual(histogram_dicts, metric_result.AsDicts())
  self.assertEqual(test_result['status'], 'PASS')
def testAddDiagnosticsToHistograms(self):
  """Standard diagnostics are attached to the result's histograms."""
  start_ts = 1500000000
  start_iso = datetime.datetime.utcfromtimestamp(
      start_ts).isoformat() + 'Z'
  test_result = testing.TestResult(
      'benchmark/story',
      output_artifacts={
          'trace.html': testing.Artifact('/trace.html', 'gs://trace.html'),
      },
      start_time=start_iso,
      tags=['story_tag:test'],
      result_id='3',
  )
  test_result['_histograms'] = histogram_set.HistogramSet()
  test_result['_histograms'].CreateHistogram('a', 'unitless', [0])

  processor.AddDiagnosticsToHistograms(
      test_result,
      test_suite_start=start_iso,
      results_label='label',
      test_path_format='telemetry')

  hist = test_result['_histograms'].GetFirstHistogram()
  expected_diagnostics = {
      'labels': generic_set.GenericSet(['label']),
      'benchmarks': generic_set.GenericSet(['benchmark']),
      'benchmarkStart': date_range.DateRange(start_ts * 1e3),
      'traceStart': date_range.DateRange(start_ts * 1e3),
      'stories': generic_set.GenericSet(['story']),
      'storyTags': generic_set.GenericSet(['test']),
      'storysetRepeats': generic_set.GenericSet([3]),
      'traceUrls': generic_set.GenericSet(['gs://trace.html']),
  }
  for name, value in expected_diagnostics.items():
    self.assertEqual(hist.diagnostics[name], value)
def testHistogramsOutputNoMetricsFromTelemetry(self):
  """The processor computes tagged metrics from the html trace itself."""
  trace_file = os.path.join(self.output_dir,
                            compute_metrics.HTML_TRACE_NAME)
  with open(trace_file, 'w') as f:
    pass

  self.SerializeIntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  compute_metrics.HTML_TRACE_NAME:
                      testing.Artifact(trace_file, 'gs://trace.html')
              },
              tags=['tbmv2:sampleMetric'],
          ),
      ],
  )

  processor.main([
      '--output-format', 'histograms',
      '--output-dir', self.output_dir,
      '--intermediate-dir', self.intermediate_dir,
  ])

  output_file = os.path.join(self.output_dir,
                             histograms_output.OUTPUT_FILENAME)
  with open(output_file) as f:
    results = json.load(f)

  out_histograms = histogram_set.HistogramSet()
  out_histograms.ImportDicts(results)
  # sampleMetric records a histogram with the name 'foo'.
  hist = out_histograms.GetHistogramNamed('foo')
  self.assertIsNotNone(hist)
  self.assertEqual(hist.diagnostics['traceUrls'],
                   generic_set.GenericSet(['gs://trace.html']))
def testUploadArtifacts_CheckRemoteUrl(self):
  """The upload path encodes the sanitized label, timestamp and suffix."""
  in_results = testing.IntermediateResults(
      test_results=[
          testing.TestResult(
              'benchmark/story',
              output_artifacts={
                  'trace.html': testing.Artifact('/trace.html')
              },
          ),
      ],
      start_time='2019-10-01T12:00:00.123456Z',
  )

  with mock.patch('py_utils.cloud_storage.Insert') as cloud_patch, \
      mock.patch('random.randint') as randint_patch:
    # Pin the random suffix so the expected path is deterministic.
    randint_patch.return_value = 54321
    processor.UploadArtifacts(in_results, 'bucket', 'src@abc + 123')
    cloud_patch.assert_called_once_with(
        'bucket',
        'src_abc_123_20191001T120000_54321/benchmark/story/trace.html',
        '/trace.html')
def testComputeTBMv2MetricsFailure(self):
  """A metric computation failure marks the test result as FAIL."""
  test_result = testing.TestResult(
      'benchmark/story1',
      output_artifacts={
          compute_metrics.HTML_TRACE_NAME:
              testing.Artifact('/trace1.html', 'gs://trace1.html')
      },
      tags=['tbmv2:metric1'],
  )
  test_result['_histograms'] = histogram_set.HistogramSet()

  metrics_result = mre_result.MreResult()
  metrics_result.AddFailure(failure.Failure(job.Job(0), 0, 0, 0, 0, 0))

  with mock.patch(GETSIZE_METHOD) as getsize_mock, \
      mock.patch(RUN_METRICS_METHOD) as run_metrics_mock:
    # A small trace size keeps the computation below the size cutoff.
    getsize_mock.return_value = 100
    run_metrics_mock.return_value = metrics_result
    compute_metrics.ComputeTBMv2Metrics(test_result)

  histogram_dicts = test_result['_histograms'].AsDicts()
  self.assertEqual(histogram_dicts, [])
  self.assertEqual(test_result['status'], 'FAIL')
  self.assertFalse(test_result['expected'])