def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
    """Well-formed waterfall/benchmark data plus a sharding map passes cleanly."""
    waterfall_testers = {
        'AAAAA1 AUTOGENERATED': {},
        'Android Nexus5 Perf (2)': {
            'scripts': [
                {'name': 'benchmark_name_1'},
                {'name': 'benchmark_name_2'},
            ]
        },
        'Linux Perf': {
            'isolated_scripts': [
                {'name': 'benchmark_name_2.reference'},
                {'name': 'benchmark_name_3'},
            ]
        },
    }
    benchmark_metadata = {
        'benchmark_name_1': BenchmarkMetadata('*****@*****.**'),
        'benchmark_name_2': BenchmarkMetadata('darth@deathstar'),
        'benchmark_name_3': BenchmarkMetadata('*****@*****.**'),
    }
    # Mock out content of unowned_benchmarks.txt and sharding map.
    sharding_map = {"0": {"benchmarks": {"benchmark_name_2": {}}}}
    mocked_open = mock.mock_open(read_data=json.dumps(sharding_map))
    with mock.patch('__builtin__.open', mocked_open):
        perf_data_generator.verify_all_tests_in_benchmark_csv(
            waterfall_testers, benchmark_metadata)
def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
    """Well-formed input with an unowned benchmark listed in the txt file passes."""
    waterfall_testers = {
        'AAAAA1 AUTOGENERATED': {},
        'Android Nexus5 Perf (2)': {
            'scripts': [
                {'name': 'benchmark_name_1'},
                {'name': 'benchmark_name_2'},
            ]
        },
        'Linux Perf': {
            'isolated_scripts': [
                {'name': 'benchmark_name_2.reference'},
                {'name': 'benchmark_name_3'},
            ]
        },
    }
    benchmark_metadata = {
        'benchmark_name_1': BenchmarkMetadata('*****@*****.**', None, False),
        'benchmark_name_2': BenchmarkMetadata(None, None, False),
        'benchmark_name_3': BenchmarkMetadata('*****@*****.**', None, False),
    }
    # Mock out content of unowned_benchmarks.txt
    mocked_open = mock.mock_open(read_data="benchmark_name_2")
    with mock.patch('__builtin__.open', mocked_open):
        perf_data_generator.verify_all_tests_in_benchmark_csv(
            waterfall_testers, benchmark_metadata)
def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
    """Matching waterfall tests and benchmark metadata verify without error."""
    waterfall_testers = {
        'AAAAA1 AUTOGENERATED': {},
        'Android Nexus5 Perf (2)': {
            'scripts': [
                {'name': 'benchmark_name_1'},
                {'name': 'benchmark_name_2'},
            ]
        },
        'Linux Perf': {
            'isolated_scripts': [
                {'name': 'benchmark_name_2.reference'},
                {'name': 'benchmark_name_3'},
            ]
        },
    }
    benchmark_metadata = {
        name: BenchmarkMetadata(None, None)
        for name in ('benchmark_name_1', 'benchmark_name_2', 'benchmark_name_3')
    }
    perf_data_generator.verify_all_tests_in_benchmark_csv(
        waterfall_testers, benchmark_metadata)
def test_UnscheduledCppBenchmarks(self):
    """A tracked gtest benchmark that is never scheduled fails validation.

    'toyota' is tracked in GTEST_BENCHMARKS but absent from the scheduled
    set, so the check must return False and name the benchmark.
    """
    self.get_non_telemetry_benchmarks.return_value = {'honda'}
    perf_data_generator.GTEST_BENCHMARKS = {
        'honda': BenchmarkMetadata('*****@*****.**'),
        'toyota': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.OTHER_BENCHMARKS = {}
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(valid, False)
    self.assertIn('Benchmark toyota is tracked but not scheduled',
                  self.test_stream.getvalue())
def test_UntrackedTelemetryBenchmarks(self):
    """A telemetry benchmark missing from the tracked set fails validation.

    'darth.vader' is returned by the benchmark scan but is not present in
    TELEMETRY_PERF_BENCHMARKS, so the check must report it.
    """
    self.get_telemetry_benchmarks.return_value = {'t_bar', 'darth.vader'}
    self.get_non_telemetry_benchmarks.return_value = {'honda'}
    perf_data_generator.TELEMETRY_PERF_BENCHMARKS = {
        't_bar': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.NON_TELEMETRY_BENCHMARKS = {
        'honda': BenchmarkMetadata('*****@*****.**'),
    }
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(valid, False)
    self.assertIn('Telemetry benchmark darth.vader no longer exists',
                  self.test_stream.getvalue())
def test_returnTrue(self):
    """When tracked and scheduled benchmark sets agree, validation succeeds
    and nothing is written to the output stream."""
    self.get_telemetry_benchmarks.return_value = {'t_foo', 't_bar'}
    self.get_non_telemetry_benchmarks.return_value = {'honda'}
    perf_data_generator.TELEMETRY_PERF_BENCHMARKS = {
        't_foo': BenchmarkMetadata('*****@*****.**'),
        't_bar': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.NON_TELEMETRY_BENCHMARKS = {
        'honda': BenchmarkMetadata('*****@*****.**'),
    }
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(valid, True)
    self.assertEqual(self.test_stream.getvalue(), '')
def testVerifyAllTestsInBenchmarkCsvFindsFakeTest(self):
    """An unrecognized tester name raises an AssertionError naming it."""
    tests = {'Random fake test': {}}
    benchmarks = {'benchmark_name_1': BenchmarkMetadata(None, None, False)}
    with self.assertRaises(AssertionError) as context:
        perf_data_generator.verify_all_tests_in_benchmark_csv(
            tests, benchmarks)
    # assertIn instead of assertTrue('x' in y): gives the actual message
    # on failure rather than just "False is not true".
    self.assertIn('Unknown test', context.exception.message)
def test_UntrackedCppBenchmarks(self):
    """A scheduled C++ benchmark missing from the tracked set fails validation.

    'tesla' is scheduled on the waterfall but absent from
    NON_TELEMETRY_BENCHMARKS, so the check must report it.
    """
    self.get_telemetry_benchmarks.return_value = {'t_bar'}
    self.get_non_telemetry_benchmarks.return_value = {'honda', 'tesla'}
    perf_data_generator.TELEMETRY_PERF_BENCHMARKS = {
        't_bar': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.NON_TELEMETRY_BENCHMARKS = {
        'honda': BenchmarkMetadata('*****@*****.**'),
    }
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(valid, False)
    self.assertIn(
        'Benchmark tesla is scheduled on perf waterfall but not tracked',
        self.test_stream.getvalue())
def testVerifyAllTestsInBenchmarkCsvCatchesMismatchedTests(self):
    """Mismatched scheduled vs. tracked benchmarks raise a diff-style error.

    benchmark_name_1 is scheduled but untracked (should be added);
    benchmark_name_3 is tracked but unscheduled (should be removed).
    """
    tests = {
        'Android Nexus5 Perf (2)': {
            'scripts': [
                {'name': 'benchmark_name_1'},
                {'name': 'benchmark_name_2'},
            ]
        }
    }
    benchmarks = {
        'benchmark_name_2': BenchmarkMetadata(None, None),
        'benchmark_name_3': BenchmarkMetadata(None, None),
    }
    with self.assertRaises(AssertionError) as context:
        perf_data_generator.verify_all_tests_in_benchmark_csv(
            tests, benchmarks)
    # assertIn instead of assertTrue('x' in y): gives the actual message
    # on failure rather than just "False is not true".
    exception = context.exception.message
    self.assertIn('Add benchmark_name_1', exception)
    self.assertIn('Remove benchmark_name_3', exception)
def test_UnscheduledTelemetryBenchmarks(self):
    """A tracked telemetry benchmark that is never scheduled fails validation.

    'darth.vader' exists in TELEMETRY_PERF_BENCHMARKS but is not in the
    scheduled set, so the check must report it.
    """
    self.get_telemetry_benchmarks.return_value = {'t_bar'}
    self.get_non_telemetry_benchmarks.return_value = {'honda'}
    perf_data_generator.TELEMETRY_PERF_BENCHMARKS = {
        'darth.vader': BenchmarkMetadata('*****@*****.**'),
        't_bar': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.GTEST_BENCHMARKS = {
        'honda': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.OTHER_BENCHMARKS = {}
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(valid, False)
    self.assertIn(
        'Telemetry benchmark darth.vader exists but is not scheduled',
        self.test_stream.getvalue())
def test_returnTrue(self):
    """When tracked gtest benchmarks exactly match the scheduled set,
    validation succeeds and nothing is written to the output stream."""
    self.get_non_telemetry_benchmarks.return_value = {'honda'}
    perf_data_generator.GTEST_BENCHMARKS = {
        'honda': BenchmarkMetadata('*****@*****.**'),
    }
    perf_data_generator.OTHER_BENCHMARKS = {}
    valid = perf_data_generator.is_perf_benchmarks_scheduling_valid(
        'dummy', self.test_stream)
    # assertEqual: assertEquals is a deprecated unittest alias.
    self.assertEqual(self.test_stream.getvalue(), '')
    self.assertEqual(valid, True)