def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
    tests = {
        'AAAAA1 AUTOGENERATED': {},
        'Android Nexus5 Perf (2)': {
            'scripts': [
                {'name': 'benchmark_name_1'},
                {'name': 'benchmark_name_2'}
            ]
        },
        'Linux Perf': {
            'isolated_scripts': [
                {'name': 'benchmark_name_2.reference'},
                {'name': 'benchmark_name_3'}
            ]
        }
    }
    benchmarks = {
        'benchmark_name_1': BenchmarkMetadata('*****@*****.**'),
        'benchmark_name_2': BenchmarkMetadata('darth@deathstar'),
        'benchmark_name_3': BenchmarkMetadata('*****@*****.**')
    }

    # Mock out content of unowned_benchmarks.txt and sharding map
    data = {
        "0": {
            "benchmarks": {
                "benchmark_name_2": {}
            }
        }
    }
    with mock.patch('__builtin__.open',
                    mock.mock_open(read_data=json.dumps(data))):
      perf_data_generator.verify_all_tests_in_benchmark_csv(tests, benchmarks)
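These excerpts are test methods from Chromium's perf_data_generator unittest, and they assume scaffolding that was stripped from the page: imports, a test-case class, and the BenchmarkMetadata type. A minimal sketch of that scaffolding follows; the field names (emails, component, stable) are guesses, with defaults chosen so the one-, two-, and three-argument constructions seen across the different revisions all work.

# Hypothetical scaffolding for the excerpts on this page. The real
# BenchmarkMetadata is defined alongside perf_data_generator; the field
# names used here are assumptions, not the actual definition.
import collections
import json
import unittest

import mock  # the standalone mock package, as used with Python 2

import perf_data_generator


class BenchmarkMetadata(
    collections.namedtuple('BenchmarkMetadata',
                           ['emails', 'component', 'stable'])):
  # Defaults let the constructor accept one, two, or three positional
  # arguments, matching the different revisions excerpted on this page.
  def __new__(cls, emails=None, component=None, stable=False):
    return super(BenchmarkMetadata, cls).__new__(
        cls, emails, component, stable)


class PerfDataGeneratorTest(unittest.TestCase):
  # ... the test methods shown on this page live here ...
  pass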
Example #2
    def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
        tests = {
            'AAAAA1 AUTOGENERATED': {},
            'Android Nexus5 Perf (2)': {
                'scripts': [{
                    'name': 'benchmark_name_1'
                }, {
                    'name': 'benchmark_name_2'
                }]
            },
            'Linux Perf': {
                'isolated_scripts': [{
                    'name': 'benchmark_name_2.reference'
                }, {
                    'name': 'benchmark_name_3'
                }]
            }
        }
        benchmarks = {
            'benchmark_name_1': BenchmarkMetadata('*****@*****.**', None, False),
            'benchmark_name_2': BenchmarkMetadata(None, None, False),
            'benchmark_name_3': BenchmarkMetadata('*****@*****.**', None,
                                                  False)
        }

        # Mock out content of unowned_benchmarks.txt
        with mock.patch('__builtin__.open',
                        mock.mock_open(read_data="benchmark_name_2")):
            perf_data_generator.verify_all_tests_in_benchmark_csv(
                tests, benchmarks)
Example #3
    def testVerifyAllTestsInBenchmarkCsvPassesWithCorrectInput(self):
        tests = {
            'AAAAA1 AUTOGENERATED': {},
            'Android Nexus5 Perf (2)': {
                'scripts': [{
                    'name': 'benchmark_name_1'
                }, {
                    'name': 'benchmark_name_2'
                }]
            },
            'Linux Perf': {
                'isolated_scripts': [{
                    'name': 'benchmark_name_2.reference'
                }, {
                    'name': 'benchmark_name_3'
                }]
            }
        }
        benchmarks = {
            'benchmark_name_1': BenchmarkMetadata(None, None),
            'benchmark_name_2': BenchmarkMetadata(None, None),
            'benchmark_name_3': BenchmarkMetadata(None, None)
        }

        perf_data_generator.verify_all_tests_in_benchmark_csv(
            tests, benchmarks)
Example #4
    def testVerifyAllTestsInBenchmarkCsvFindsFakeTest(self):
        tests = {'Random fake test': {}}
        benchmarks = {'benchmark_name_1': BenchmarkMetadata(None, None, False)}

        with self.assertRaises(AssertionError) as context:
            perf_data_generator.verify_all_tests_in_benchmark_csv(
                tests, benchmarks)
        self.assertTrue('Unknown test' in context.exception.message)
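context.exception.message relies on the BaseException.message attribute, which Python 2 provided and Python 3 removed. The portable spelling checks the stringified exception, and assertIn reports the actual message when the check fails:

        # Portable across Python 2 and 3:
        self.assertIn('Unknown test', str(context.exception))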
Example #5
  def testVerifyAllTestsInBenchmarkCsvCatchesMismatchedTests(self):
    tests = {
        'Android Nexus5 Perf (2)': {
            'scripts': [
                {'name': 'benchmark_name_1'},
                {'name': 'benchmark_name_2'}
            ]
        }
    }
    benchmarks = {
        'benchmark_name_2': BenchmarkMetadata(None, None),
        'benchmark_name_3': BenchmarkMetadata(None, None),
    }

    with self.assertRaises(AssertionError) as context:
      perf_data_generator.verify_all_tests_in_benchmark_csv(tests, benchmarks)
    exception = context.exception.message
    self.assertTrue('Add benchmark_name_1' in exception)
    self.assertTrue('Remove benchmark_name_3' in exception)
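As in the previous example, the .message attribute is Python 2 only, and assertIn is the more idiomatic check. A sketch of the portable ending for this test:

    exception = str(context.exception)
    self.assertIn('Add benchmark_name_1', exception)
    self.assertIn('Remove benchmark_name_3', exception)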