def test_all_benchmarks_have_unique_names(self):
  """Checks that every non-ignored benchmark has a distinct, non-None name."""
  seen = []
  for benchmark in all_benchmarks.all_benchmarks(include_ignored=False):
    self.assertIsNotNone(benchmark.name)
    seen.append(benchmark.name)
  # A set collapses duplicates, so equal lengths means all names are unique.
  self.assertLen(set(seen), len(seen))
def _parameterize_all_benchmarks():
  """Creates parameterized test cases for all benchmarks.

  This is useful so that tests can run on each benchmark individually,
  instead of having a single test loop over all benchmarks. In this way,
  issues with multiple benchmarks can be identified in one round of testing,
  and it is clearer which of the benchmarks need further attention.

  Returns:
    A list of tuples (test_case_name, benchmark), one per benchmark, where
    test_case_name is unique across the list even if benchmark names repeat.
  """
  parameterized_tuples = []
  for index, benchmark in enumerate(all_benchmarks.all_benchmarks()):
    # The index ensures all test cases have distinct names, even if multiple
    # benchmarks have the same name.
    test_case_name = '{index}_{name}'.format(index=index, name=benchmark.name)
    parameterized_tuples.append((test_case_name, benchmark))
  return parameterized_tuples
def test_all_benchmarks_have_reason_if_ignored(self):
  """Checks that every ignored benchmark records a reason for being ignored."""
  for benchmark in all_benchmarks.all_benchmarks(include_ignored=True):
    if not benchmark.should_ignore:
      continue
    self.assertIsNotNone(benchmark.ignore_reason)
def test_all_benchmarks_have_description(self):
  """Checks that every non-ignored benchmark has a non-empty description."""
  benchmarks = all_benchmarks.all_benchmarks(include_ignored=False)
  for benchmark in benchmarks:
    self.assertTrue(benchmark.description)
def test_all_benchmarks_have_source(self):
  """Checks that every non-ignored benchmark records its source."""
  benchmarks = all_benchmarks.all_benchmarks(include_ignored=False)
  for benchmark in benchmarks:
    self.assertIsNotNone(benchmark.source)
def test_all_benchmarks_finds_correct_functions(self, include_ignored,
                                                expected_names):
  """Checks that benchmark collection finds exactly the expected functions."""
  found = all_benchmarks.all_benchmarks(
      modules=[test_benchmarks], include_ignored=include_ignored)
  # Order does not matter; only the multiset of names must match.
  self.assertCountEqual([benchmark.name for benchmark in found],
                        expected_names)
def test_all_stackoverflow_benchmarks_have_unique_source(self):
  """Checks that no two StackOverflow benchmarks share the same source."""
  seen_sources = set()
  benchmarks = all_benchmarks.all_benchmarks(
      modules=[stackoverflow_benchmarks], include_ignored=False)
  for benchmark in benchmarks:
    # Fail on the first repeated source so the offending benchmark is clear.
    self.assertNotIn(benchmark.source, seen_sources)
    seen_sources.add(benchmark.source)
def test_all_benchmarks_have_description(self):
  """Checks descriptions exist for all non-AutoPandas, non-ignored benchmarks."""
  for benchmark in all_benchmarks.all_benchmarks(include_ignored=False):
    # AutoPandas benchmarks don't have descriptions.
    if benchmark.name.startswith('autopandas_'):
      continue
    self.assertTrue(benchmark.description)
def test_all_benchmarks_have_description(self):
  """Checks that Google and StackOverflow benchmarks all have descriptions."""
  benchmarks = all_benchmarks.all_benchmarks(
      modules=[google_benchmarks, stackoverflow_benchmarks])
  for benchmark in benchmarks:
    self.assertTrue(benchmark.description)