def test_filter_runs(self):
    runs = (1.0, 2.0, 3.0, 4.0, 5.0)
    bench = self.create_bench(runs)
    self.assertEqual(bench.get_values(), runs)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)

        # --include-runs / --exclude-runs take 1-based run indexes,
        # either single values ('4') or ranges ('1-3'), comma-separated
        stdout = self.run_command('convert', filename,
                                  '--include-runs', '4', '--stdout')
        bench2 = pyperf.Benchmark.loads(stdout)

        stdout = self.run_command('convert', filename,
                                  '--include-runs', '1-3,5', '--stdout')
        bench3 = pyperf.Benchmark.loads(stdout)

        stdout = self.run_command('convert', filename,
                                  '--exclude-runs', '2,4', '--stdout')
        bench4 = pyperf.Benchmark.loads(stdout)

    self.assertEqual(bench2.get_values(), (4.0,))
    self.assertEqual(bench3.get_values(), (1.0, 2.0, 3.0, 5.0))
    self.assertEqual(bench4.get_values(), (1.0, 3.0, 5.0))

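# A minimal sketch (not pyperf's actual implementation) of how a run-selection
# spec such as '1-3,5' expands into the 1-based run indexes exercised above;
# the helper name parse_run_spec is hypothetical and only illustrates the
# syntax accepted by --include-runs / --exclude-runs.
def parse_run_spec(spec):
    indexes = set()
    for part in spec.split(','):
        if '-' in part:
            # 'start-stop' range, inclusive on both ends
            start, stop = part.split('-')
            indexes.update(range(int(start), int(stop) + 1))
        else:
            indexes.add(int(part))
    return sorted(indexes)

# parse_run_spec('1-3,5') == [1, 2, 3, 5]
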
def test_filter_benchmarks(self):
    values = (1.0, 1.5, 2.0)
    benchmarks = []
    for name in ("call_simple", "go", "telco"):
        bench = self.create_bench(values, metadata={'name': name})
        benchmarks.append(bench)
    suite = pyperf.BenchmarkSuite(benchmarks)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        suite.dump(filename)

        stdout = self.run_command('convert', filename,
                                  '--include-benchmark', 'go', '--stdout')
        suite2 = pyperf.BenchmarkSuite.loads(stdout)

        stdout = self.run_command('convert', filename,
                                  '--exclude-benchmark', 'go', '--stdout')
        suite3 = pyperf.BenchmarkSuite.loads(stdout)

    self.assertEqual(suite2.get_benchmark_names(), ['go'])
    self.assertEqual(suite3.get_benchmark_names(), ['call_simple', 'telco'])

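# The same selection can be done in-process instead of through 'convert':
# a sketch that rebuilds a suite from the matching benchmarks only. It assumes
# Benchmark.get_name(), which pyperf provides for named benchmarks; the helper
# name filter_suite is hypothetical.
def filter_suite(benchmarks, wanted_name):
    kept = [bench for bench in benchmarks
            if bench.get_name() == wanted_name]
    return pyperf.BenchmarkSuite(kept)
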
def run_timeit_bench(self, args):
    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        args += ('--output', filename)
        stdout = self.run_timeit(args)
        # load before the temporary directory is removed
        bench = pyperf.Benchmark.load(filename)
    return (bench, stdout)

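# Illustrative use of the helper above; the argument is a tuple of CLI
# arguments, following the PERF_TIMEIT and FAST_BENCH_ARGS constants used
# elsewhere in these tests:
#
#     bench, stdout = self.run_timeit_bench(PERF_TIMEIT + FAST_BENCH_ARGS)
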
def test_json_file(self):
    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')

        result = self.exec_runner('--worker', '-l1', '-w1',
                                  '--output', filename)

        loaded = pyperf.Benchmark.load(filename)
        tests.compare_benchmarks(self, loaded, result.bench)

def test_convert(self):
    bench = pyperf.Benchmark.load(TELCO)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        self.run_command('convert', TELCO, '-o', filename)
        bench2 = pyperf.Benchmark.load(filename)

    tests.compare_benchmarks(self, bench2, bench)

def compare(self, action, ref_result, changed_result, *args):
    with tests.temporary_directory() as tmpdir:
        ref_name = os.path.join(tmpdir, 'ref.json')
        changed_name = os.path.join(tmpdir, 'changed.json')

        ref_result.dump(ref_name)
        changed_result.dump(changed_name)

        stdout = self.run_command(action, ref_name, changed_name, *args)
    return stdout

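# Illustrative use of the compare() helper above with pyperf's 'compare_to'
# subcommand; any extra CLI flags pass through via *args:
#
#     stdout = self.compare('compare_to', ref_bench, changed_bench)
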
def test_append(self):
    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        args = PERF_TIMEIT + ('--append', filename) + FAST_BENCH_ARGS

        self.run_timeit(args)
        bench = pyperf.Benchmark.load(filename)
        self.assertEqual(bench.get_nvalue(), 1)

        self.run_timeit(args)
        bench = pyperf.Benchmark.load(filename)
        self.assertEqual(bench.get_nvalue(), 2)

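# In-process analogue of the '--append' behaviour checked above: each extra
# run adds its values to the benchmark, so get_nvalue() grows with every
# invocation. A minimal sketch; bench_from_runs is a hypothetical helper, and
# each run here carries a single value so get_nvalue() equals the run count.
def bench_from_runs(list_of_values, name='bench'):
    runs = [pyperf.Run(values, metadata={'name': name})
            for values in list_of_values]
    return pyperf.Benchmark(runs)

# bench_from_runs([(1.0,), (2.0,)]).get_nvalue() == 2
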
def test_remove_warmups(self):
    values = [1.0, 2.0, 3.0]
    raw_values = [5.0] + values
    # warmups is a list of (loops, value) pairs: one warmup value of 5.0
    # measured with a single loop, followed by three real values
    run = pyperf.Run(values,
                     warmups=[(1, 5.0)],
                     metadata={'name': 'bench'})
    bench = pyperf.Benchmark([run])

    self.assertEqual(bench._get_nwarmup(), 1)
    self.assertEqual(bench._get_raw_values(warmups=True), raw_values)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)
        stdout = self.run_command('convert', filename,
                                  '--remove-warmups', '--stdout')
        bench2 = pyperf.Benchmark.loads(stdout)

    self.assertEqual(bench2._get_nwarmup(), 0)
    self.assertEqual(bench2._get_raw_values(warmups=True), raw_values[1:])