def test_json_file(self):
    """Run 'perf timeit --json FILE' in a subprocess and check the JSON output.

    Spawns the timeit CLI with 2 worker processes, 3 samples each, and
    `loops` loops per sample, then loads the resulting JSON file and
    validates the recorded runs and their raw timing samples.
    """
    loops = 4
    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        args = [sys.executable, '-m', 'perf', 'timeit',
                '-p', '2', '-n', '3', '-l', str(loops),
                '--json', filename,
                '-s', 'import time', SLEEP]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                                universal_newlines=True)
        proc.communicate()
        self.assertEqual(proc.returncode, 0)

        bench = perf.Benchmark.load(filename)
        runs = bench.get_runs()
        # -p 2 => one run per worker process
        self.assertEqual(len(runs), 2)
        for run in runs:
            self.assertIsInstance(run, perf.Run)
            # was a hard-coded literal 4 duplicating `loops`; use the
            # variable so the test stays consistent if `loops` changes
            self.assertEqual(run.loops, loops)
            raw_samples = run._get_raw_samples(warmups=True)
            # 1 default warmup + 3 samples (-n 3)
            self.assertEqual(len(raw_samples), 4)
            for raw_sample in raw_samples:
                ms = (raw_sample / loops) * 1e3
                self.assertTrue(MIN_SAMPLE <= ms <= MAX_SAMPLE, ms)
def test_filter_benchmarks(self):
    """convert --include-benchmark / --exclude-benchmark keep or drop by name."""
    values = (1.0, 1.5, 2.0)
    names = ("call_simple", "go", "telco")
    suite = perf.BenchmarkSuite(
        [self.create_bench(values, metadata={'name': name})
         for name in names])

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        suite.dump(filename)

        out_include = self.run_command('convert', filename,
                                       '--include-benchmark', 'go',
                                       '--stdout')
        suite_included = perf.BenchmarkSuite.loads(out_include)

        out_exclude = self.run_command('convert', filename,
                                       '--exclude-benchmark', 'go',
                                       '--stdout')
        suite_excluded = perf.BenchmarkSuite.loads(out_exclude)

        self.assertEqual(suite_included.get_benchmark_names(), ['go'])
        self.assertEqual(suite_excluded.get_benchmark_names(),
                         ['call_simple', 'telco'])
def run_timeit_bench(self, args):
    """Run 'perf timeit' with *args* plus --output; return (Benchmark, stdout)."""
    with tests.temporary_directory() as tmpdir:
        json_path = os.path.join(tmpdir, 'test.json')
        stdout = self.run_timeit(args + ('--output', json_path))
        loaded = perf.Benchmark.load(json_path)
        return (loaded, stdout)
def test_filter_benchmarks(self):
    """convert --include-benchmark / --exclude-benchmark filter a suite by name."""
    samples = (1.0, 1.5, 2.0)
    suite = perf.BenchmarkSuite()
    for bench_name in ("call_simple", "go", "telco"):
        suite.add_benchmark(self.create_bench(samples, name=bench_name))

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        suite.dump(filename)

        out_include = self.run_command('convert', filename,
                                       '--include-benchmark', 'go',
                                       '--stdout')
        suite_included = perf.BenchmarkSuite.loads(out_include)

        out_exclude = self.run_command('convert', filename,
                                       '--exclude-benchmark', 'go',
                                       '--stdout')
        suite_excluded = perf.BenchmarkSuite.loads(out_exclude)

        self.assertEqual(
            [bench.name for bench in suite_included.get_benchmarks()],
            ['go'])
        self.assertEqual(
            [bench.name for bench in suite_excluded.get_benchmarks()],
            ['call_simple', 'telco'])
def test_filter_runs(self):
    """convert --include-runs / --exclude-runs select runs by 1-based index."""
    runs = (1.0, 2.0, 3.0, 4.0, 5.0)
    bench = self.create_bench(runs)
    self.assertEqual(bench.get_samples(), runs)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)

        only_fourth = perf.Benchmark.loads(
            self.run_command('convert', filename,
                             '--include-runs', '4', '--stdout'))
        kept_ranges = perf.Benchmark.loads(
            self.run_command('convert', filename,
                             '--include-runs', '1-3,5', '--stdout'))
        without_two = perf.Benchmark.loads(
            self.run_command('convert', filename,
                             '--exclude-runs', '2,4', '--stdout'))

        self.assertEqual(only_fourth.get_samples(), (4.0,))
        self.assertEqual(kept_ranges.get_samples(), (1.0, 2.0, 3.0, 5.0))
        self.assertEqual(without_two.get_samples(), (1.0, 3.0, 5.0))
def test_filter_runs(self):
    """convert --include-runs / --exclude-runs select runs by 1-based index."""
    runs = (1.0, 2.0, 3.0, 4.0, 5.0)
    bench = self.create_bench(runs)
    self.assertEqual(bench.get_values(), runs)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)

        only_fourth = perf.Benchmark.loads(
            self.run_command('convert', filename,
                             '--include-runs', '4', '--stdout'))
        kept_ranges = perf.Benchmark.loads(
            self.run_command('convert', filename,
                             '--include-runs', '1-3,5', '--stdout'))
        without_two = perf.Benchmark.loads(
            self.run_command('convert', filename,
                             '--exclude-runs', '2,4', '--stdout'))

        self.assertEqual(only_fourth.get_values(), (4.0,))
        self.assertEqual(kept_ranges.get_values(), (1.0, 2.0, 3.0, 5.0))
        self.assertEqual(without_two.get_values(), (1.0, 3.0, 5.0))
def test_output(self):
    """Run 'perf timeit --output FILE' and validate the saved benchmark.

    Spawns the timeit CLI with 2 worker processes, 1 warmup, 3 samples,
    and `loops` loops per sample, then loads the output JSON file and
    checks the runs and raw timing samples.
    """
    loops = 4
    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        args = [sys.executable, '-m', 'perf', 'timeit',
                '-p', '2', '-w', '1', '-n', '3', '-l', str(loops),
                '--min-time', '0.001',
                '--output', filename,
                '-s', 'import time', SLEEP]
        self.run_timeit(args)
        bench = perf.Benchmark.load(filename)

        # FIXME: skipped test, since calibration continues during warmup
        if not perf.python_has_jit():
            for run in bench.get_runs():
                # was a hard-coded literal 4 duplicating `loops`; use the
                # variable so the test stays consistent if `loops` changes
                self.assertEqual(run.get_total_loops(), loops)

        runs = bench.get_runs()
        # -p 2 => one run per worker process
        self.assertEqual(len(runs), 2)
        for run in runs:
            self.assertIsInstance(run, perf.Run)
            raw_samples = run._get_raw_samples(warmups=True)
            # 1 warmup (-w 1) + 3 samples (-n 3)
            self.assertEqual(len(raw_samples), 4)
            for raw_sample in raw_samples:
                ms = (raw_sample / loops) * 1e3
                self.assertTrue(MIN_SAMPLE <= ms <= MAX_SAMPLE, ms)
def test_json_file(self):
    """--worker --output FILE writes JSON matching the in-memory benchmark."""
    with tests.temporary_directory() as tmpdir:
        json_path = os.path.join(tmpdir, 'test.json')
        result = self.run_text_runner('--worker', '--output', json_path)
        from_disk = perf.Benchmark.load(json_path)
        tests.compare_benchmarks(self, from_disk, result.bench)
def test_json_file(self):
    """--worker --output FILE writes JSON matching the in-memory benchmark."""
    with tests.temporary_directory() as tmpdir:
        json_path = os.path.join(tmpdir, 'test.json')
        result = self.exec_runner('--worker', '--output', json_path)
        from_disk = perf.Benchmark.load(json_path)
        tests.compare_benchmarks(self, from_disk, result.bench)
def test_convert(self):
    """'convert TELCO -o FILE' round-trips the benchmark unchanged."""
    expected = perf.Benchmark.load(TELCO)
    with tests.temporary_directory() as tmpdir:
        out_path = os.path.join(tmpdir, 'test.json')
        self.run_command('convert', TELCO, '-o', out_path)
        converted = perf.Benchmark.load(out_path)
        tests.compare_benchmarks(self, converted, expected)
def compare(self, action, ref_result, changed_result, *args):
    """Dump both benchmarks to temporary files and run *action* on them.

    Returns the command's stdout.
    """
    with tests.temporary_directory() as tmpdir:
        ref_path = os.path.join(tmpdir, 'ref.json')
        changed_path = os.path.join(tmpdir, 'changed.json')
        ref_result.dump(ref_path)
        changed_result.dump(changed_path)
        return self.run_command(action, ref_path, changed_path, *args)
def test_append(self):
    """--append accumulates one value per 'perf timeit' invocation."""
    with tests.temporary_directory() as tmpdir:
        json_path = os.path.join(tmpdir, 'test.json')
        cmd = PERF_TIMEIT + ('--append', json_path) + FAST_BENCH_ARGS
        # run twice: each invocation should append exactly one value
        for expected_nvalue in (1, 2):
            self.run_timeit(cmd)
            bench = perf.Benchmark.load(json_path)
            self.assertEqual(bench.get_nvalue(), expected_nvalue)
def test_remove_outliers(self):
    """convert --remove-outliers drops the outlier samples."""
    inliers = (100.0,) * 100 + (99.0, 101.0)
    bench = self.create_bench(inliers + (90.0, 110.0))
    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)
        out = self.run_command('convert', filename,
                               '--remove-outliers', '--stdout')
        cleaned = perf.Benchmark.loads(out)
        self.assertEqual(cleaned.get_samples(), inliers)
def test_remove_warmups(self):
    """convert --remove-warmups strips the warmup sample from a run."""
    all_raw = [5.0, 1.0, 2.0, 3.0]
    bench = perf.Benchmark('bench')
    bench.add_run(perf.Run(1, all_raw))
    self.assertEqual(bench._get_raw_samples(warmups=True), all_raw)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)
        out = self.run_command('convert', filename,
                               '--remove-warmups', '--stdout')
        stripped = perf.Benchmark.loads(out)
        self.assertEqual(stripped._get_raw_samples(warmups=True),
                         all_raw[1:])
def test_remove_warmups(self):
    """convert --remove-warmups strips the warmup values from a run."""
    measured = [1.0, 2.0, 3.0]
    all_raw = [5.0] + measured
    run = perf.Run(measured, warmups=[(1, 5.0)],
                   metadata={'name': 'bench'})
    bench = perf.Benchmark([run])
    self.assertEqual(bench._get_nwarmup(), 1)
    self.assertEqual(bench._get_raw_values(warmups=True), all_raw)

    with tests.temporary_directory() as tmpdir:
        filename = os.path.join(tmpdir, 'test.json')
        bench.dump(filename)
        out = self.run_command('convert', filename,
                               '--remove-warmups', '--stdout')
        stripped = perf.Benchmark.loads(out)
        self.assertEqual(stripped._get_nwarmup(), 0)
        self.assertEqual(stripped._get_raw_values(warmups=True),
                         all_raw[1:])
def test_append(self):
    """timeit --append adds one sample per invocation to the JSON file."""
    with tests.temporary_directory() as tmpdir:
        json_path = os.path.join(tmpdir, 'test.json')
        cmd = [sys.executable, '-m', 'perf', 'timeit',
               '-p', '1', '-n', '1', '-l', '1', '-w', '0',
               '--append', json_path,
               '-s', 'import time', SLEEP]
        # run twice: each invocation should append exactly one sample
        for expected_nsample in (1, 2):
            self.run_timeit(cmd)
            bench = perf.Benchmark.load(json_path)
            self.assertEqual(bench.get_nsample(), expected_nsample)