def exec_runner(self, *args, **kwargs):
    # Fake clock: each call to perf_counter() advances by exactly 1 second.
    def fake_timer():
        t = fake_timer.value
        fake_timer.value += 1.0
        return t
    fake_timer.value = 0.0

    name = kwargs.pop('name', 'bench')
    time_func = kwargs.pop('time_func', None)

    runner = self.create_runner(args, **kwargs)

    with mock.patch('pyperf.perf_counter', fake_timer):
        with tests.capture_stdout() as stdout:
            with tests.capture_stderr() as stderr:
                if time_func:
                    bench = runner.bench_time_func(name, time_func)
                else:
                    bench = runner.bench_func(name, check_args, None, 1, 2)

    stdout = stdout.getvalue()
    stderr = stderr.getvalue()
    if '--stdout' not in args:
        self.assertEqual(stderr, '')

    # check bench_time_func() bench
    self.assertIsInstance(bench, pyperf.Benchmark)
    self.assertEqual(bench.get_name(), name)
    self.assertEqual(bench.get_nrun(), 1)

    return Result(runner, bench, stdout)
def test_bench_command(self):
    args = [sys.executable, '-c', 'pass']

    runner = self.create_runner('-l1 -w0 -n1 --worker'.split())
    with tests.capture_stdout():
        bench = runner.bench_command('bench', args)

    self.assertEqual(bench.get_metadata()['command'],
                     ' '.join(map(shell_quote, args)))
def check_calibrate_loops(self, runner, time_func, warmups):
    with tests.capture_stdout():
        bench = runner.bench_time_func('bench', time_func)

    runs = bench.get_runs()
    self.assertEqual(len(runs), 1)
    run = runs[0]

    self.assertEqual(run.warmups, warmups)
def test_cpu_affinity_args(self):
    runner = self.create_runner(['-v', '--affinity=3,7'])

    with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity:
        with tests.capture_stdout() as stdout:
            runner._cpu_affinity()
    self.assertEqual(runner.args.affinity, '3,7')
    self.assertEqual(stdout.getvalue(),
                     'Pin process to CPUs: 3,7\n')
    mock_setaffinity.assert_called_once_with([3, 7])
def test_compare_to(self):
    def time_func(loops):
        return 1.0

    def abs_executable(python):
        return python

    run = pyperf.Run([1.5],
                     metadata={'name': 'name'},
                     collect_metadata=False)
    bench = pyperf.Benchmark([run])
    suite = pyperf.BenchmarkSuite([bench])

    with ExitStack() as cm:
        # Stub Popen so no worker process is actually spawned.
        def popen(*args, **kw):
            mock_popen = mock.Mock()
            mock_popen.wait.return_value = 0
            return mock_popen

        mock_subprocess = cm.enter_context(
            mock.patch('pyperf._master.subprocess'))
        mock_subprocess.Popen.side_effect = popen

        cm.enter_context(
            mock.patch('pyperf._runner.abs_executable',
                       side_effect=abs_executable))
        cm.enter_context(
            mock.patch('pyperf._master._load_suite_from_pipe',
                       return_value=suite))

        args = ["--python=python3.8", "--compare-to=python3.6",
                "--min-time=5", "-p1", "-w3", "-n7", "-l11"]
        runner = self.create_runner(args)
        with tests.capture_stdout():
            runner.bench_time_func('name', time_func)

        def popen_call(python):
            args = [python, mock.ANY, '--worker', '--pipe', mock.ANY,
                    '--worker-task=0', '--values', '7', '--min-time', '5.0',
                    '--loops', '11', '--warmups', '3']
            kw = {}
            if MS_WINDOWS:
                kw['close_fds'] = False
            else:
                kw['pass_fds'] = mock.ANY
            return mock.call(args, env=mock.ANY, **kw)

        call1 = popen_call('python3.6')
        call2 = popen_call('python3.8')
        mock_subprocess.Popen.assert_has_calls([call1, call2])
def test_calibration_zero(self):
    runner = self.create_runner(['--worker', '--calibrate-loops'])

    def time_func(loops):
        return 0

    with self.assertRaises(SystemExit):
        with tests.capture_stdout() as stdout:
            runner.bench_time_func('bench', time_func)
    self.assertIn('ERROR: failed to calibrate the number of loops',
                  stdout.getvalue())
def test_duplicated_named(self):
    def time_func(loops):
        return 1.0

    runner = self.create_runner('-l1 -w0 -n1 --worker'.split())
    with tests.capture_stdout():
        runner.bench_time_func('optim', time_func)
        with self.assertRaises(ValueError) as cm:
            runner.bench_time_func('optim', time_func)

    self.assertEqual(str(cm.exception),
                     "duplicated benchmark name: 'optim'")
def test_cpu_affinity_isolcpus(self):
    runner = self.create_runner(['-v'])

    with mock.patch('pyperf._runner.set_cpu_affinity') as mock_setaffinity:
        with mock.patch('pyperf._runner.get_isolated_cpus',
                        return_value=[1, 2]):
            with tests.capture_stdout() as stdout:
                runner._cpu_affinity()
    self.assertEqual(runner.args.affinity, '1-2')
    self.assertEqual(stdout.getvalue(),
                     'Pin process to isolated CPUs: 1-2\n')
    mock_setaffinity.assert_called_once_with([1, 2])
def test_json_exists(self):
    with tempfile.NamedTemporaryFile('wb+') as tmp:
        with tests.capture_stdout() as stdout:
            try:
                self.create_runner(['--worker', '-l1', '-w1',
                                    '--output', tmp.name])
            except SystemExit as exc:
                self.assertEqual(exc.code, 1)

        self.assertEqual(
            'ERROR: The JSON file %r already exists' % tmp.name,
            stdout.getvalue().rstrip())
def check_two_benchmarks(self, task=None):
    args = ['--worker', '--loops=1', '-w0', '-n3']
    if task is not None:
        args.append('--worker-task=%s' % task)
    runner = self.create_runner(args)

    def time_func(loops):
        return 1.0

    def time_func2(loops):
        return 2.0

    with tests.capture_stdout():
        bench1 = runner.bench_time_func('bench1', time_func)
        bench2 = runner.bench_time_func('bench2', time_func2)

    return (bench1, bench2)