def test_compare_to_quiet(self):
    args = PERF_TIMEIT + ('--compare-to', sys.executable, '--quiet')
    args += COMPARE_BENCH
    cmd = tests.get_output(args)

    expected = (r'(?:Mean \+- std dev: .* -> .*: '
                r'(?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change)'
                r'|Not significant!)')
    self.assertRegex(cmd.stdout, expected)
def test_worker_verbose(self):
    args = ('--worker', '-w', '1', '-n', '2', '-l', '1',
            '--min-time', '0.001',
            '--metadata', '-v',
            '-s', 'import time',
            SLEEP)
    args = PERF_TIMEIT + args
    cmd = tests.get_output(args)
    self.assertEqual(cmd.returncode, 0)
    self.assertEqual(cmd.stderr, '')

    match = re.search(
        r'Warmup 1: ([0-9.]+) ms \(1 loop: [0-9.]+ ms\)\n'
        r'\n'
        r'Value 1: ([0-9.]+) ms\n'
        r'Value 2: ([0-9.]+) ms\n'
        r'\n'
        r'Metadata:\n'
        r'(- .*\n)+'
        r'\n'
        r'Mean \+- std dev: (?P<mean>[0-9.]+) ms \+-'
        r' (?P<mad>[0-9.]+) ms\n'
        r'$',
        cmd.stdout)
    self.assertIsNotNone(match, repr(cmd.stdout))

    values = [float(match.group(i)) for i in range(1, 4)]
    for value in values:
        self.assertTrue(MIN_VALUE <= value <= MAX_VALUE, repr(value))

    mean = float(match.group('mean'))
    self.assertTrue(MIN_MEAN <= mean <= MAX_MEAN, mean)
    mad = float(match.group('mad'))
    self.assertLessEqual(mad, MAX_STD_DEV)
def test_compare_to_verbose(self):
    args = PERF_TIMEIT + ('--compare-to', sys.executable, '--verbose')
    args += COMPARE_BENCH
    cmd = tests.get_output(args)

    expected = textwrap.dedent(r'''
        Benchmark .*
        ==========+

        .*
        Mean \+- std dev: .*

        Benchmark .*
        ==========+

        .*
        Mean \+- std dev: .*

        Compare
        =======

        Mean \+- std dev: .* -> .*: (?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change)
    ''').strip()
    expected = re.compile(expected, flags=re.DOTALL)
    self.assertRegex(cmd.stdout, expected)
def test_cli_snippet_error(self):
    args = PERF_TIMEIT + ('x+1',)
    cmd = tests.get_output(args)

    self.assertEqual(cmd.returncode, 1)
    self.assertIn('Traceback (most recent call last):', cmd.stderr)
    self.assertIn("NameError", cmd.stderr)
def run_command(self, *args, **kwargs):
    cmd = [sys.executable, '-m', 'perf']
    cmd.extend(args)

    proc = tests.get_output(cmd, **kwargs)
    self.assertEqual(proc.stderr, '')
    self.assertEqual(proc.returncode, 0)
    return proc.stdout
def check_command(self, script, args, nproc=3):
    self.TESTED.add(script)

    script = os.path.join(EXAMPLES_DIR, script)
    cmd = [sys.executable, script] + args + ["--inherit-env=PYTHONPATH"]
    proc = tests.get_output(cmd)
    self.assertRegex(proc.stdout,
                     r'Mean \+- std dev: [0-9.]+ [mun]s '
                     r'\+- [0-9.]+ [mun]s\n')
    self.assertEqual(proc.returncode, 0)
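# Note: each script exercised by check_command() is expected to print a line
# of the form "Mean +- std dev: ... +- ...", which the regex above matches.
# As an illustration only (the real scripts live in EXAMPLES_DIR and may
# differ), such a script could look roughly like this, assuming the
# perf.Runner/bench_func API:
#
#     import time
#     import perf
#
#     def func():
#         time.sleep(0.001)
#
#     runner = perf.Runner()
#     runner.bench_func('sleep', func)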
def test_show(self):
    args = [sys.executable, '-m', 'perf', 'system', 'show']
    proc = get_output(args)

    regex = ('(Run "%s -m perf system tune" '
             'to tune the system configuration to run benchmarks'
             '|OK! System ready for benchmarking'
             '|WARNING: no operation available for your platform)'
             % os.path.basename(sys.executable))
    self.assertRegex(proc.stdout, regex, msg=proc)

    # The return code is either 0 if the system is tuned
    # or 2 if the system isn't
    self.assertIn(proc.returncode, (0, 2), msg=proc)
def test_compare_to(self):
    args = ('--compare-to', sys.executable, '--python-names=ref:changed')
    args = PERF_TIMEIT + args + COMPARE_BENCH
    cmd = tests.get_output(args)

    # ".*" and DOTALL ignore stability warnings
    expected = textwrap.dedent(r'''
        ref: \. [0-9.]+ (?:ms|us) \+- [0-9.]+ (?:ms|us).*
        changed: \. [0-9.]+ (?:ms|us) \+- [0-9.]+ (?:ms|us).*
        Mean \+- std dev: \[ref\] .* -> \[changed\] .*: (?:[0-9]+\.[0-9][0-9]x (?:faster|slower)|no change)
    ''').strip()
    expected = re.compile(expected, flags=re.DOTALL)
    self.assertRegex(cmd.stdout, expected)
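# For reference, the regex in test_compare_to() above targets --compare-to
# output of roughly this shape (values are purely illustrative, not real
# results; the trailing ".*" plus DOTALL tolerates extra lines in between):
#
#     ref: . 1.23 ms +- 0.05 ms
#     changed: . 1.25 ms +- 0.04 ms
#     Mean +- std dev: [ref] 1.23 ms +- 0.05 ms -> [changed] 1.25 ms +- 0.04 ms: 1.02x slower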
def test_cli(self):
    args = ('-p', '2', '-w', '1', '-n', '3', '-l', '4',
            '--min-time', '0.001',
            '-s', 'import time',
            SLEEP)
    args = PERF_TIMEIT + args
    cmd = tests.get_output(args)
    self.assertEqual(cmd.returncode, 0)
    self.assertEqual(cmd.stderr, '')

    # Only match the final line, to skip random warnings like
    # "ERROR: the benchmark is very unstable"
    match = re.search(r'Mean \+- std dev: (?P<mean>[0-9.]+) ms'
                      r' \+- (?P<mad>[0-9.]+) ms'
                      r'$',
                      cmd.stdout.rstrip())
    self.assertIsNotNone(match, repr(cmd.stdout))

    # Tolerate large differences on busy systems
    mean = float(match.group('mean'))
    self.assertTrue(MIN_MEAN <= mean <= MAX_MEAN, mean)

    mad = float(match.group('mad'))
    self.assertLessEqual(mad, MAX_STD_DEV)
def test_python_option(self):
    # Ensure that paths are absolute
    paths = [os.path.realpath(path) for path in sys.path]
    env = dict(os.environ, PYTHONPATH=os.pathsep.join(paths))

    tmp_exe = tempfile.mktemp()
    try:
        shutil.copy2(sys.executable, tmp_exe)

        # Run benchmark to check if --python works
        args = ('--metadata',
                '--python', tmp_exe,
                '--inherit-env', 'PYTHONPATH')
        args = PERF_TIMEIT + args + FAST_BENCH_ARGS
        cmd = tests.get_output(args, env=env)
    finally:
        try:
            os.unlink(tmp_exe)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                raise

    self.assertEqual(cmd.returncode, 0,
                     repr(cmd.stdout + cmd.stderr))
    self.assertIn("python_executable: %s" % tmp_exe, cmd.stdout)
def run_timeit(self, args):
    cmd = tests.get_output(args)
    self.assertEqual(cmd.returncode, 0, cmd.stdout + cmd.stderr)
    return cmd.stdout