def test_metadata(self):
    """Check the 'metadata -q' command output for a dumped suite.

    The suite created by create_suite() has a common 'hostname'
    metadata and per-benchmark 'python_version' metadata.
    """
    suite = self.create_suite()

    with tests.temporary_file() as tmp_name:
        suite.dump(tmp_name)
        stdout = self.run_command('metadata', '-q', tmp_name)

    # NOTE(review): the expected block was reconstructed from a
    # line-collapsed source; blank-line placement inside the dedented
    # string follows pyperf's metadata output format -- verify.
    expected = textwrap.dedent("""
        Common metadata
        ===============

        - hostname: toto

        py36
        ----

        Metadata:
        - python_version: 2.7

        py38
        ----

        Metadata:
        - python_version: 3.4
    """).strip()
    self.assertEqual(stdout.rstrip(), expected)
def test_check_unstable(self):
    """Check that 'check' warns about unstable benchmark results.

    Both benchmarks of the suite have a standard deviation that is a
    large fraction of the mean, so each must trigger the instability
    warning block.
    """
    suite = self.create_suite()

    with tests.temporary_file() as tmp_name:
        suite.dump(tmp_name)
        stdout = self.run_command('check', tmp_name)

    # NOTE(review): the expected block was reconstructed from a
    # line-collapsed source; blank-line placement follows pyperf's
    # 'check' output format -- verify.
    expected = textwrap.dedent("""
        py36
        ----

        WARNING: the benchmark result may be unstable
        * the standard deviation (500 ms) is 33% of the mean (1.50 sec)

        Try to rerun the benchmark with more runs, values and/or loops.
        Run '{0} -m pyperf system tune' command to reduce the system jitter.
        Use pyperf stats, pyperf dump and pyperf hist to analyze results.
        Use --quiet option to hide these warnings.

        py38
        ----

        WARNING: the benchmark result may be unstable
        * the standard deviation (500 ms) is 25% of the mean (2.00 sec)

        Try to rerun the benchmark with more runs, values and/or loops.
        Run '{0} -m pyperf system tune' command to reduce the system jitter.
        Use pyperf stats, pyperf dump and pyperf hist to analyze results.
        Use --quiet option to hide these warnings.
    """).strip()
    # '{0}' is substituted with the Python executable basename, which
    # is how pyperf renders the 'system tune' hint.
    expected = expected.format(os.path.basename(sys.executable))
    self.assertEqual(stdout.rstrip(), expected)
def _check_track_memory(self, track_option):
    """Run 'timeit' with *track_option* and verify memory tracking.

    track_option: the command-line flag enabling memory tracking
    (e.g. '--track-memory'); the resulting benchmark is checked by
    _check_track_memory_bench() with the same loop count.
    """
    with tests.temporary_file() as tmp_name:
        self.run_command('timeit',
                         track_option,
                         '-p2', '-w1', '-l5', '-n3',
                         '[1,2]*1000',
                         '-o', tmp_name)
        bench = pyperf.Benchmark.load(tmp_name)

    # loops must match the '-l5' option passed above
    self._check_track_memory_bench(bench, loops=5)
def test_show_common_metadata(self):
    """Check 'show -q --metadata' output: common metadata is factored
    out into its own section, per-benchmark metadata and the
    mean +- std dev line follow for each benchmark."""
    suite = self.create_suite()

    with tests.temporary_file() as tmp_name:
        suite.dump(tmp_name)
        stdout = self.run_command('show', '-q', '--metadata', tmp_name)

    # NOTE(review): the expected block was reconstructed from a
    # line-collapsed source; blank-line placement follows pyperf's
    # 'show' output format -- verify.
    expected = textwrap.dedent("""
        Common metadata
        ===============

        - hostname: toto

        py2
        ---

        Metadata:
        - python_version: 2.7

        Mean +- std dev: 1.50 sec +- 0.50 sec

        py3
        ---

        Metadata:
        - python_version: 3.4

        Mean +- std dev: 2.00 sec +- 0.50 sec
    """).strip()
    self.assertEqual(stdout.rstrip(), expected)
def test_stdout(self):
    """'convert --stdout' must write the benchmark JSON to stdout."""
    bench = self.create_bench((1.0, 1.5, 2.0))

    with tests.temporary_file() as tmp_name:
        bench.dump(tmp_name)
        stdout = self.run_command('convert', tmp_name, '--stdout')

    self.assertEqual(stdout, tests.benchmark_as_json(bench))
def test_load_gzip(self):
    """A benchmark dumped to a '.gz' file must load back unchanged.

    The '.gz' suffix makes dump()/load() transparently use gzip.
    """
    bench = self.create_dummy_benchmark()

    with tests.temporary_file(suffix='.gz') as tmp_name:
        bench.dump(tmp_name)
        bench2 = pyperf.Benchmark.load(tmp_name)

    self.check_benchmarks_equal(bench, bench2)
def test_command_track_memory(self):
    """Check 'command --track-memory' on a trivial subprocess.

    Runs 'python -c pass' as the benchmarked command and verifies
    the resulting benchmark tracks memory for the configured loops.
    """
    cmd = (sys.executable, '-c', 'pass')

    with tests.temporary_file() as tmp_name:
        args = ('command',
                '--track-memory',
                '-p2', '-w1', '-l2', '-n3',
                '-o', tmp_name,
                '--')
        args += cmd
        self.run_command(*args)
        bench = pyperf.Benchmark.load(tmp_name)

    # loops must match the '-l2' option passed above
    self._check_track_memory_bench(bench, loops=2)
def test_json(self):
    """A dumped suite must round-trip through JSON, and the loaded
    suite must remember the filename it was loaded from."""
    suite = self.create_dummy_suite()

    with tests.temporary_file() as filename:
        suite.dump(filename)
        suite = pyperf.BenchmarkSuite.load(filename)
        self.assertEqual(suite.filename, filename)
        self.check_dummy_suite(suite)
def test_abs_executable(self):
    """abs_executable() must keep an already-absolute, real path as-is.

    A symlink to the current interpreter is created at a realpath'ed
    temporary name; the test is skipped on platforms where symlinks
    are unsupported or fail (e.g. Windows without privileges).
    """
    with tests.temporary_file() as tmpname:
        tmpname = os.path.realpath(tmpname)
        try:
            os.symlink(sys.executable, tmpname)
        except (OSError, NotImplementedError):
            self.skipTest("os.symlink() failed")

        self.assertEqual(utils.abs_executable(tmpname), tmpname)
def test_dump_gzip(self):
    """A '.gz' dump must contain gzip-compressed benchmark JSON.

    The file is decompressed manually with gzip.open() and compared
    against the canonical JSON serialization of the benchmark.
    """
    bench = self.create_dummy_benchmark()

    with tests.temporary_file(suffix='.gz') as tmp_name:
        bench.dump(tmp_name)

        with gzip.open(tmp_name, 'rt', encoding='utf-8') as fp:
            json = fp.read()

    expected = tests.benchmark_as_json(bench)
    self.assertEqual(json, expected)
def test_dump_replace(self):
    """dump() must refuse to overwrite an existing file unless
    replace=True is passed."""
    suite = self.create_dummy_suite()

    with tests.temporary_file() as tmp_name:
        suite.dump(tmp_name)

        # dump() must not override an existing file by default
        with self.assertRaises(OSError) as cm:
            suite.dump(tmp_name)
        self.assertEqual(cm.exception.errno, errno.EEXIST)

        # ok if replace is true
        suite.dump(tmp_name, replace=True)
def test_export_csv(self):
    """Run the export_csv.py example on telco.json and check the CSV.

    The example script writes one mean value per line; the expected
    values are pinned for the bundled telco.json fixture.
    """
    script = 'export_csv.py'
    self.TESTED.add(script)
    script = os.path.join(EXAMPLES_DIR, script)
    # renamed from 'json' to avoid shadowing the json module name
    json_path = os.path.join(os.path.dirname(__file__), 'telco.json')

    with tests.temporary_file() as tmpname:
        cmd = [sys.executable, script, json_path, tmpname]
        exitcode = tests.run_command(cmd)
        self.assertEqual(exitcode, 0)

        with open(tmpname, 'r') as fp:
            lines = fp.readlines()

    lines = [line.rstrip() for line in lines]
    expected = [
        '0.02263077381239782',
        '0.022488519346734393',
        '0.02247294420317303',
    ]
    self.assertEqual(lines, expected)