def test_quick(tmpdir):
    # Check that the quick option works
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))

    b = benchmarks.Benchmarks(conf, repo, envs)
    skip_names = [name for name in b.keys()
                  if name != 'time_examples.TimeWithRepeat.time_it']
    times = b.run_benchmarks(envs[0], quick=True, show_stderr=True,
                             skip=skip_names)

    assert len(times) == 1

    # Check that the benchmark was run only once. The result for quick==False
    # is tested above in test_find_benchmarks
    expected = ["<1>"]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected
def test_find_benchmarks_cwd_imports(tmpdir):
    # Test that files in the directory above the benchmark suite are
    # not importable
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        pass

    with open(os.path.join('benchmark', 'test.py'), 'w') as f:
        f.write("""
try:
    import this_should_really_not_be_here
    raise AssertionError('This should not happen!')
except ImportError:
    pass

def track_this():
    return 0
""")

    with open(os.path.join('this_should_really_not_be_here.py'), 'w') as f:
        f.write("raise AssertionError('Should not be imported!')")

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='track_this')
    assert len(b) == 1
def test_invalid_benchmark_tree(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['benchmark_dir'] = INVALID_BENCHMARK_DIR
    d['env_dir'] = "env"
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    with pytest.raises(util.UserError):
        b = benchmarks.Benchmarks(conf)
def test_skip_param_selection():
    d = {'repo': 'foo'}
    d.update(ASV_CONF_JSON)
    conf = config.Config.from_json(d)

    class DummyEnv(object):
        name = 'env'

    d = [
        {'name': 'test_nonparam', 'params': []},
        {'name': 'test_param',
         'params': [['1', '2', '3']],
         'param_names': ['n']}
    ]

    b = benchmarks.Benchmarks(conf, d, [r'test_nonparam', r'test_param\([23]\)'])
    result = b.skip_benchmarks(DummyEnv())

    assert result['test_nonparam'].result == None
    assert util.is_nan(result['test_param'].result[0])
    assert result['test_param'].result[1:] == [None, None]
def test_skip_param_selection():
    d = {'repo': 'foo'}
    d.update(ASV_CONF_JSON)
    conf = config.Config.from_json(d)

    class DummyEnv(object):
        name = 'env'

    d = [
        {'name': 'test_nonparam', 'params': [], 'version': '1'},
        {'name': 'test_param',
         'params': [['1', '2', '3']],
         'param_names': ['n'],
         'version': '1'}
    ]

    results = Results.unnamed()
    b = benchmarks.Benchmarks(conf, d, [r'test_nonparam', r'test_param\([23]\)'])

    results.add_result(b['test_param'],
                       runner.BenchmarkResult(result=[1, 2, 3],
                                              samples=[None] * 3,
                                              number=[None] * 3,
                                              errcode=0, stderr='', profile=None))

    runner.skip_benchmarks(b, DummyEnv(), results)

    assert results._results.get('test_nonparam') == None
    assert results._results['test_param'] == [1, None, None]
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))

    b = benchmarks.Benchmarks(conf, repo, envs, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, repo, envs, regex='example')
    assert len(b) == 25

    b = benchmarks.Benchmarks(conf, repo, envs, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, repo, envs,
                              regex=['time_example_benchmark_1',
                                     'some regexp that does not match anything'])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, repo, envs, regex='custom')
    assert sorted(b.keys()) == ['custom.time_function', 'custom.track_method']

    b = benchmarks.Benchmarks(conf, repo, envs)
    assert len(b) == 33

    start_timestamp = datetime.datetime.utcnow()

    b = benchmarks.Benchmarks(conf, repo, envs)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1']['result'] != [None]
    assert isinstance(
        times['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0]['std'],
        float)
    # The exact number of samples may vary if the calibration is not fully accurate
    assert len(times['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) in (8, 9, 10)
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception']['result'] == [None]
    assert times['subdir.time_subdir.time_foo']['result'] != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on Pypy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list']['result'][0] > 1000
    assert times['time_secondary.track_value']['result'] == [42.0]
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['time_examples.TimeWithBadTimer.time_it']['result'] == [0.0]

    assert times['params_examples.track_param']['params'] == [
        ["<class 'benchmark.params_examples.ClassOne'>",
         "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result'] == [42, 42]

    assert times['params_examples.mem_param']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result'] == [1 + 0, 2 + 0, 3 + 0]

    assert isinstance(times['params_examples.TuningTest.time_it']['result'][0], float)

    assert isinstance(times['params_examples.time_skip']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'][0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == [500]
    assert times['cache_examples.ClassLevelSetup.track_example2']['result'] == [500]

    assert times['cache_examples.track_cache_foo']['result'] == [42]
    assert times['cache_examples.track_cache_bar']['result'] == [12]
    assert times['cache_examples.track_my_cache_foo']['result'] == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail']['result'] == None
    assert 'raise RuntimeError()' in times['cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    assert times['cache_examples.ClassLevelCacheTimeout.track_fail']['result'] == None
    assert times['cache_examples.ClassLevelCacheTimeoutSuccess.track_success']['result'] == [0]

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it']['stderr'].split() in expected

    # Check run time timestamps
    for name, result in times.items():
        assert result['started_at'] >= start_timestamp
        assert result['ended_at'] >= result['started_at']
        assert result['ended_at'] <= end_timestamp
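# Illustrative sketch (an assumption, not the actual contents of
# benchmark/params_examples.py): the 'params_examples.ParamSuite.track_value'
# assertions above rely on asv's convention that a ``track_*`` benchmark simply
# returns the tracked value and that ``params``/``param_names`` expand it into
# one result per parameter, so a suite along these lines would yield
# params [["'a'", "'b'", "'c'"]] and results [1, 2, 3].
class _ExampleParamSuite(object):
    params = [['a', 'b', 'c']]
    param_names = ['value']

    def setup(self, value):
        # Hypothetical mapping from each parameter to the value being tracked.
        self.values = {'a': 1, 'b': 2, 'c': 3}

    def track_value(self, value):
        return self.values[value]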
def test_code_extraction(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))

    b = benchmarks.Benchmarks(conf, repo, envs, regex=r'^code_extraction\.')

    expected_code = textwrap.dedent("""
    def track_test():
        # module-level 難
        return 0

    def setup():
        # module-level
        pass

    def setup_cache():
        # module-level
        pass
    """).strip()

    bench = b['code_extraction.track_test']
    assert bench['version'] == sha256(bench['code'].encode('utf-8')).hexdigest()
    assert bench['code'] == expected_code

    expected_code = textwrap.dedent("""
    class MyClass:
        def track_test(self):
            # class-level 難
            return 0

    def setup():
        # module-level
        pass

    class MyClass:
        def setup(self):
            # class-level
            pass

        def setup_cache(self):
            # class-level
            pass
    """).strip()

    bench = b['code_extraction.MyClass.track_test']
    assert bench['version'] == sha256(bench['code'].encode('utf-8')).hexdigest()

    if sys.version_info[:2] != (3, 2):
        # Python 3.2 doesn't have __qualname__
        assert bench['code'] == expected_code
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='example')
    assert len(b) == 22

    b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, regex=['time_example_benchmark_1',
                                           'some regexp that does not match anything'])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf)
    assert len(b) == 26

    envs = list(environment.get_environments(conf))

    b = benchmarks.Benchmarks(conf)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times['subdir.time_subdir.time_foo']['result'] is not None
    assert times['mem_examples.mem_list']['result'] > 1000
    assert times['time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['params_examples.track_param']['result']['params'] == [
        ["<class 'benchmark.params_examples.ClassOne'>",
         "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result']['result'] == [42, 42]

    assert times['params_examples.mem_param']['result']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['result']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result']['result'] == [1 + 0, 2 + 0, 3 + 0]

    assert isinstance(times['params_examples.TuningTest.time_it']['result']['result'][0], float)

    assert isinstance(times['params_examples.time_skip']['result']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == 500
    assert times['cache_examples.ClassLevelSetup.track_example2']['result'] == 500

    assert times['cache_examples.track_cache_foo']['result'] == 42
    assert times['cache_examples.track_cache_bar']['result'] == 12
    assert times['cache_examples.track_my_cache_foo']['result'] == 0

    assert times['cache_examples.ClassLevelSetupFail.track_fail']['result'] == None
    assert 'raise RuntimeError()' in times['cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected

    # Calibration of iterations should not rerun setup
    expected = ['setup'] * 2
    assert times['time_examples.TimeWithRepeatCalibrate.time_it']['stderr'].split() == expected