Example #1
def get_results():
    results = util.load_json(glob.glob(join(
        tmpdir, 'results_workflow', 'orangutan', '*-*.json'))[0])
    # replacing NaN with 'n/a' makes assertions easier
    return ['n/a' if util.is_nan(item) else item
            for item in results['results'][
                'params_examples.track_param_selection']['result']]
Example #2
def test_skip_param_selection():
    d = {'repo': 'foo'}
    d.update(ASV_CONF_JSON)
    conf = config.Config.from_json(d)

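    # Minimal dummy environment object passed to skip_benchmarks below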
    class DummyEnv(object):
        name = 'env'

    d = [
        {'name': 'test_nonparam', 'params': []},
        {'name': 'test_param',
         'params': [['1', '2', '3']],
         'param_names': ['n']}
    ]

    b = benchmarks.Benchmarks(conf, d, [r'test_nonparam', r'test_param\([23]\)'])
    result = b.skip_benchmarks(DummyEnv())

    assert result['test_nonparam'].result is None
    assert util.is_nan(result['test_param'].result[0])
    assert result['test_param'].result[1:] == [None, None]
Example #3
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)

    envs = list(environment.get_environments(conf, None))

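    # Filter the discovered benchmarks with a regex (or a list of regexes)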
    b = benchmarks.Benchmarks(conf, repo, envs, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, repo, envs, regex='example')
    assert len(b) == 25

    b = benchmarks.Benchmarks(conf,
                              repo,
                              envs,
                              regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf,
                              repo,
                              envs,
                              regex=[
                                  'time_example_benchmark_1',
                                  'some regexp that does not match anything'
                              ])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, repo, envs, regex='custom')
    assert sorted(b.keys()) == ['custom.time_function', 'custom.track_method']

    b = benchmarks.Benchmarks(conf, repo, envs)
    assert len(b) == 33

    start_timestamp = datetime.datetime.utcnow()

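    # Run the full suite once, collecting profiles and stderr output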
    b = benchmarks.Benchmarks(conf, repo, envs)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1'][
        'result'] != [None]
    assert isinstance(
        times['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0]
        ['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    assert len(times['time_examples.TimeSuite.time_example_benchmark_1']
               ['samples'][0]) in (8, 9, 10)
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception']['result'] == [
        None
    ]
    assert times['subdir.time_subdir.time_foo']['result'] != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on PyPy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list']['result'][0] > 1000
    assert times['time_secondary.track_value']['result'] == [42.0]
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['time_examples.TimeWithBadTimer.time_it']['result'] == [0.0]

    assert times['params_examples.track_param']['params'] == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param']['result'] == [42, 42]

    assert times['params_examples.mem_param']['params'] == [['10', '20'],
                                                            ['2', '3']]
    assert len(times['params_examples.mem_param']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['params'] == [[
        "'a'", "'b'", "'c'"
    ]]
    assert times['params_examples.ParamSuite.track_value']['result'] == [
        1 + 0, 2 + 0, 3 + 0
    ]

    assert isinstance(times['params_examples.TuningTest.time_it']['result'][0],
                      float)

    assert isinstance(times['params_examples.time_skip']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'][0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == [
        500
    ]
    assert times['cache_examples.ClassLevelSetup.track_example2'][
        'result'] == [500]

    assert times['cache_examples.track_cache_foo']['result'] == [42]
    assert times['cache_examples.track_cache_bar']['result'] == [12]
    assert times['cache_examples.track_my_cache_foo']['result'] == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail'][
        'result'] is None
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    assert times['cache_examples.ClassLevelCacheTimeout.track_fail'][
        'result'] is None
    assert times['cache_examples.ClassLevelCacheTimeoutSuccess.track_success'][
        'result'] == [0]

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'][
        'stderr'].split() in expected

    # Check run time timestamps
    for name, result in times.items():
        assert result['started_at'] >= start_timestamp
        assert result['ended_at'] >= result['started_at']
        assert result['ended_at'] <= end_timestamp
Example #4
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)

    envs = list(environment.get_environments(conf, None))

    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

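    # Discover the benchmarks available at this commit, filtered by regex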
    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='example')
    assert len(b) == 25

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex=['time_example_benchmark_1',
                                              'some regexp that does not match anything'])
    assert len(b) == 2

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash], regex='custom')
    assert sorted(b.keys()) == ['custom.time_function', 'custom.track_method',
                                'named.track_custom_pretty_name']
    assert 'pretty_name' not in b['custom.track_method']
    assert b['custom.time_function']['pretty_name'] == 'My Custom Function'
    assert b['named.track_custom_pretty_name']['pretty_name'] == 'this.is/the.answer'

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
    assert len(b) == 35

    assert 'named.OtherSuite.track_some_func' in b

    start_timestamp = datetime.datetime.utcnow()

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] != [None]
    assert isinstance(times['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0]['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    assert len(times['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) >= 5
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] == [None]
    assert times[
        'subdir.time_subdir.time_foo']['result'] != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on PyPy, since asizeof
        # is CPython-only
        assert times[
            'mem_examples.mem_list']['result'][0] > 1000
    assert times[
        'time_secondary.track_value']['result'] == [42.0]
    assert 'profile' in times[
        'time_secondary.track_value']
    assert 'stderr' in times[
        'time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['time_examples.TimeWithBadTimer.time_it']['result'] == [0.0]

    assert times['params_examples.track_param']['params'] == [["<class 'benchmark.params_examples.ClassOne'>",
                                                               "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result'] == [42, 42]

    assert times['params_examples.mem_param']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']) == 2*2

    assert times['params_examples.ParamSuite.track_value']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result'] == [1+0, 2+0, 3+0]

    assert isinstance(times['params_examples.TuningTest.time_it']['result'][0], float)

    assert isinstance(times['params_examples.time_skip']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'][0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == [500]
    assert times['cache_examples.ClassLevelSetup.track_example2']['result'] == [500]

    assert times['cache_examples.track_cache_foo']['result'] == [42]
    assert times['cache_examples.track_cache_bar']['result'] == [12]
    assert times['cache_examples.track_my_cache_foo']['result'] == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail']['result'] is None
    assert 'raise RuntimeError()' in times['cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    assert times['cache_examples.ClassLevelCacheTimeout.track_fail']['result'] is None
    assert times['cache_examples.ClassLevelCacheTimeoutSuccess.track_success']['result'] == [0]

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup']*2, ['setup']*3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it']['stderr'].split() in expected

    # Check run time timestamps
    for name, result in times.items():
        assert result['started_at'] >= start_timestamp
        assert result['ended_at'] >= result['started_at']
        assert result['ended_at'] <= end_timestamp
Example #5
def test_run_benchmarks(benchmarks_fixture, tmpdir):
    conf, repo, envs, commit_hash = benchmarks_fixture

    start_timestamp = datetime.datetime.utcnow()

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])

    # Old results to append to
    results = Results.unnamed()
    name = 'time_examples.TimeSuite.time_example_benchmark_1'
    results.add_result(b[name],
                       runner.BenchmarkResult(result=[1],
                                              samples=[[42.0, 24.0]],
                                              number=[1],
                                              errcode=0,
                                              stderr='',
                                              profile=None),
                       record_samples=True)

    # Run
    runner.run_benchmarks(b,
                          envs[0],
                          results=results,
                          profile=True,
                          show_stderr=True,
                          append_samples=True,
                          record_samples=True)
    times = ResultsWrapper(results, b)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1'].result != [None]
    stats = results.get_result_stats(name, b[name]['params'])
    assert isinstance(stats[0]['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    samples = results.get_result_samples(name, b[name]['params'])
    assert len(samples[0]) >= 4
    # Explicitly provided 'prev_samples' should come first
    assert samples[0][:2] == [42.0, 24.0]
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception'].result == [
        None
    ]
    assert times['subdir.time_subdir.time_foo'].result != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on PyPy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list'].result[0] > 1000
    assert times['time_secondary.track_value'].result == [42.0]
    assert times['time_secondary.track_value'].profile is not None
    assert isinstance(times['time_examples.time_with_warnings'].stderr,
                      type(''))
    assert times['time_examples.time_with_warnings'].errcode != 0

    assert times['time_examples.TimeWithBadTimer.time_it'].result == [0.0]

    assert times['params_examples.track_param'].params == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param'].result == [42, 42]

    assert times['params_examples.mem_param'].params == [['10', '20'],
                                                         ['2', '3']]
    assert len(times['params_examples.mem_param'].result) == 2 * 2

    assert times['params_examples.ParamSuite.track_value'].params == [[
        "'a'", "'b'", "'c'"
    ]]
    assert times['params_examples.ParamSuite.track_value'].result == [
        1 + 0, 2 + 0, 3 + 0
    ]

    assert isinstance(times['params_examples.TuningTest.time_it'].result[0],
                      float)
    assert isinstance(times['params_examples.TuningTest.time_it'].result[1],
                      float)

    assert isinstance(times['params_examples.time_skip'].result[0], float)
    assert isinstance(times['params_examples.time_skip'].result[1], float)
    assert util.is_nan(times['params_examples.time_skip'].result[2])

    assert times['peakmem_examples.peakmem_list'].result[0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example'].result == [
        500
    ]
    assert times['cache_examples.ClassLevelSetup.track_example2'].result == [
        500
    ]

    assert times['cache_examples.track_cache_foo'].result == [42]
    assert times['cache_examples.track_cache_bar'].result == [12]
    assert times['cache_examples.track_my_cache_foo'].result == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail'].result == [
        None
    ]
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail'].stderr

    assert times[
        'cache_examples.ClassLevelCacheTimeout.track_fail'].result == [None]
    assert times[
        'cache_examples.ClassLevelCacheTimeoutSuccess.track_success'].result == [
            0
        ]

    assert times['cache_examples.time_fail_second_run'].result == [None]
    assert times['cache_examples.time_fail_second_run'].samples == [None]

    profile_path = join(six.text_type(tmpdir), 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value'].profile)
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it'].stderr.split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'].stderr.split(
    ) in expected

    # Check tuple-form repeat attribute produced results
    assert 2 <= len(times['time_examples.time_auto_repeat'].samples[0]) <= 4

    # Check run time timestamps
    for name, result in times.items():
        assert result.started_at >= util.datetime_to_js_timestamp(
            start_timestamp)
        assert result.ended_at >= result.started_at
        assert result.ended_at <= util.datetime_to_js_timestamp(end_timestamp)
Example #6
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='example')
    assert len(b) == 22

    b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf,
                              regex=[
                                  'time_example_benchmark_1',
                                  'some regexp that does not match anything'
                              ])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf)
    assert len(b) == 26

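    # Build the environment list and run every benchmark in the first one, with profiling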
    envs = list(environment.get_environments(conf))
    b = benchmarks.Benchmarks(conf)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1'][
        'result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception'][
        'result'] is None
    assert times['subdir.time_subdir.time_foo']['result'] is not None
    assert times['mem_examples.mem_list']['result'] > 1000
    assert times['time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['params_examples.track_param']['result']['params'] == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param']['result']['result'] == [42, 42]

    assert times['params_examples.mem_param']['result']['params'] == [[
        '10', '20'
    ], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['result'][
        'params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result'][
        'result'] == [1 + 0, 2 + 0, 3 + 0]

    assert isinstance(
        times['params_examples.TuningTest.time_it']['result']['result'][0],
        float)

    assert isinstance(
        times['params_examples.time_skip']['result']['result'][0], float)
    assert isinstance(
        times['params_examples.time_skip']['result']['result'][1], float)
    assert util.is_nan(
        times['params_examples.time_skip']['result']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example'][
        'result'] == 500
    assert times['cache_examples.ClassLevelSetup.track_example2'][
        'result'] == 500

    assert times['cache_examples.track_cache_foo']['result'] == 42
    assert times['cache_examples.track_cache_bar']['result'] == 12
    assert times['cache_examples.track_my_cache_foo']['result'] == 0

    assert times['cache_examples.ClassLevelSetupFail.track_fail'][
        'result'] is None
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = ['setup'] * 2
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'][
        'stderr'].split() == expected
Example #7
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    envs = list(environment.get_environments(conf, None))

    b = benchmarks.Benchmarks(conf, envs, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, envs, regex='example')
    assert len(b) == 22

    b = benchmarks.Benchmarks(conf, envs, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, envs, regex=['time_example_benchmark_1',
                                                 'some regexp that does not match anything'])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, envs)
    assert len(b) == 26

    b = benchmarks.Benchmarks(conf, envs)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times[
        'subdir.time_subdir.time_foo']['result'] is not None
    assert times[
        'mem_examples.mem_list']['result'] > 1000
    assert times[
        'time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times[
        'time_secondary.track_value']
    assert 'stderr' in times[
        'time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['params_examples.track_param']['result']['params'] == [["<class 'benchmark.params_examples.ClassOne'>",
                                                                         "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result']['result'] == [42, 42]

    assert times['params_examples.mem_param']['result']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']['result']) == 2*2

    assert times['params_examples.ParamSuite.track_value']['result']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result']['result'] == [1+0, 2+0, 3+0]

    assert isinstance(times['params_examples.TuningTest.time_it']['result']['result'][0], float)

    assert isinstance(times['params_examples.time_skip']['result']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == 500
    assert times['cache_examples.ClassLevelSetup.track_example2']['result'] == 500

    assert times['cache_examples.track_cache_foo']['result'] == 42
    assert times['cache_examples.track_cache_bar']['result'] == 12
    assert times['cache_examples.track_my_cache_foo']['result'] == 0

    assert times['cache_examples.ClassLevelSetupFail.track_fail']['result'] is None
    assert 'raise RuntimeError()' in times['cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected

    # Calibration of iterations should not rerun setup
    expected = ['setup']*2
    assert times['time_examples.TimeWithRepeatCalibrate.time_it']['stderr'].split() == expected
Example #8
def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = join(tmpdir, "env")
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='example')
    assert len(b) == 14

    b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, regex=['time_example_benchmark_1',
                                           'some regexp that does not match anything'])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf)
    assert len(b) == 18

    envs = list(environment.get_environments(conf))
    b = benchmarks.Benchmarks(conf)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times[
        'subdir.time_subdir.time_foo']['result'] is not None
    assert times[
        'mem_examples.mem_list']['result'] > 2000
    assert times[
        'time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times[
        'time_secondary.track_value']
    assert 'stderr' in times[
        'time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['params_examples.track_param']['result']['params'] == [["<class 'benchmark.params_examples.ClassOne'>",
                                                                         "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result']['result'] == [42, 42]

    assert times['params_examples.mem_param']['result']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']['result']) == 2*2

    assert times['params_examples.ParamSuite.track_value']['result']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result']['result'] == [1+0, 2+0, 3+0]

    assert isinstance(times['params_examples.TuningTest.time_it']['result']['result'][0], float)

    assert isinstance(times['params_examples.time_skip']['result']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'] >= 4 * 2**20

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)