Example #1
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(stamp0), 'py', 'env')
    value = {
        'result': [42],
        'params': [],
        'stats': None,
        'samples': None,
        'number': None,
        'started_at': stamp1,
        'ended_at': stamp2
    }
    r.add_result('some_benchmark', value)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp2)
Example #2
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(stamp0), 'py', 'env', {})
    value = runner.BenchmarkResult(result=[42],
                                   samples=[None],
                                   number=[None],
                                   profile=None,
                                   errcode=0,
                                   stderr='')
    benchmark = {
        'name': 'some_benchmark',
        'version': 'some version',
        'params': []
    }
    r.add_result(benchmark, value, started_at=stamp1, ended_at=stamp2)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp2)
Example #3
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa', util.datetime_to_timestamp(stamp0),
                        'py', 'env')
    value = runner.BenchmarkResult(
        result=[42],
        samples=[None],
        number=[None],
        profile=None,
        errcode=0,
        stderr=''
    )
    benchmark = {'name': 'some_benchmark', 'version': 'some version', 'params': []}
    r.add_result(benchmark, value, started_at=stamp1, ended_at=stamp2)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp2)
Example #4
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa', util.datetime_to_timestamp(stamp0),
                        'py', 'env')
    value = {
        'result': [42],
        'params': [],
        'stats': None,
        'samples': None,
        'number': None,
        'started_at': stamp1,
        'ended_at': stamp2
    }
    r.add_result('some_benchmark', value, "some version")
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp2)
Example #5
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa', util.datetime_to_timestamp(stamp0),
                        'py', 'env')
    r.add_result('some_benchmark', 42, stamp1, stamp2)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp2)
Example #6
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = str(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    duration = 1.5

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(stamp0), 'py', 'env', {})
    value = runner.BenchmarkResult(result=[42],
                                   samples=[None],
                                   number=[None],
                                   profile=None,
                                   errcode=0,
                                   stderr='')
    benchmark = {
        'name': 'some_benchmark',
        'version': 'some version',
        'params': []
    }
    r.add_result(benchmark, value, started_at=stamp1, duration=duration)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    keys = r['result_columns']
    values = dict(zip(keys, r['results']['some_benchmark']))
    assert values['started_at'] == util.datetime_to_js_timestamp(stamp1)
    assert values['duration'] == duration
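
Example #6 (and `test_run_build_failure` below) exercises the newer columnar result layout: a shared `result_columns` list names the fields, and each benchmark stores a plain row of values. A minimal decoding helper, assuming only that layout; `decode_result_rows` is a hypothetical name, and `zip` truncating at the shorter sequence is what lets a row omit trailing columns (as the broken build's missing `duration` shows below).

def decode_result_rows(data):
    # Hypothetical helper, for illustration only: rebuild per-benchmark
    # dicts from {'result_columns': [...], 'results': {name: row, ...}}.
    # zip() stops at the shorter of the two sequences, so a row that
    # omits trailing columns simply yields a dict without those keys.
    cols = data['result_columns']
    return {name: dict(zip(cols, row))
            for name, row in data['results'].items()}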
Example #7
def test_datetime_to_js_timestamp():
    tss = [0, 0.5, -0.5, 12345.6789, -12345.6789, 1535910708.7767508]
    for ts in tss:
        t = datetime.datetime.utcfromtimestamp(ts)
        ts2 = util.datetime_to_js_timestamp(t)
        assert abs(ts * 1000 - ts2) <= 0.5

    # Check sub-second precision
    ms = 50
    ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 1000 * ms)
    assert util.datetime_to_js_timestamp(ts) == ms

    # Check rounding
    ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 500)
    assert util.datetime_to_js_timestamp(ts) == 1
    ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 499)
    assert util.datetime_to_js_timestamp(ts) == 0
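
The asserts above pin down the contract of `util.datetime_to_js_timestamp`: milliseconds since the Unix epoch, with half-millisecond values rounded up. A minimal sketch consistent with those asserts (a hypothetical re-implementation for illustration, not asv's actual code):

import datetime

_EPOCH = datetime.datetime(1970, 1, 1)

def datetime_to_js_timestamp_sketch(dt):
    # JS timestamps count milliseconds since the Unix epoch.  Adding
    # 500 microseconds before floor division rounds half-up, matching
    # the microsecond=500 -> 1 and microsecond=499 -> 0 asserts above.
    delta = dt - _EPOCH
    total_us = (delta.days * 86400 + delta.seconds) * 10**6 + delta.microseconds
    return (total_us + 500) // 1000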
Example #8
File: test_run.py Project: mtreinish/asv
def test_run_build_failure(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    conf.matrix = {}

    # Add a commit that fails to build
    dvcs = tools.Git(conf.repo)
    setup_py = join(dvcs.path, 'setup.py')
    with open(setup_py, 'r') as f:
        setup_py_content = f.read()
    with open(setup_py, 'w') as f:
        f.write("assert False")
    dvcs.add(join(dvcs.path, 'setup.py'))
    dvcs.commit("Break setup.py")
    with open(setup_py, 'w') as f:
        f.write(setup_py_content)
    dvcs.add(join(dvcs.path, 'setup.py'))
    dvcs.commit("Fix setup.py")

    # Test running it
    timestamp = util.datetime_to_js_timestamp(datetime.datetime.utcnow())

    bench_name = 'time_secondary.track_value'
    for commit in ['master^!', 'master~1^!']:
        tools.run_asv_with_conf(conf, 'run', commit,
                                '--quick', '--show-stderr',
                                '--bench', bench_name,
                                _machine_file=machine_file)

    # Check results
    hashes = dvcs.get_branch_hashes()
    fn_broken, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                                hashes[1][:8] + '-*.json'))
    fn_ok, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                            hashes[0][:8] + '-*.json'))

    data_broken = util.load_json(fn_broken)
    data_ok = util.load_json(fn_ok)

    for data in (data_broken, data_ok):
        value = dict(zip(data['result_columns'], data['results'][bench_name]))
        assert value['started_at'] >= timestamp
        if data is data_broken:
            assert 'duration' not in value
        else:
            assert value['duration'] >= 0

    assert len(data_broken['results']) == 1
    assert len(data_ok['results']) == 1
    assert data_broken['result_columns'][0] == 'result'
    assert data_ok['result_columns'][0] == 'result'
    assert data_broken['results'][bench_name][0] is None
    assert data_ok['results'][bench_name][0] == [42.0]

    # Check that parameters were also saved
    assert data_broken['params'] == data_ok['params']
Example #9
def test_run_build_failure(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    conf.matrix = {}

    # Add a commit that fails to build
    dvcs = tools.Git(conf.repo)
    setup_py = join(dvcs.path, 'setup.py')
    with open(setup_py, 'r') as f:
        setup_py_content = f.read()
    with open(setup_py, 'w') as f:
        f.write("assert False")
    dvcs.add(join(dvcs.path, 'setup.py'))
    dvcs.commit("Break setup.py")
    with open(setup_py, 'w') as f:
        f.write(setup_py_content)
    dvcs.add(join(dvcs.path, 'setup.py'))
    dvcs.commit("Fix setup.py")

    # Test running it
    timestamp = util.datetime_to_js_timestamp(datetime.datetime.utcnow())

    bench_name = 'time_secondary.track_value'
    for commit in ['master^!', 'master~1^!']:
        tools.run_asv_with_conf(conf, 'run', commit,
                                '--quick', '--show-stderr',
                                '--bench', bench_name,
                                _machine_file=machine_file)

    # Check results
    hashes = dvcs.get_branch_hashes()
    fn_broken, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                                hashes[1][:8] + '-*.json'))
    fn_ok, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                            hashes[0][:8] + '-*.json'))

    data_broken = util.load_json(fn_broken)
    data_ok = util.load_json(fn_ok)

    for data in (data_broken, data_ok):
        assert data['started_at'][bench_name] >= timestamp
        assert data['ended_at'][bench_name] >= data['started_at'][bench_name]

    assert len(data_broken['results']) == 1
    assert len(data_ok['results']) == 1
    assert data_broken['results'][bench_name] is None
    assert data_ok['results'][bench_name] == 42.0

    # Check that parameters were also saved
    assert data_broken['params'] == data_ok['params']
Example #10
def test_run_benchmarks(benchmarks_fixture, tmpdir):
    conf, repo, envs, commit_hash = benchmarks_fixture

    start_timestamp = datetime.datetime.utcnow()

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])

    # Old results to append to
    results = Results.unnamed()
    name = 'time_examples.TimeSuite.time_example_benchmark_1'
    results.add_result(b[name],
                       runner.BenchmarkResult(result=[1],
                                              samples=[[42.0, 24.0]],
                                              number=[1],
                                              errcode=0,
                                              stderr='',
                                              profile=None),
                       record_samples=True)

    # Run
    runner.run_benchmarks(b,
                          envs[0],
                          results=results,
                          profile=True,
                          show_stderr=True,
                          append_samples=True,
                          record_samples=True)
    times = ResultsWrapper(results, b)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1'].result != [None]
    stats = results.get_result_stats(name, b[name]['params'])
    assert isinstance(stats[0]['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    samples = results.get_result_samples(name, b[name]['params'])
    assert len(samples[0]) >= 4
    # Explicitly provided `prev_samples` should come first
    assert samples[0][:2] == [42.0, 24.0]
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception'].result == [
        None
    ]
    assert times['subdir.time_subdir.time_foo'].result != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on Pypy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list'].result[0] > 1000
    assert times['time_secondary.track_value'].result == [42.0]
    assert times['time_secondary.track_value'].profile is not None
    assert isinstance(times['time_examples.time_with_warnings'].stderr,
                      type(''))
    assert times['time_examples.time_with_warnings'].errcode != 0

    assert times['time_examples.TimeWithBadTimer.time_it'].result == [0.0]

    assert times['params_examples.track_param'].params == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param'].result == [42, 42]

    assert times['params_examples.mem_param'].params == [['10', '20'],
                                                         ['2', '3']]
    assert len(times['params_examples.mem_param'].result) == 2 * 2

    assert times['params_examples.ParamSuite.track_value'].params == [[
        "'a'", "'b'", "'c'"
    ]]
    assert times['params_examples.ParamSuite.track_value'].result == [
        1 + 0, 2 + 0, 3 + 0
    ]

    assert isinstance(times['params_examples.TuningTest.time_it'].result[0],
                      float)
    assert isinstance(times['params_examples.TuningTest.time_it'].result[1],
                      float)

    assert isinstance(times['params_examples.time_skip'].result[0], float)
    assert isinstance(times['params_examples.time_skip'].result[1], float)
    assert util.is_nan(times['params_examples.time_skip'].result[2])

    assert times['peakmem_examples.peakmem_list'].result[0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example'].result == [
        500
    ]
    assert times['cache_examples.ClassLevelSetup.track_example2'].result == [
        500
    ]

    assert times['cache_examples.track_cache_foo'].result == [42]
    assert times['cache_examples.track_cache_bar'].result == [12]
    assert times['cache_examples.track_my_cache_foo'].result == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail'].result == [
        None
    ]
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail'].stderr

    assert times[
        'cache_examples.ClassLevelCacheTimeout.track_fail'].result == [None]
    assert times[
        'cache_examples.ClassLevelCacheTimeoutSuccess.track_success'].result == [
            0
        ]

    assert times['cache_examples.time_fail_second_run'].result == [None]
    assert times['cache_examples.time_fail_second_run'].samples == [None]

    profile_path = join(six.text_type(tmpdir), 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value'].profile)
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it'].stderr.split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'].stderr.split(
    ) in expected

    # Check tuple-form repeat attribute produced results
    assert 2 <= len(times['time_examples.time_auto_repeat'].samples[0]) <= 4

    # Check run time timestamps
    for name, result in times.items():
        assert result.started_at >= util.datetime_to_js_timestamp(
            start_timestamp)
        assert result.ended_at >= result.started_at
        assert result.ended_at <= util.datetime_to_js_timestamp(end_timestamp)
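
Finally, a quick usage sketch outside the test suite (assuming the `asv.util` module these tests import): the returned millisecond count can be fed straight to JavaScript's `new Date(ms)` on the reporting side.

import datetime

from asv import util

# Milliseconds since the Unix epoch, e.g. 1535910708777; a JS client
# can consume it as `new Date(1535910708777)`.
now_ms = util.datetime_to_js_timestamp(datetime.datetime.utcnow())
print(now_ms)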