Example 1
File: tools.py  Project: philpep/asv
def generate_result_dir(tmpdir, dvcs, values, branches=None):
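    """Create a self-contained asv results directory under ``tmpdir``.

    Writes machine.json for a dummy machine named "tarzan", one result file
    per commit in ``values`` for a single benchmark called "time_func", plus
    a matching benchmarks.json, and returns the Config pointing at it.
    """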
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })

    timestamp = datetime.datetime.utcnow()

    benchmark_version = sha256(os.urandom(16)).hexdigest()

    params = None
    param_names = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        value = {
            'result': [value],
            'params': [],
            'started_at': timestamp,
            'ended_at': timestamp,
            'stats': None,
            'samples': None,
            'number': None,
        }
        result.add_result("time_func", value, benchmark_version)
        result.save(result_dir)

    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    }, api_version=1)
    return conf
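These search hits start at the function definition, so the module-level imports of tools.py are not shown. A minimal sketch of what the generate_result_dir excerpts appear to rely on follows; the exact import paths are an assumption based on asv's package layout, not part of the original file.

# Assumed imports for the tools.py excerpts (paths are a best guess, not from the source):
import datetime
import os
from os.path import join
from hashlib import sha256

from asv import config, runner, util   # runner is only used by the newer api_version=2 variant
from asv.repo import get_repo
from asv.results import Results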
Example 2
File: tools.py  Project: craig8/asv
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
    })

    timestamp = datetime.datetime.utcnow()

    params = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        result.add_result("time_func", value, timestamp, timestamp)
        result.save(result_dir)

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": params or [],
        }
    }, api_version=1)
    return conf
Example 3
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })

    timestamp = datetime.datetime.utcnow()

    benchmark_version = sha256(os.urandom(16)).hexdigest()

    params = None
    param_names = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        value = {
            'result': [value],
            'params': [],
            'started_at': timestamp,
            'ended_at': timestamp,
            'stats': None,
            'samples': None,
            'number': None,
        }
        result.add_result("time_func", value, benchmark_version)
        result.save(result_dir)

    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    },
                    api_version=1)
    return conf
Example 4
File: tools.py  Project: wrwrwr/asv
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })

    timestamp = datetime.datetime.utcnow()

    params = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        result.add_result("time_func", value, timestamp, timestamp)
        result.save(result_dir)

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": params or [],
        }
    },
                    api_version=1)
    return conf
Example 5
def test_skip_param_selection():
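    # Skipping must only affect the selected parameter combinations: the stored
    # value for test_param(1) survives while test_param(2) and test_param(3)
    # become None (see the assertions at the end).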
    d = {'repo': 'foo'}
    d.update(ASV_CONF_JSON)
    conf = config.Config.from_json(d)

    class DummyEnv(object):
        name = 'env'

    d = [{
        'name': 'test_nonparam',
        'params': [],
        'version': '1'
    }, {
        'name': 'test_param',
        'params': [['1', '2', '3']],
        'param_names': ['n'],
        'version': '1'
    }]

    results = Results.unnamed()
    b = benchmarks.Benchmarks(conf, d,
                              [r'test_nonparam', r'test_param\([23]\)'])

    results.add_result(
        b['test_param'],
        runner.BenchmarkResult(result=[1, 2, 3],
                               samples=[None] * 3,
                               number=[None] * 3,
                               errcode=0,
                               stderr='',
                               profile=None))

    runner.skip_benchmarks(b, DummyEnv(), results)

    assert results._results.get('test_nonparam') is None
    assert results._results['test_param'] == [1, None, None]
Example 6
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })

    timestamp = datetime.datetime.utcnow()

    benchmark_version = sha256(os.urandom(16)).hexdigest()

    params = []
    param_names = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
            value = value["result"]
        else:
            value = [value]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None, {})
        value = runner.BenchmarkResult(result=value,
                                       samples=[None] * len(value),
                                       number=[None] * len(value),
                                       errcode=0,
                                       stderr='',
                                       profile=None)
        result.add_result(
            {
                "name": "time_func",
                "version": benchmark_version,
                "params": params
            },
            value,
            started_at=timestamp,
            duration=1.0)
        result.save(result_dir)

    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    },
                    api_version=2)
    return conf
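For orientation, a hypothetical call from a test might look like the sketch below. generate_test_repo and dvcs.get_hash are assumed helpers from the same test tooling and are not shown in the excerpts; treat the snippet as illustrative rather than as asv's actual test code.

# Hypothetical usage sketch (generate_test_repo and get_hash are assumed helpers):
def test_example(tmpdir):
    tmpdir = str(tmpdir)
    # Build a small throwaway repository with five commits.
    dvcs = generate_test_repo(tmpdir, list(range(5)))
    # Map each commit to a fake timing value for the "time_func" benchmark.
    commits = {dvcs.get_hash("master~{}".format(i)): float(i + 1)
               for i in range(5)}
    conf = generate_result_dir(tmpdir, dvcs, commits)
    # conf.results_dir now holds machine.json, one result file per commit,
    # and a benchmarks.json describing "time_func".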
Example 7
def test_run_benchmarks(benchmarks_fixture, tmpdir):
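    # End-to-end run of the example benchmark suite: checks appended samples,
    # stats, profiles, stderr, error codes and the recorded timestamps.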
    conf, repo, envs, commit_hash = benchmarks_fixture

    start_timestamp = datetime.datetime.utcnow()

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])

    # Old results to append to
    results = Results.unnamed()
    name = 'time_examples.TimeSuite.time_example_benchmark_1'
    results.add_result(b[name],
                       runner.BenchmarkResult(result=[1],
                                              samples=[[42.0, 24.0]],
                                              number=[1],
                                              errcode=0,
                                              stderr='',
                                              profile=None),
                       record_samples=True)

    # Run
    runner.run_benchmarks(b,
                          envs[0],
                          results=results,
                          profile=True,
                          show_stderr=True,
                          append_samples=True,
                          record_samples=True)
    times = ResultsWrapper(results, b)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1'].result != [None]
    stats = results.get_result_stats(name, b[name]['params'])
    assert isinstance(stats[0]['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    samples = results.get_result_samples(name, b[name]['params'])
    assert len(samples[0]) >= 4
    # Explicitly provided 'prev_samples' should come first
    assert samples[0][:2] == [42.0, 24.0]
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception'].result == [
        None
    ]
    assert times['subdir.time_subdir.time_foo'].result != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on PyPy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list'].result[0] > 1000
    assert times['time_secondary.track_value'].result == [42.0]
    assert times['time_secondary.track_value'].profile is not None
    assert isinstance(times['time_examples.time_with_warnings'].stderr,
                      type(''))
    assert times['time_examples.time_with_warnings'].errcode != 0

    assert times['time_examples.TimeWithBadTimer.time_it'].result == [0.0]

    assert times['params_examples.track_param'].params == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param'].result == [42, 42]

    assert times['params_examples.mem_param'].params == [['10', '20'],
                                                         ['2', '3']]
    assert len(times['params_examples.mem_param'].result) == 2 * 2

    assert times['params_examples.ParamSuite.track_value'].params == [[
        "'a'", "'b'", "'c'"
    ]]
    assert times['params_examples.ParamSuite.track_value'].result == [
        1 + 0, 2 + 0, 3 + 0
    ]

    assert isinstance(times['params_examples.TuningTest.time_it'].result[0],
                      float)
    assert isinstance(times['params_examples.TuningTest.time_it'].result[1],
                      float)

    assert isinstance(times['params_examples.time_skip'].result[0], float)
    assert isinstance(times['params_examples.time_skip'].result[1], float)
    assert util.is_nan(times['params_examples.time_skip'].result[2])

    assert times['peakmem_examples.peakmem_list'].result[0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example'].result == [
        500
    ]
    assert times['cache_examples.ClassLevelSetup.track_example2'].result == [
        500
    ]

    assert times['cache_examples.track_cache_foo'].result == [42]
    assert times['cache_examples.track_cache_bar'].result == [12]
    assert times['cache_examples.track_my_cache_foo'].result == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail'].result == [
        None
    ]
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail'].stderr

    assert times[
        'cache_examples.ClassLevelCacheTimeout.track_fail'].result == [None]
    assert times[
        'cache_examples.ClassLevelCacheTimeoutSuccess.track_success'].result == [
            0
        ]

    assert times['cache_examples.time_fail_second_run'].result == [None]
    assert times['cache_examples.time_fail_second_run'].samples == [None]

    profile_path = join(six.text_type(tmpdir), 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value'].profile)
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it'].stderr.split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'].stderr.split(
    ) in expected

    # Check tuple-form repeat attribute produced results
    assert 2 <= len(times['time_examples.time_auto_repeat'].samples[0]) <= 4

    # Check run time timestamps
    for name, result in times.items():
        assert result.started_at >= util.datetime_to_js_timestamp(
            start_timestamp)
        assert result.ended_at >= result.started_at
        assert result.ended_at <= util.datetime_to_js_timestamp(end_timestamp)