Example #1
def test_run_append_samples(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    # Only one environment
    conf.matrix['asv_dummy_test_package_2'] = conf.matrix['asv_dummy_test_package_2'][:1]

    # Tests multiple calls to "asv run --append-samples"
    def run_it():
        tools.run_asv_with_conf(conf, 'run', "master^!",
                                '--bench', 'time_examples.TimeSuite.time_example_benchmark_1',
                                '--append-samples', '-a', 'repeat=(1, 1, 10.0)', '-a', 'processes=1',
                                '-a', 'number=1', '-a', 'warmup_time=0',
                                _machine_file=machine_file)

    run_it()

    result_dir = join(tmpdir, 'results_workflow', 'orangutan')
    result_fn, = [join(result_dir, fn) for fn in os.listdir(result_dir)
                  if fn != 'machine.json']

    data = util.load_json(result_fn)
    assert data['results']['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0] is not None
    assert len(data['results']['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) == 1

    run_it()
    data = util.load_json(result_fn)
    assert len(data['results']['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) == 2
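
Schematically, these assertions read a per-benchmark mapping with one stats entry and one sample list per parameter combination; --append-samples extends the existing sample list instead of replacing it. An illustrative shape, inferred only from the asserts above:

# After the first run (illustrative values, one parameter combination):
#   data['results'][bench] == {'stats': [{...}], 'samples': [[0.0012]]}
# After a second run with --append-samples, the sample list grows:
#   data['results'][bench] == {'stats': [{...}], 'samples': [[0.0012, 0.0011]]}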
Example #2
def test_quickstart(tmpdir):
    tmpdir = six.text_type(tmpdir)

    dest = join(tmpdir, "separate")
    os.makedirs(dest)

    tools.run_asv("quickstart", "--no-top-level", "--dest", dest)

    assert isfile(join(dest, "asv.conf.json"))
    assert isfile(join(dest, "benchmarks", "benchmarks.py"))
    conf = util.load_json(join(dest, "asv.conf.json"))
    assert "env_dir" not in conf
    assert "html_dir" not in conf
    assert "results_dir" not in conf

    dest = join(tmpdir, "same")
    os.makedirs(dest)

    try:
        asv.commands.quickstart.raw_input = lambda msg: "y"
        tools.run_asv("quickstart", "--dest", dest)
    finally:
        del asv.commands.quickstart.raw_input

    assert isfile(join(dest, "asv.conf.json"))
    assert isfile(join(dest, "benchmarks", "benchmarks.py"))
    conf = util.load_json(join(dest, "asv.conf.json"))
    assert conf["env_dir"] != "env"
    assert conf["html_dir"] != "html"
    assert conf["results_dir"] != "results"
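
The try/finally patching of the prompt above can also be written with pytest's monkeypatch fixture, which restores the attribute automatically. A minimal sketch, assuming the test accepts monkeypatch and that asv.commands.quickstart still reads a module-level raw_input:

def test_quickstart_overwrite(tmpdir, monkeypatch):
    dest = join(str(tmpdir), "same")
    os.makedirs(dest)

    # Answer "y" to the overwrite prompt; undone automatically at teardown.
    monkeypatch.setattr(asv.commands.quickstart, "raw_input",
                        lambda msg: "y", raising=False)
    tools.run_asv("quickstart", "--dest", dest)

    assert isfile(join(dest, "asv.conf.json"))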
Example #3
def test_presence_checks(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7"]
    conf.matrix = {}
    environments = list(environment.get_environments(conf))

    for env in environments:
        env.create()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '3.4'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == '2.7'
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fn = os.path.join(env._path, 'bin', 'pip')
        os.remove(pip_fn)
        env._is_setup = False
        env.create()
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])
Example #4
def test_presence_checks(tmpdir, monkeypatch):
    conf = config.Config()

    if WIN:
        # Tell conda not to use hardlinks: on Windows it's not possible
        # to delete hard links to files in use, which causes problems when
        # trying to clean up environments during this test
        monkeypatch.setenv(str('CONDA_ALWAYS_COPY'), str('True'))

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    environments = list(environment.get_environments(conf, None))

    for env in environments:
        env.create()
        assert env.check_presence()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '0'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == PYTHON_VER1
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fns = [os.path.join(env._path, 'bin', 'pip')]
        if WIN:
            pip_fns += [
                os.path.join(env._path, 'bin', 'pip.exe'),
                os.path.join(env._path, 'Scripts', 'pip'),
                os.path.join(env._path, 'Scripts', 'pip.exe')
            ]

        some_removed = False
        for pip_fn in pip_fns:
            if os.path.isfile(pip_fn):
                some_removed = True
                os.remove(pip_fn)
        assert some_removed

        env._is_setup = False
        env.create()
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])
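
The recreate-on-clobber behaviour these tests exercise reduces to: read the env info file, compare it with what the environment expects, and rebuild on any mismatch or missing file. A rough stdlib sketch of the pattern (not asv's actual implementation):

import json
import os
import shutil

def ensure_environment(env_path, expected_python):
    # Recreate the environment directory if its info file is missing or stale.
    info_fn = os.path.join(env_path, 'asv-env-info.json')
    try:
        with open(info_fn) as f:
            info = json.load(f)
        if info.get('python') != expected_python:
            raise ValueError('stale environment')
    except (OSError, ValueError):
        shutil.rmtree(env_path, ignore_errors=True)
        os.makedirs(env_path)
        with open(info_fn, 'w') as f:
            json.dump({'python': expected_python}, f)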
Example #5
def test_run_build_failure(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    conf.matrix = {}

    # Add a commit that fails to build
    dvcs = tools.Git(conf.repo)
    setup_py = join(dvcs.path, 'setup.py')
    with open(setup_py, 'r') as f:
        setup_py_content = f.read()
    with open(setup_py, 'w') as f:
        f.write("assert False")
    dvcs.add(join(dvcs.path, 'setup.py'))
    dvcs.commit("Break setup.py")
    with open(setup_py, 'w') as f:
        f.write(setup_py_content)
    dvcs.add(join(dvcs.path, 'setup.py'))
    dvcs.commit("Fix setup.py")

    # Test running it
    timestamp = util.datetime_to_js_timestamp(datetime.datetime.utcnow())

    bench_name = 'time_secondary.track_value'
    for commit in ['master^!', 'master~1^!']:
        tools.run_asv_with_conf(conf, 'run', commit,
                                '--quick', '--show-stderr',
                                '--bench', bench_name,
                                _machine_file=machine_file)

    # Check results
    hashes = dvcs.get_branch_hashes()
    fn_broken, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                                    hashes[1][:8] + '-*.json'))
    fn_ok, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                                hashes[0][:8] + '-*.json'))

    data_broken = util.load_json(fn_broken)
    data_ok = util.load_json(fn_ok)

    for data in (data_broken, data_ok):
        assert data['started_at'][bench_name] >= timestamp
        assert data['ended_at'][bench_name] >= data['started_at'][bench_name]

    assert len(data_broken['results']) == 1
    assert len(data_ok['results']) == 1
    assert data_broken['results'][bench_name] is None
    assert data_ok['results'][bench_name] == 42.0

    # Check that parameters were also saved
    assert data_broken['params'] == data_ok['params']
Example #6
def test_write_load_json(tmpdir):
    data = {
        'a': 1,
        'b': 2,
        'c': 3
    }
    orig_data = dict(data)

    filename = os.path.join(str(tmpdir), 'test.json')

    util.write_json(filename, data)
    data2 = util.load_json(filename)
    assert data == orig_data
    assert data2 == orig_data

    util.write_json(filename, data, 3)
    data2 = util.load_json(filename, 3)
    assert data == orig_data
    assert data2 == orig_data

    # Wrong API version must fail to load
    with pytest.raises(util.UserError):
        util.load_json(filename, 2)
    with pytest.raises(util.UserError):
        util.load_json(filename, 4)
    util.write_json(filename, data)
    with pytest.raises(util.UserError):
        util.load_json(filename, 3)
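
A plausible stdlib sketch of the api_version behaviour exercised above: stamp a version on write, reject mismatches on load. The key name and error type here are assumptions; asv raises util.UserError and may store the version differently.

import json

def write_json_versioned(filename, data, api_version=None):
    doc = dict(data)
    if api_version is not None:
        doc['version'] = api_version  # assumed key name
    with open(filename, 'w') as f:
        json.dump(doc, f)

def load_json_versioned(filename, api_version=None):
    with open(filename) as f:
        doc = json.load(f)
    if api_version is not None and doc.pop('version', None) != api_version:
        raise ValueError('wrong API version in %s' % filename)
    return doc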
Example #7
def test_write_load_json(tmpdir):
    data = {
        'a': 1,
        'b': 2,
        'c': 3
    }
    orig_data = dict(data)

    filename = os.path.join(six.text_type(tmpdir), 'test.json')

    util.write_json(filename, data)
    data2 = util.load_json(filename)
    assert data == orig_data
    assert data2 == orig_data

    util.write_json(filename, data, 3)
    data2 = util.load_json(filename, 3)
    assert data == orig_data
    assert data2 == orig_data

    # Wrong API version must fail to load
    with pytest.raises(util.UserError):
        util.load_json(filename, 2)
    with pytest.raises(util.UserError):
        util.load_json(filename, 4)
    util.write_json(filename, data)
    with pytest.raises(util.UserError):
        util.load_json(filename, 3)
Example #8
def test_update_simple(monkeypatch, generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1] + 5 * [10])

    basedir = os.path.abspath(os.path.dirname(conf.results_dir))
    local = os.path.abspath(os.path.dirname(__file__))

    shutil.copyfile(os.path.join(local, 'asv-machine.json'),
                    os.path.join(basedir, 'asv-machine.json'))
    machine_file = 'asv-machine.json'

    conf_values = {}
    for key in ['results_dir', 'html_dir', 'repo', 'project', 'branches']:
        conf_values[key] = getattr(conf, key)

    util.write_json(os.path.join(basedir, 'asv.conf.json'),
                    conf_values,
                    api_version=1)

    # Check renaming of long result files
    machine_dir = os.path.join(basedir, 'results', 'tarzan')

    result_fns = [
        fn for fn in sorted(os.listdir(machine_dir)) if fn != 'machine.json'
    ]
    long_result_fn = 'abbacaca-' + 'a' * 128 + '.json'
    hash_result_fn = ('abbacaca-env-' + hashlib.md5(b'a' * 128).hexdigest() +
                      '.json')

    shutil.copyfile(os.path.join(machine_dir, result_fns[0]),
                    os.path.join(machine_dir, long_result_fn))

    old_env_name = util.load_json(os.path.join(machine_dir,
                                               result_fns[0]))['env_name']

    # Should succeed
    monkeypatch.chdir(basedir)
    tools.run_asv_with_conf(conf, "update", _machine_file=machine_file)

    # Check file rename
    items = [
        fn for fn in sorted(os.listdir(machine_dir)) if fn != 'machine.json'
    ]
    assert long_result_fn.lower() not in [x.lower() for x in items]
    assert hash_result_fn.lower() in [x.lower() for x in items]

    # Check env name is preserved
    new_env_name = util.load_json(os.path.join(machine_dir,
                                               items[0]))['env_name']
    assert old_env_name == new_env_name
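
The rename being checked compresses an over-long environment part of the result filename into an md5 digest, exactly as hash_result_fn is constructed above. A sketch of that transformation, assuming the commit prefix and the environment name are separated by the first dash:

import hashlib

def shorten_result_name(fn, limit=128):
    base = fn[:-len('.json')]
    commit, _, env_name = base.partition('-')
    if len(env_name) >= limit:
        env_name = 'env-' + hashlib.md5(env_name.encode('utf-8')).hexdigest()
    return commit + '-' + env_name + '.json'

# shorten_result_name('abbacaca-' + 'a' * 128 + '.json') yields the hashed
# name asserted above.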
Example #9
def test_run_append_samples(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    # Only one environment
    conf.matrix['asv_dummy_test_package_2'] = conf.matrix[
        'asv_dummy_test_package_2'][:1]

    # Tests multiple calls to "asv run --append-samples"
    def run_it():
        tools.run_asv_with_conf(
            conf,
            'run',
            "master^!",
            '--bench',
            'time_examples.TimeSuite.time_example_benchmark_1',
            '--append-samples',
            '-a',
            'repeat=(1, 1, 10.0)',
            '-a',
            'processes=1',
            '-a',
            'number=1',
            '-a',
            'warmup_time=0',
            _machine_file=machine_file)

    run_it()

    result_dir = join(tmpdir, 'results_workflow', 'orangutan')
    result_fn, = [
        join(result_dir, fn) for fn in os.listdir(result_dir)
        if fn != 'machine.json'
    ]

    data = util.load_json(result_fn)
    value = dict(zip(
        data['result_columns'],
        data['results']['time_examples.TimeSuite.time_example_benchmark_1']))
    assert value['stats_q_25'][0] is not None
    assert len(value['samples'][0]) == 1

    run_it()
    data = util.load_json(result_fn)
    value = dict(zip(
        data['result_columns'],
        data['results']['time_examples.TimeSuite.time_example_benchmark_1']))
    assert len(value['samples'][0]) == 2
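
The result_columns/results pairing is unpacked twice above; it factors naturally into a small helper (illustrative only, not part of asv's API):

def get_result_value(data, benchmark_name):
    # Pair the shared column names with this benchmark's row of values.
    return dict(zip(data['result_columns'], data['results'][benchmark_name]))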
Example #10
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(stamp0), 'py', 'env')
    value = runner.BenchmarkResult(result=[42],
                                   params=[],
                                   stats=None,
                                   samples=None,
                                   started_at=stamp1,
                                   ended_at=stamp2,
                                   profile=None,
                                   errcode=0,
                                   stderr='')
    r.add_result('some_benchmark', value, "some version")
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp2)
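
A "JS timestamp" is conventionally milliseconds since the Unix epoch, matching JavaScript's Date.getTime(). Assuming datetime_to_js_timestamp follows that convention, it is roughly:

import datetime

def datetime_to_js_timestamp(dt):
    # Milliseconds since 1970-01-01 UTC (naive datetimes assumed).
    epoch = datetime.datetime(1970, 1, 1)
    return int((dt - epoch).total_seconds() * 1000)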
Example #11
def get_results():
    results = util.load_json(glob.glob(join(
        tmpdir, 'results_workflow', 'orangutan', '*-*.json'))[0])
    # Replacing NaN by 'n/a' makes assertions easier
    return ['n/a' if util.is_nan(item) else item
            for item in results['results'][
                'params_examples.track_param_selection']['result']]
Example #12
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa', util.datetime_to_timestamp(stamp0),
                        'py', 'env')
    value = {
        'result': [42],
        'params': [],
        'stats': None,
        'samples': None,
        'number': None,
        'started_at': stamp1,
        'ended_at': stamp2
    }
    r.add_result('some_benchmark', value, "some version")
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp2)
Example #13
def test_regression_non_monotonic(dvcs_type, tmpdir):
    tmpdir = six.text_type(tmpdir)
    now = datetime.datetime.now()

    dates = ([now + datetime.timedelta(days=i) for i in range(5)] +
             [now - datetime.timedelta(days=i) for i in range(5)])
    # last commit in the past
    dates[-1] = now - datetime.timedelta(days=1)

    dvcs = tools.generate_repo_from_ops(tmpdir, dvcs_type,
                                        [("commit", i, d)
                                         for i, d in enumerate(dates)])
    commits = list(reversed(dvcs.get_branch_hashes()))
    commit_values = {}
    for commit, value in zip(commits, 5 * [1] + 5 * [2]):
        commit_values[commit] = value
    conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {
        'regressions': [[
            'time_func',
            _graph_path(dvcs_type), {}, None, 2.0, 1.0, [[None, 5, 1.0, 2.0]]
        ]]
    }
    assert regressions == expected
Example #14
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
    if LockFile is DummyLock:
        cache_key = cache_key + os.environ.get('PYTEST_XDIST_WORKER', '')

    base_dir = config.cache.makedir(cache_key)

    lockfile = join(six.text_type(base_dir), 'lock')
    cache_dir = join(six.text_type(base_dir), 'cache')

    lock = LockFile(lockfile)
    lock.acquire(timeout=timeout)
    try:
        # Clear the cache dir contents if it was generated with a
        # different asv version
        tag_fn = join(six.text_type(base_dir), 'tag.json')
        tag_content = [asv.__version__, repr(tag)]
        if os.path.isdir(cache_dir):
            try:
                if util.load_json(tag_fn) != tag_content:
                    raise ValueError()
            except (IOError, ValueError, util.UserError):
                shutil.rmtree(cache_dir)

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)

        yield cache_dir

        util.write_json(tag_fn, tag_content)
    finally:
        lock.release()
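
Since the function body yields, it is presumably decorated with contextlib.contextmanager just outside this excerpt. Typical usage would then look like the following; build_expensive_fixture is a hypothetical caller-side helper:

# 'config' is the pytest config object carrying the cache.
with locked_cache_dir(config, 'asv-test_web-my_fixture', tag='v1') as cache_dir:
    # Only one worker at a time gets here; the directory persists across
    # runs until the asv version or the tag changes.
    build_expensive_fixture(cache_dir)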
Example #15
def test_run_with_repo_subdir(basic_conf_with_subdir):
    """
    Check 'asv run' with the Python project inside a subdirectory.
    """
    tmpdir, local, conf, machine_file = basic_conf_with_subdir

    conf.matrix = {}

    # This benchmark imports the project under test (asv_test_repo)
    bench_name = 'params_examples.track_find_test'
    # Test with a single changeset
    tools.run_asv_with_conf(conf,
                            'run',
                            'master^!',
                            '--quick',
                            '--show-stderr',
                            '--bench',
                            bench_name,
                            _machine_file=machine_file)

    # Check it ran ok
    fn_results, = glob.glob(
        join(tmpdir, 'results_workflow', 'orangutan',
             '*-*.json'))  # avoid machine.json
    data = util.load_json(fn_results)
    assert data['results'][bench_name] == {
        'params': [['1', '2']],
        'result': [6, 6]
    }
Example #16
def locked_cache_dir(config, cache_key, timeout=900, tag=None):
    base_dir = config.cache.makedir(cache_key)

    lockfile = join(six.text_type(base_dir), 'lock')
    cache_dir = join(six.text_type(base_dir), 'cache')

    lock = FileLock(lockfile)
    lock.acquire(timeout=timeout)
    try:
        # Clear the cache dir contents if it was generated with a
        # different asv version
        tag_fn = join(six.text_type(base_dir), 'tag.json')
        tag_content = [asv.__version__, repr(tag)]
        if os.path.isdir(cache_dir):
            try:
                if util.load_json(tag_fn) != tag_content:
                    raise ValueError()
            except (IOError, ValueError, util.UserError):
                shutil.rmtree(cache_dir)

        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)

        yield cache_dir

        util.write_json(tag_fn, tag_content)
    finally:
        lock.release()
Example #17
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(stamp0), 'py', 'env')
    value = {
        'result': [42],
        'params': [],
        'stats': None,
        'samples': None,
        'number': None,
        'started_at': stamp1,
        'ended_at': stamp2
    }
    r.add_result('some_benchmark', value)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(
        stamp2)
Example #18
def test_regression_parameterized(generate_result_dir):
    before = {"params": [["a", "b", "c", "d"]], "result": [5, 1, 1, 10]}
    after = {"params": [["a", "b", "c", "d"]], "result": [6, 1, 10, 1]}
    conf, repo, commits = generate_result_dir(5 * [before] + 5 * [after])
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {
        'regressions': [
            ['time_func(a)', _graph_path(repo.dvcs), {}, 0,
             6.0, 5.0, [[None, 5, 5.0, 6.0]]],
            ['time_func(c)', _graph_path(repo.dvcs), {}, 2,
             10.0, 1.0, [[None, 5, 1.0, 10.0]]],
        ]
    }
    assert regressions == expected
Example #19
def test_cpu_affinity(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    # Only one environment
    conf.matrix = {}

    # Test "asv run" with --cpu-affinity
    tools.run_asv_with_conf(conf,
                            'run',
                            "master^!",
                            '--bench',
                            'time_examples.TimeSuite.time_example_benchmark_1',
                            '--cpu-affinity=0',
                            '-a',
                            'repeat=(1, 1, 10.0)',
                            '-a',
                            'processes=1',
                            '-a',
                            'number=1',
                            '-a',
                            'warmup_time=0',
                            _machine_file=machine_file)

    # Check run produced a result
    result_dir = join(tmpdir, 'results_workflow', 'orangutan')
    result_fn, = [
        join(result_dir, fn) for fn in os.listdir(result_dir)
        if fn != 'machine.json'
    ]
    data = util.load_json(result_fn)
    assert data['results']['time_examples.TimeSuite.time_example_benchmark_1']
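
On Linux, the effect of --cpu-affinity=0 could be observed directly with os.sched_getaffinity; a sketch of such a check (not part of the test above, and not portable to Windows or macOS):

import os

def current_affinity():
    # The set of CPU ids the calling process is allowed to run on.
    return os.sched_getaffinity(0)

# After pinning to CPU 0, current_affinity() would be {0}.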
Example #20
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa', util.datetime_to_timestamp(stamp0),
                        'py', 'env')
    value = runner.BenchmarkResult(
        result=[42],
        samples=[None],
        number=[None],
        profile=None,
        errcode=0,
        stderr=''
    )
    benchmark = {'name': 'some_benchmark', 'version': 'some version', 'params': []}
    r.add_result(benchmark, value, started_at=stamp1, ended_at=stamp2)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp2)
Example #21
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    duration = 1.5

    r = results.Results({'machine': 'mach'}, {}, 'aaaa',
                        util.datetime_to_timestamp(stamp0), 'py', 'env', {})
    value = runner.BenchmarkResult(result=[42],
                                   samples=[None],
                                   number=[None],
                                   profile=None,
                                   errcode=0,
                                   stderr='')
    benchmark = {
        'name': 'some_benchmark',
        'version': 'some version',
        'params': []
    }
    r.add_result(benchmark, value, started_at=stamp1, duration=duration)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    keys = r['result_columns']
    values = dict(zip(keys, r['results']['some_benchmark']))
    assert values['started_at'] == util.datetime_to_js_timestamp(stamp1)
    assert values['duration'] == duration
Example #22
def test_json_non_ascii(tmpdir):
    non_ascii_data = [{'😼': '難', 'ä': 3}]

    fn = os.path.join(str(tmpdir), "nonascii.json")
    util.write_json(fn, non_ascii_data)
    data = util.load_json(fn)

    assert data == non_ascii_data
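
The round trip works whether the writer escapes non-ASCII characters or emits raw UTF-8; the stdlib equivalent that keeps the characters readable on disk is:

import io
import json

data = [{'😼': '難', 'ä': 3}]
with io.open('nonascii.json', 'w', encoding='utf-8') as f:
    json.dump(data, f, ensure_ascii=False)  # keep raw UTF-8 in the file
with io.open('nonascii.json', encoding='utf-8') as f:
    assert json.load(f) == data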
Example #23
def test_regression_double(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1] + 5 * [10] + 5 * [15])
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, [
        [[None, 5, 1.0, 10.0], [None, 10, 10.0, 15.0]], 15.0, 1.0,
    ]]]}
    assert regressions == expected
Example #24
def test_regression_double(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1] + 5 * [10] + 5 * [15])
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, 15.0, 1.0,
                [[None, 5, 1.0, 10.0], [None, 10, 10.0, 15.0]],
    ]]}
    assert regressions == expected
Example #25
def test_presence_checks(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    environments = list(environment.get_environments(conf, None))

    for env in environments:
        env.create()
        assert env.check_presence()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '0'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == PYTHON_VER1
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fns = [
            os.path.join(env._path, 'bin', 'pip')
        ]
        if WIN:
            pip_fns += [
                os.path.join(env._path, 'bin', 'pip.exe'),
                os.path.join(env._path, 'Scripts', 'pip'),
                os.path.join(env._path, 'Scripts', 'pip.exe')
            ]

        some_removed = False
        for pip_fn in pip_fns:
            if os.path.isfile(pip_fn):
                some_removed = True
                os.remove(pip_fn)
        assert some_removed

        env._is_setup = False
        env.create()
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])
Example #26
def test_presence_checks(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7"]
    conf.matrix = {}
    environments = list(environment.get_environments(conf))

    for env in environments:
        env.create()
        assert env.check_presence()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '3.4'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == '2.7'
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fns = [
            os.path.join(env._path, 'bin', 'pip')
        ]
        if WIN:
            pip_fns += [
                os.path.join(env._path, 'bin', 'pip.exe'),
                os.path.join(env._path, 'Scripts', 'pip'),
                os.path.join(env._path, 'Scripts', 'pip.exe')
            ]

        some_removed = False
        for pip_fn in pip_fns:
            if os.path.isfile(pip_fn):
                some_removed = True
                os.remove(pip_fn)
        assert some_removed

        env._is_setup = False
        env.create()
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])
Example #27
    def check_env_matrix(env_build, env_nobuild):
        conf.matrix = {"env": env_build, "env_nobuild": env_nobuild}

        tools.run_asv_with_conf(conf, 'run', "master^!",
                                '--bench', 'time_secondary.track_environment_value',
                                _machine_file=machine_file)

        # Check run produced a result
        result_dir = join(tmpdir, 'results_workflow', 'orangutan')

        result_fn1, = glob.glob(result_dir + '/*-SOME_TEST_VAR1.json')
        result_fn2, = glob.glob(result_dir + '/*-SOME_TEST_VAR2.json')

        data = util.load_json(result_fn1)
        assert data['results']['time_secondary.track_environment_value'] == 1

        data = util.load_json(result_fn2)
        assert data['results']['time_secondary.track_environment_value'] == 2
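
Judging by the *-SOME_TEST_VAR1.json and *-SOME_TEST_VAR2.json names, the enclosing test presumably calls this helper with an environment-variable matrix along these lines (hypothetical invocations):

check_env_matrix({'SOME_TEST_VAR': ['1', '2']}, {})
check_env_matrix({}, {'SOME_TEST_VAR': ['1', '2']})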
Example #28
def test_regression_threshold(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1.0] + 5 * [1.1] + 5 * [2.0])

    conf.regressions_thresholds = {'.*': 0}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, [
        [[None, 5, 1.0, 1.1], [None, 10, 1.1, 2.0]], 2.0, 1.0,
    ]]]}
    assert regressions == expected

    conf.regressions_thresholds = {'.*': 0, 'time_func.*': 0.2}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, [
        [[None, 10, 1.1, 2.0]], 2.0, 1.0,
    ]]]}
    assert regressions == expected
Example #29
def test_publish_range_spec(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1])
    for range_spec, expected in (
        ([commits[0], commits[-1]], set([commits[0], commits[-1]])),
        ('HEAD~2..HEAD' if repo.dvcs == 'git' else '.~1:', set(commits[-2:])),
    ):
        tools.run_asv_with_conf(conf, "publish", range_spec)
        data = util.load_json(join(conf.html_dir, 'index.json'))
        assert set(data['revision_to_hash'].values()) == expected
Example #30
def test_regression_threshold(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1.0] + 5 * [1.1] + 5 * [2.0])

    conf.regressions_thresholds = {'.*': 0}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None,
                                 2.0, 1.0, [[None, 5, 1.0, 1.1], [None, 10, 1.1, 2.0]]]]}

    assert regressions == expected

    conf.regressions_thresholds = {'.*': 0, 'time_func.*': 0.2}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, 2.0, 1.0,
                                 [[None, 10, 1.1, 2.0]]]]}

    assert regressions == expected
Example #31
def test_publish_range_spec(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1])
    for range_spec, expected in (
        ([commits[0], commits[-1]], set([commits[0], commits[-1]])),
        ('HEAD~2..HEAD' if repo.dvcs == 'git' else '.~1:',
            set(commits[-2:])),
    ):
        tools.run_asv_with_conf(conf, "publish", range_spec)
        data = util.load_json(join(conf.html_dir, 'index.json'))
        assert set(data['revision_to_hash'].values()) == expected
Example #32
def check_file(branch):
    fn = join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64', 'branch-' + branch,
              'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
              'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
              'time_coordinates.time_latitude.json')
    data = util.load_json(fn, cleanup=False)
    if branch == 'master':
        # we set all dates positive for master above
        assert all(x[0] >= 0 for x in data)
    else:
        # we set some dates negative for some-branch above
        assert any(x[0] < 0 for x in data) and any(x[0] >= 0 for x in data)
Example #33
def test_regression_multiple_branches(dvcs_type, tmpdir):
    tmpdir = six.text_type(tmpdir)
    if dvcs_type == "git":
        master = "master"
    elif dvcs_type == "hg":
        master = "default"
    dvcs = tools.generate_repo_from_ops(
        tmpdir,
        dvcs_type,
        [
            ("commit", 1),
            ("checkout", "stable", master),
            ("commit", 1),
            ("checkout", master),
        ] + 4 * [
            ("commit", 1),
            ("checkout", "stable"),
            ("commit", 1),
            ("checkout", master),
        ] + 5 * [
            ("commit", 1),
            ("checkout", "stable"),
            ("commit", 2),
            ("checkout", master),
        ],
    )
    commit_values = {}
    branches = dict((branch, list(reversed(dvcs.get_branch_hashes(branch))))
                    for branch in (master, "stable"))
    for branch, values in (
        (master, 10 * [1]),
        ("stable", 5 * [1] + 5 * [2]),
    ):
        for commit, value in zip(branches[branch], values):
            commit_values[commit] = value
    conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
    conf.branches = [master, "stable"]
    tools.run_asv_with_conf(conf, "publish")
    repo = get_repo(conf)
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    graph_path = join('graphs', 'branch-stable', 'machine-tarzan',
                      'time_func.json')
    # The regression occurs on the 5th commit of the stable branch
    revision = repo.get_revisions(commit_values.keys())[branches["stable"][5]]
    expected = {
        'regressions': [[
            'time_func', graph_path, {'branch': 'stable'},
            None, 2.0, 1.0, [[None, revision, 1.0, 2.0]]
        ]]
    }
    assert regressions == expected
Example #34
def test_regression_first_commits(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1] + 10 * [10])
    # Ignore before 5th commit
    conf.regressions_first_commits = {"^time_*": commits[5]}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    assert regressions == {"regressions": []}

    # Ignore all
    conf.regressions_first_commits = {"^time_*": None}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    assert regressions == {"regressions": []}

    # Ignore before 2nd commit (-> regression not ignored)
    conf.regressions_first_commits = {"^time_*": commits[2]}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, [
        [[None, 5, 1.0, 10.0]], 10.0, 1.0,
    ]]]}
    assert regressions == expected
Example #35
def test_regression_first_commits(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1] + 10 * [10])
    # Ignore before 5th commit
    conf.regressions_first_commits = {"^time_*": commits[5]}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    assert regressions == {"regressions": []}

    # Ignore all
    conf.regressions_first_commits = {"^time_*": None}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    assert regressions == {"regressions": []}

    # Ignore before 2nd commit (-> regression not ignored)
    conf.regressions_first_commits = {"^time_*": commits[2]}
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": [["time_func", _graph_path(repo.dvcs), {}, None, 10.0, 1.0,
                                 [[None, 5, 1.0, 10.0]]]]
                }
    assert regressions == expected
Example #36
def check_file(branch):
    fn = join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64',
              'branch-' + branch,
              'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
              'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)',
              'python-2.7', 'ram-8.2G',
              'time_coordinates.time_latitude.json')
    data = util.load_json(fn, cleanup=False)
    if branch == 'master':
        # we set all dates positive for master above
        assert all(x[0] >= 0 for x in data)
    else:
        # we set some dates negative for some-branch above
        assert any(x[0] < 0 for x in data) and any(x[0] >= 0 for x in data)
Example #37
def test_json_timestamp(tmpdir):
    # Check that per-benchmark timestamps are saved as JS timestamps in the result file
    tmpdir = six.text_type(tmpdir)

    stamp0 = datetime.datetime(1970, 1, 1)
    stamp1 = datetime.datetime(1971, 1, 1)
    stamp2 = datetime.datetime.utcnow()

    r = results.Results({'machine': 'mach'}, {}, 'aaaa', util.datetime_to_timestamp(stamp0),
                        'py', 'env')
    r.add_result('some_benchmark', 42, stamp1, stamp2)
    r.save(tmpdir)

    r = util.load_json(join(tmpdir, 'mach', 'aaaa-env.json'))
    assert r['started_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp1)
    assert r['ended_at']['some_benchmark'] == util.datetime_to_js_timestamp(stamp2)
Example #38
    def check_file(branch, cython):
        fn = join(tmpdir, 'html', 'graphs', cython, 'arch-x86_64', 'branch-' + branch,
                  'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                  'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
                  'time_coordinates.time_latitude.json')
        data = util.load_json(fn, cleanup=False)
        data_commits = [revision_to_hash[x[0]] for x in data]
        if branch == "master":
            assert all(c in master_commits for c in data_commits)
        else:
            # Must contain commits from some-branch
            assert any(c in only_branch for c in data_commits)
            # And commits from master
            assert any(c in master_commits for c in data_commits)

        # Check that revisions are strictly increasing
        assert all(x[0] < y[0] for x, y in zip(data, data[1:]))
Example #39
def test_regression_multiple_branches(dvcs_type, tmpdir):
    tmpdir = six.text_type(tmpdir)
    if dvcs_type == "git":
        master = "master"
    elif dvcs_type == "hg":
        master = "default"
    dvcs = tools.generate_repo_from_ops(
        tmpdir, dvcs_type, [
            ("commit", 1),
            ("checkout", "stable", master),
            ("commit", 1),
            ("checkout", master),
        ] + 4 * [
            ("commit", 1),
            ("checkout", "stable"),
            ("commit", 1),
            ("checkout", master),
        ] + 5 * [
            ("commit", 1),
            ("checkout", "stable"),
            ("commit", 2),
            ("checkout", master),
        ],
    )
    commit_values = {}
    branches = dict(
        (branch, list(reversed(dvcs.get_branch_hashes(branch))))
        for branch in (master, "stable")
    )
    for branch, values in (
        (master, 10 * [1]),
        ("stable", 5 * [1] + 5 * [2]),
    ):
        for commit, value in zip(branches[branch], values):
            commit_values[commit] = value
    conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
    conf.branches = [master, "stable"]
    tools.run_asv_with_conf(conf, "publish")
    repo = get_repo(conf)
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    graph_path = join('graphs', 'branch-stable', 'machine-tarzan', 'time_func.json')
    # The regression occurs on the 5th commit of the stable branch
    revision = repo.get_revisions(commit_values.keys())[branches["stable"][5]]
    expected = {'regressions': [['time_func', graph_path, {'branch': 'stable'}, None,
                                 [[[None, revision, 1.0, 2.0]], 2.0, 1.0]]]}
    assert regressions == expected
Example #40
    def check_file(branch, cython):
        fn = join(tmpdir, 'html', 'graphs', cython, 'arch-x86_64', 'branch-' + branch,
                  'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                  'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
                  'time_coordinates.time_latitude.json')
        data = util.load_json(fn)
        data_commits = [revision_to_hash[x[0]] for x in data]
        if branch == "master":
            assert all(c in master_commits for c in data_commits)
        else:
            # Must contain commits from some-branch
            assert any(c in only_branch for c in data_commits)
            # And commits from master
            assert any(c in master_commits for c in data_commits)

        # Check that revisions are strictly increasing
        assert all(x[0] < y[0] for x, y in zip(data, data[1:]))
Example #41
def test_regression_non_monotonic(dvcs_type, tmpdir):
    tmpdir = six.text_type(tmpdir)
    now = datetime.datetime.now()

    dates = ([now + datetime.timedelta(days=i) for i in range(5)] +
             [now - datetime.timedelta(days=i) for i in range(5)])
    # last commit in the past
    dates[-1] = now - datetime.timedelta(days=1)

    dvcs = tools.generate_repo_from_ops(
        tmpdir, dvcs_type, [("commit", i, d) for i, d in enumerate(dates)])
    commits = list(reversed(dvcs.get_branch_hashes()))
    commit_values = {}
    for commit, value in zip(commits, 5 * [1] + 5 * [2]):
        commit_values[commit] = value
    conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {'regressions': [['time_func', _graph_path(dvcs_type), {}, None,
                                 [[[None, 5, 1.0, 2.0]], 2.0, 1.0]]]}
    assert regressions == expected
Example #42
def test_compare_name_lookup(dvcs_type, capsys, tmpdir, example_results):
    tmpdir = str(tmpdir)
    os.chdir(tmpdir)

    repo = tools.generate_test_repo(tmpdir, dvcs_type=dvcs_type)
    branch_name = 'master' if dvcs_type == 'git' else 'default'
    commit_hash = repo.get_branch_hashes(branch_name)[0]

    result_dir = os.path.join(tmpdir, 'results')

    src = os.path.join(example_results, 'cheetah')
    dst = os.path.join(result_dir, 'cheetah')
    os.makedirs(dst)

    for fn in ['feea15ca-py2.7-Cython-numpy1.8.json', 'machine.json']:
        shutil.copyfile(os.path.join(src, fn), os.path.join(dst, fn))

    shutil.copyfile(os.path.join(example_results, 'benchmarks.json'),
                    os.path.join(result_dir, 'benchmarks.json'))

    # Copy to different commit
    fn_1 = os.path.join(dst, 'feea15ca-py2.7-Cython-numpy1.8.json')
    fn_2 = os.path.join(dst, commit_hash[:8] + '-py2.7-Cython-numpy1.8.json')
    data = util.load_json(fn_1)
    data['commit_hash'] = commit_hash
    util.write_json(fn_2, data)

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'repo': repo.path,
        'project': 'asv',
        'environment_type': "shouldn't matter what"
    })

    # Lookup with symbolic name
    tools.run_asv_with_conf(conf, 'compare', branch_name, 'feea15ca',
                            '--machine=cheetah', '--factor=2',
                            '--environment=py2.7-Cython-numpy1.8',
                            '--only-changed')

    # Nothing should be printed since no results were changed
    text, err = capsys.readouterr()
    assert text.strip() == ''
Example #43
def test_regression_parameterized(generate_result_dir):
    before = {"params": [["a", "b", "c", "d"]], "result": [5, 1, 1, 10]}
    after = {"params": [["a", "b", "c", "d"]], "result": [6, 1, 10, 1]}
    conf, repo, commits = generate_result_dir(5 * [before] + 5 * [after])
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {'regressions': [[
        'time_func(a)',
        _graph_path(repo.dvcs),
        {},
        0,
        [[[None, 5, 5.0, 6.0]], 6.0, 5.0],
    ], [
        'time_func(c)',
        _graph_path(repo.dvcs),
        {},
        2,
        [[[None, 5, 1.0, 10.0]], 10.0, 1.0],
    ]]}
    assert regressions == expected
Example #44
def test_cpu_affinity(basic_conf):
    tmpdir, local, conf, machine_file = basic_conf

    # Only one environment
    conf.matrix = {}

    # Test "asv run" with --cpu-affinity
    tools.run_asv_with_conf(conf, 'run', "master^!",
                            '--bench', 'time_examples.TimeSuite.time_example_benchmark_1',
                            '--cpu-affinity=0', '-a', 'repeat=(1, 1, 10.0)', '-a', 'processes=1',
                            '-a', 'number=1', '-a', 'warmup_time=0',
                            _machine_file=machine_file)

    # Check run produced a result
    result_dir = join(tmpdir, 'results_workflow', 'orangutan')
    result_fn, = [join(result_dir, fn) for fn in os.listdir(result_dir)
                  if fn != 'machine.json']
    data = util.load_json(result_fn)
    assert data['results']['time_examples.TimeSuite.time_example_benchmark_1']
Example #45
def test_compare_name_lookup(dvcs_type, capsys, tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    repo = tools.generate_test_repo(tmpdir, dvcs_type=dvcs_type)
    branch_name = 'master' if dvcs_type == 'git' else 'default'
    commit_hash = repo.get_branch_hashes(branch_name)[0]

    result_dir = os.path.join(tmpdir, 'results')

    src = os.path.join(RESULT_DIR, 'cheetah')
    dst = os.path.join(result_dir, 'cheetah')
    os.makedirs(dst)

    for fn in ['feea15ca-py2.7-Cython-numpy1.8.json', 'machine.json']:
        shutil.copyfile(os.path.join(src, fn), os.path.join(dst, fn))

    shutil.copyfile(os.path.join(RESULT_DIR, 'benchmarks.json'),
                    os.path.join(result_dir, 'benchmarks.json'))

    # Copy to different commit
    fn_1 = os.path.join(dst, 'feea15ca-py2.7-Cython-numpy1.8.json')
    fn_2 = os.path.join(dst, commit_hash[:8] + '-py2.7-Cython-numpy1.8.json')
    data = util.load_json(fn_1)
    data['commit_hash'] = commit_hash
    util.write_json(fn_2, data)

    conf = config.Config.from_json(
        {'results_dir': result_dir,
         'repo': repo.path,
         'project': 'asv',
         'environment_type': "shouldn't matter what"})

    # Lookup with symbolic name
    tools.run_asv_with_conf(conf, 'compare', branch_name, 'feea15ca', '--machine=cheetah',
                            '--factor=2', '--environment=py2.7-Cython-numpy1.8',
                            '--only-changed')

    # Nothing should be printed since no results were changed
    text, err = capsys.readouterr()
    assert text.strip() == ''
Example #46
def test_run_with_repo_subdir(basic_conf_with_subdir):
    """
    Check 'asv run' with the Python project inside a subdirectory.
    """
    tmpdir, local, conf, machine_file = basic_conf_with_subdir

    conf.matrix = {}

    # This benchmark imports the project under test (asv_test_repo)
    bench_name = 'params_examples.track_find_test'
    # Test with a single changeset
    tools.run_asv_with_conf(conf, 'run', 'master^!',
                            '--quick', '--show-stderr',
                            '--bench', bench_name,
                            _machine_file=machine_file)

    # Check it ran ok
    fn_results, = glob.glob(join(tmpdir, 'results_workflow', 'orangutan',
                                 '*-*.json'))  # avoid machine.json
    data = util.load_json(fn_results)
    assert data['results'][bench_name] == {'params': [['1', '2']],
                                           'result': [6, 6]}
Example #47
def test_publish(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    result_dir = join(tmpdir, 'sample_results')
    os.makedirs(result_dir)
    os.makedirs(join(result_dir, 'cheetah'))

    # Synthesize history with two branches that both have commits
    result_files = [fn for fn in os.listdir(join(RESULT_DIR, 'cheetah'))
                    if fn.endswith('.json') and fn != 'machine.json']
    result_files.sort()
    master_values = list(range(len(result_files)*2//3))
    branch_values = list(range(len(master_values), len(result_files)))
    dvcs = tools.generate_test_repo(tmpdir, master_values, 'git',
                                    [('master~6', 'some-branch', branch_values)])

    # Copy and modify result files, fixing commit hashes and setting result
    # dates to distinguish the two branches
    master_commits = dvcs.get_branch_hashes('master')
    only_branch = [x for x in dvcs.get_branch_hashes('some-branch')
                   if x not in master_commits]
    commits = master_commits + only_branch
    for k, item in enumerate(zip(result_files, commits)):
        fn, commit = item
        src = join(RESULT_DIR, 'cheetah', fn)
        dst = join(result_dir, 'cheetah', commit[:8] + fn[8:])
        data = util.load_json(src, cleanup=False)
        data['commit_hash'] = commit
        util.write_json(dst, data)

    shutil.copyfile(join(RESULT_DIR, 'benchmarks.json'),
                    join(result_dir, 'benchmarks.json'))
    shutil.copyfile(join(RESULT_DIR, 'cheetah', 'machine.json'),
                    join(result_dir, 'cheetah', 'machine.json'))

    # Publish the synthesized data
    conf = config.Config.from_json(
        {'benchmark_dir': BENCHMARK_DIR,
         'results_dir': result_dir,
         'html_dir': join(tmpdir, 'html'),
         'repo': dvcs.path,
         'project': 'asv'})

    tools.run_asv_with_conf(conf, 'publish')

    # Check output
    assert isfile(join(tmpdir, 'html', 'index.html'))
    assert isfile(join(tmpdir, 'html', 'index.json'))
    assert isfile(join(tmpdir, 'html', 'asv.js'))
    assert isfile(join(tmpdir, 'html', 'asv.css'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64',
                          'branch-some-branch'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython-null', 'arch-x86_64',
                          'branch-some-branch'))
    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master']

    repo = get_repo(conf)
    revision_to_hash = dict((r, h) for h, r in six.iteritems(repo.get_revisions(commits)))

    def check_file(branch, cython):
        fn = join(tmpdir, 'html', 'graphs', cython, 'arch-x86_64', 'branch-' + branch,
                  'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                  'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
                  'time_coordinates.time_latitude.json')
        data = util.load_json(fn, cleanup=False)
        data_commits = [revision_to_hash[x[0]] for x in data]
        if branch == "master":
            assert all(c in master_commits for c in data_commits)
        else:
            # Must contain commits from some-branch
            assert any(c in only_branch for c in data_commits)
            # And commits from master
            assert any(c in master_commits for c in data_commits)

        # Check that revisions are strictly increasing
        assert all(x[0] < y[0] for x, y in zip(data, data[1:]))

    check_file("master", "Cython")
    check_file("master", "Cython-null")

    # Publish with branches set in the config
    conf.branches = ['master', 'some-branch']
    tools.run_asv_with_conf(conf, 'publish')

    # Check output
    check_file("master", "Cython")
    check_file("master", "Cython-null")
    check_file("some-branch", "Cython")
    check_file("some-branch", "Cython-null")

    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master', 'some-branch']
    assert index['params']['Cython'] == ['', None]
    assert index['params']['ram'] == ['8.2G', 8804682956.8]

    expected_graph_list = [{'Cython': cython, 'arch': 'x86_64',
                            'branch': branch,
                            'cpu': 'Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                            'machine': 'cheetah',
                            'numpy': '1.8',
                            'os': 'Linux (Fedora 20)',
                            'python': '2.7',
                            'ram': '8.2G'}
                            for cython in ["", None] for branch in ["master", "some-branch"]]
    d = dict(expected_graph_list[0])
    d['ram'] = 8804682956.8
    expected_graph_list.append(d)

    assert len(index['graph_param_list']) == len(expected_graph_list)
    for item in expected_graph_list:
        assert item in index['graph_param_list']
Example #48
def test_regression_fixed(generate_result_dir):
    conf, repo, commits = generate_result_dir(5 * [1] + 5 * [10] + [1])
    tools.run_asv_with_conf(conf, "publish")
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    expected = {"regressions": []}
    assert regressions == expected
Example #49
def basic_html(request):
    if hasattr(request.config, 'cache'):
        # Cache the generated html, if py.test is new enough to support it
        cache_dir = request.config.cache.makedir("asv-test_web-basic_html")
        tmpdir = join(six.text_type(cache_dir), 'cached')

        if os.path.isdir(tmpdir):
            # Cached result found
            try:
                if util.load_json(join(tmpdir, 'tag.json')) != [asv.__version__]:
                    raise ValueError()

                html_dir = join(tmpdir, 'html')
                dvcs = tools.Git(join(tmpdir, 'repo'))
                return html_dir, dvcs
            except (IOError, ValueError):
                shutil.rmtree(tmpdir)

        os.makedirs(tmpdir)
    else:
        tmpdir = tempfile.mkdtemp()
        request.addfinalizer(lambda: shutil.rmtree(tmpdir))

    local = abspath(dirname(__file__))
    cwd = os.getcwd()

    os.chdir(tmpdir)
    try:
        machine_file = join(tmpdir, 'asv-machine.json')

        shutil.copyfile(join(local, 'asv-machine.json'),
                        machine_file)

        values = [[x]*2 for x in [0, 0, 0, 0, 0,
                                  1, 1, 1, 1, 1,
                                  3, 3, 3, 3, 3,
                                  2, 2, 2, 2, 2]]
        dvcs = tools.generate_test_repo(tmpdir, values)
        first_tested_commit_hash = dvcs.get_hash('master~14')

        repo_path = dvcs.path
        shutil.move(repo_path, join(tmpdir, 'repo'))
        dvcs = tools.Git(join(tmpdir, 'repo'))

        conf = config.Config.from_json({
            'env_dir': join(tmpdir, 'env'),
            'benchmark_dir': join(local, 'benchmark'),
            'results_dir': join(tmpdir, 'results_workflow'),
            'html_dir': join(tmpdir, 'html'),
            'repo': join(tmpdir, 'repo'),
            'dvcs': 'git',
            'project': 'asv',
            'matrix': {},
            'regressions_first_commits': {
                '.*': first_tested_commit_hash
            },
        })

        tools.run_asv_with_conf(conf, 'run', 'ALL',
                                '--show-stderr', '--quick', '--bench=params_examples.*track_.*',
                                _machine_file=machine_file)

        # Swap CPU info and obtain some results
        info = util.load_json(machine_file, api_version=1)

        # Put in parameter values that need quoting in file names
        info['orangutan']['cpu'] = 'Not /really/ <fast>'
        info['orangutan']['ram'] = '?'
        info['orangutan']['NUL'] = ''

        util.write_json(machine_file, info, api_version=1)

        tools.run_asv_with_conf(conf, 'run', 'master~10..', '--steps=3',
                                '--show-stderr', '--quick', '--bench=params_examples.*track_.*',
                                _machine_file=machine_file)

        # Output
        tools.run_asv_with_conf(conf, 'publish')

        shutil.rmtree(join(tmpdir, 'env'))
    finally:
        os.chdir(cwd)

    util.write_json(join(tmpdir, 'tag.json'), [asv.__version__])

    return conf.html_dir, dvcs
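The fixture above caches the generated HTML across test runs and invalidates the cache via the tag.json it writes last. A minimal standalone sketch of that pattern, assuming a hypothetical cache_dir:

import os
import shutil
from os.path import join

import asv
from asv import util

cache_dir = '/tmp/asv-test-cache'  # hypothetical location
try:
    # A cache stamped by a different asv version is considered stale
    if util.load_json(join(cache_dir, 'tag.json')) != [asv.__version__]:
        raise ValueError()
except (IOError, ValueError):
    if os.path.isdir(cache_dir):
        shutil.rmtree(cache_dir)
    os.makedirs(cache_dir)
    # ... regenerate the cached contents, then stamp the cache:
    util.write_json(join(cache_dir, 'tag.json'), [asv.__version__])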
Example No. 54
0
def _rebuild_basic_html(basedir):
    local = abspath(dirname(__file__))
    cwd = os.getcwd()

    if os.path.isdir(basedir):
        html_dir = join(basedir, 'html')
        dvcs = tools.Git(join(basedir, 'repo'))
        return html_dir, dvcs

    os.makedirs(basedir)
    os.chdir(basedir)
    try:
        machine_file = join(basedir, 'asv-machine.json')

        shutil.copyfile(join(local, 'asv-machine.json'),
                        machine_file)

        values = [[x]*2 for x in [0, 0, 0, 0, 0,
                                  1, 1, 1, 1, 1,
                                  3, 3, 3, 3, 3,
                                  2, 2, 2, 2, 2]]
        dvcs = tools.generate_test_repo(basedir, values)
        first_tested_commit_hash = dvcs.get_hash('master~14')

        repo_path = dvcs.path
        shutil.move(repo_path, join(basedir, 'repo'))
        dvcs = tools.Git(join(basedir, 'repo'))

        conf = config.Config.from_json({
            'env_dir': join(basedir, 'env'),
            'benchmark_dir': join(local, 'benchmark'),
            'results_dir': join(basedir, 'results_workflow'),
            'html_dir': join(basedir, 'html'),
            'repo': join(basedir, 'repo'),
            'dvcs': 'git',
            'project': 'asv',
            'matrix': {},
            'regressions_first_commits': {
                '.*': first_tested_commit_hash
            },
        })

        tools.run_asv_with_conf(conf, 'run', 'ALL',
                                '--show-stderr', '--quick', '--bench=params_examples.*track_.*',
                                _machine_file=machine_file)

        # Swap CPU info and obtain some results
        info = util.load_json(machine_file, api_version=1)

        # Put in parameter values that need quoting in file names
        info['orangutan']['cpu'] = 'Not /really/ <fast>'
        info['orangutan']['ram'] = '?'
        info['orangutan']['NUL'] = ''

        util.write_json(machine_file, info, api_version=1)

        tools.run_asv_with_conf(conf, 'run', 'master~10..', '--steps=3',
                                '--show-stderr', '--quick', '--bench=params_examples.*track_.*',
                                _machine_file=machine_file)

        # Output
        tools.run_asv_with_conf(conf, 'publish')

        shutil.rmtree(join(basedir, 'env'))
    finally:
        os.chdir(cwd)

    return conf.html_dir, dvcs
Example No. 55
0
def run_asv(args, current_repo=False):
    cwd = os.path.abspath(os.path.dirname(__file__))

    if current_repo:
        try:
            from asv.util import load_json, write_json
            conf = load_json(os.path.join(cwd, 'asv.conf.json'))
            conf['repo'] = os.path.normpath(os.path.join(cwd, '..'))
            cfg_fn = os.path.join(cwd, '.asvconf.tmp')
            write_json(cfg_fn, conf)
            args = ['--config', cfg_fn] + args
        except ImportError:
            pass

    repo_dir = os.path.join(cwd, 'scipy')
    if is_git_repo_root(repo_dir):
        if current_repo:
            url = os.path.normpath(os.path.join(cwd, '..'))
        else:
            url = "https://github.com/scipy/scipy.git"
        subprocess.call(['git', 'remote', 'set-url', "origin", url],
                        cwd=repo_dir)

    cmd = ['asv'] + list(args)
    env = dict(os.environ)

    # Inject ccache/f90cache paths
    if sys.platform.startswith('linux'):
        env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))

    # Control BLAS and CFLAGS
    env['OPENBLAS_NUM_THREADS'] = '1'
    env['CFLAGS'] = drop_bad_flags(sysconfig.get_config_var('CFLAGS'))

    # Limit memory usage
    try:
        set_mem_rlimit()
    except (ImportError, RuntimeError):
        pass

    # Report the SciPy version when benchmarking the in-place (dev) build
    if args and (args[0] == 'dev' or '--python=same' in args):
        import scipy
        print("Running benchmarks for Scipy version %s at %s" % (scipy.__version__, scipy.__file__))

    # Disable the gh-pages command
    if 'gh-pages' in args:
        print("gh-pages command is disabled")
        return 1

    # Run
    try:
        return subprocess.call(cmd, env=env, cwd=cwd)
    except OSError as err:
        if err.errno == 2:
            print("Error when running '%s': %s\n" % (" ".join(cmd), str(err),))
            print("You need to install Airspeed Velocity https://spacetelescope.github.io/asv/")
            print("to run Scipy benchmarks")
            return 1
        raise
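set_mem_rlimit and drop_bad_flags are not defined in this excerpt. A minimal sketch of what the former could look like, assuming the standard-library resource module (the import failing on platforms such as Windows matches the caller's except (ImportError, RuntimeError) handling):

def set_mem_rlimit(max_mem=int(2e9)):
    # Hypothetical helper: cap the process address space so a runaway
    # benchmark or build cannot exhaust the machine's memory.
    import resource  # raises ImportError where unavailable (e.g. Windows)
    resource.setrlimit(resource.RLIMIT_AS, (max_mem, max_mem))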
Example No. 56
0
File: test_web.py Project: eteq/asv
def _rebuild_basic_html(basedir):
    local = abspath(dirname(__file__))
    cwd = os.getcwd()

    if os.path.isdir(basedir):
        html_dir = join(basedir, 'html')
        dvcs = tools.Git(join(basedir, 'repo'))
        return html_dir, dvcs

    os.makedirs(basedir)
    os.chdir(basedir)
    try:
        machine_file = join(basedir, 'asv-machine.json')

        shutil.copyfile(join(local, 'asv-machine.json'), machine_file)

        values = [[x] * 2 for x in
                  [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2]]
        dvcs = tools.generate_test_repo(basedir, values)
        first_tested_commit_hash = dvcs.get_hash('master~14')

        repo_path = dvcs.path
        shutil.move(repo_path, join(basedir, 'repo'))
        dvcs = tools.Git(join(basedir, 'repo'))

        conf = config.Config.from_json({
            'env_dir': join(basedir, 'env'),
            'benchmark_dir': join(local, 'benchmark'),
            'results_dir': join(basedir, 'results_workflow'),
            'html_dir': join(basedir, 'html'),
            'repo': join(basedir, 'repo'),
            'dvcs': 'git',
            'project': 'asv',
            'matrix': {},
            'regressions_first_commits': {
                '.*': first_tested_commit_hash
            },
        })

        tools.run_asv_with_conf(conf,
                                'run',
                                'ALL',
                                '--show-stderr',
                                '--quick',
                                '--bench=params_examples.*track_.*',
                                _machine_file=machine_file)

        # Swap CPU info and obtain some results
        info = util.load_json(machine_file, api_version=1)

        # Put in parameter values that need quoting in file names
        info['orangutan']['cpu'] = 'Not /really/ <fast>'
        info['orangutan']['ram'] = '?'
        info['orangutan']['NUL'] = ''

        util.write_json(machine_file, info, api_version=1)

        tools.run_asv_with_conf(conf,
                                'run',
                                'master~10..',
                                '--steps=3',
                                '--show-stderr',
                                '--quick',
                                '--bench=params_examples.*track_.*',
                                _machine_file=machine_file)

        # Output
        tools.run_asv_with_conf(conf, 'publish')

        shutil.rmtree(join(basedir, 'env'))
    finally:
        os.chdir(cwd)

    return conf.html_dir, dvcs
Example No. 57
0
def test_publish(tmpdir, example_results):
    tmpdir = str(tmpdir)
    os.chdir(tmpdir)

    result_dir = join(tmpdir, 'sample_results')
    os.makedirs(result_dir)
    os.makedirs(join(result_dir, 'cheetah'))

    # Synthesize history with two branches that both have commits
    result_files = [fn for fn in os.listdir(join(example_results, 'cheetah'))
                    if fn.endswith('.json') and fn != 'machine.json']
    result_files.sort()
    master_values = list(range(len(result_files) * 2 // 3))
    branch_values = list(range(len(master_values), len(result_files)))
    dvcs = tools.generate_test_repo(tmpdir, master_values, 'git',
                                    [('master~6', 'some-branch', branch_values)])

    # Copy and modify result files, fixing commit hashes to match the
    # synthesized two-branch history
    master_commits = dvcs.get_branch_hashes('master')
    only_branch = [x for x in dvcs.get_branch_hashes('some-branch')
                   if x not in master_commits]
    commits = master_commits + only_branch
    for k, item in enumerate(zip(result_files, commits)):
        fn, commit = item
        src = join(example_results, 'cheetah', fn)
        dst = join(result_dir, 'cheetah', commit[:8] + fn[8:])
        try:
            data = util.load_json(src)
        except util.UserError:
            # intentionally malformed file, ship it as is
            shutil.copyfile(src, dst)
            continue
        data['commit_hash'] = commit
        util.write_json(dst, data)

    shutil.copyfile(join(example_results, 'benchmarks.json'),
                    join(result_dir, 'benchmarks.json'))
    shutil.copyfile(join(example_results, 'cheetah', 'machine.json'),
                    join(result_dir, 'cheetah', 'machine.json'))

    # Publish the synthesized data
    conf = config.Config.from_json(
        {'benchmark_dir': BENCHMARK_DIR,
         'results_dir': result_dir,
         'html_dir': join(tmpdir, 'html'),
         'repo': dvcs.path,
         'project': 'asv'})

    tools.run_asv_with_conf(conf, 'publish')

    # Check output
    assert isfile(join(tmpdir, 'html', 'index.html'))
    assert isfile(join(tmpdir, 'html', 'index.json'))
    assert isfile(join(tmpdir, 'html', 'asv.js'))
    assert isfile(join(tmpdir, 'html', 'asv.css'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64',
                          'branch-some-branch'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython-null', 'arch-x86_64',
                          'branch-some-branch'))
    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master']

    repo = get_repo(conf)
    revision_to_hash = dict((r, h) for h, r in repo.get_revisions(commits).items())

    def check_file(branch, cython):
        fn = join(tmpdir, 'html', 'graphs', cython, 'arch-x86_64', 'branch-' + branch,
                  'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                  'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
                  'time_coordinates.time_latitude.json')
        data = util.load_json(fn)
        data_commits = [revision_to_hash[x[0]] for x in data]
        if branch == "master":
            assert all(c in master_commits for c in data_commits)
        else:
            # Must contain commits from some-branch
            assert any(c in only_branch for c in data_commits)
            # And commits from master
            assert any(c in master_commits for c in data_commits)

        # Check that revisions are strictly increasing
        assert all(x[0] < y[0] for x, y in zip(data, data[1:]))

    check_file("master", "Cython")
    check_file("master", "Cython-null")

    # Publish with branches set in the config
    conf.branches = ['master', 'some-branch']
    tools.run_asv_with_conf(conf, 'publish')

    # Check output
    check_file("master", "Cython")
    check_file("master", "Cython-null")
    check_file("some-branch", "Cython")
    check_file("some-branch", "Cython-null")

    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master', 'some-branch']
    assert index['params']['Cython'] == ['', None]
    assert index['params']['ram'] == ['8.2G', 8804682956.8]

    expected_graph_list = [{'Cython': cython, 'arch': 'x86_64',
                            'branch': branch,
                            'cpu': 'Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                            'machine': 'cheetah',
                            'numpy': '1.8',
                            'os': 'Linux (Fedora 20)',
                            'python': '2.7',
                            'ram': '8.2G'}
                           for cython in ["", None] for branch in ["master", "some-branch"]]
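    # Also expect one extra variant of the first graph entry, with 'ram'
    # reported as a numeric byte count instead of as a string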
    d = dict(expected_graph_list[0])
    d['ram'] = 8804682956.8
    expected_graph_list.append(d)

    assert len(index['graph_param_list']) == len(expected_graph_list)
    for item in expected_graph_list:
        assert item in index['graph_param_list']
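The two-branch history above comes from tools.generate_test_repo's fourth argument; each (start, name, values) tuple appears to branch off start and add one commit per value. A minimal sketch with a hypothetical branch name and values:

dvcs = tools.generate_test_repo(
    tmpdir, [0, 1, 2, 3], 'git',
    [('master~2', 'feature', [10, 11])])  # hypothetical extra branch
master_commits = dvcs.get_branch_hashes('master')
feature_only = [h for h in dvcs.get_branch_hashes('feature')
                if h not in master_commits]
assert feature_only  # the branch carries commits master does not have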
Example No. 58
0
def test_publish(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    result_dir = join(tmpdir, 'sample_results')
    os.makedirs(result_dir)
    os.makedirs(join(result_dir, 'cheetah'))

    # Synthesize history with two branches that both have commits
    result_files = [fn for fn in os.listdir(join(RESULT_DIR, 'cheetah'))
                    if fn.endswith('.json') and fn != 'machine.json']
    master_values = list(range(len(result_files)*2//3))
    branch_values = list(range(len(master_values), len(result_files)))
    dvcs = tools.generate_test_repo(tmpdir, master_values, 'git',
                                    [('master~6', 'some-branch', branch_values)])

    # Copy and modify result files, fixing commit hashes and setting result
    # dates to distinguish the two branches
    master_commits = dvcs.get_branch_hashes('master')
    only_branch = [x for x in dvcs.get_branch_hashes('some-branch')
                   if x not in master_commits]
    commits = master_commits + only_branch
    for k, item in enumerate(zip(result_files, commits)):
        fn, commit = item
        src = join(RESULT_DIR, 'cheetah', fn)
        dst = join(result_dir, 'cheetah', commit[:8] + fn[8:])
        data = util.load_json(src, cleanup=False)
        data['commit_hash'] = commit
        if commit in only_branch:
            data['date'] = -k
        else:
            data['date'] = k
        util.write_json(dst, data)

    shutil.copyfile(join(RESULT_DIR, 'benchmarks.json'),
                    join(result_dir, 'benchmarks.json'))
    shutil.copyfile(join(RESULT_DIR, 'cheetah', 'machine.json'),
                    join(result_dir, 'cheetah', 'machine.json'))

    # Publish the synthesized data
    conf = config.Config.from_json(
        {'benchmark_dir': BENCHMARK_DIR,
         'results_dir': result_dir,
         'html_dir': join(tmpdir, 'html'),
         'repo': dvcs.path,
         'project': 'asv'})

    Publish.run(conf)

    # Check output
    assert isfile(join(tmpdir, 'html', 'index.html'))
    assert isfile(join(tmpdir, 'html', 'index.json'))
    assert isfile(join(tmpdir, 'html', 'asv.js'))
    assert isfile(join(tmpdir, 'html', 'asv.css'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64',
                          'branch-some-branch'))
    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master']

    def check_file(branch):
        fn = join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64', 'branch-' + branch,
                  'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                  'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
                  'time_coordinates.time_latitude.json')
        data = util.load_json(fn, cleanup=False)
        if branch == 'master':
            # we set all dates positive for master above
            assert all(x[0] >= 0 for x in data)
        else:
            # we set some dates negative for some-branch above
            assert any(x[0] < 0 for x in data) and any(x[0] >= 0 for x in data)

    check_file("master")

    # Publish with branches set in the config
    conf.branches = ['master', 'some-branch']
    Publish.run(conf)

    # Check output
    check_file("master")
    check_file("some-branch")

    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master', 'some-branch']
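Note the contrast with Example No. 57: there the graph x values are repository revisions (hence the strictly-increasing check), whereas in this older variant they are the synthetic dates written into the result files, so branch membership can be read straight off the sign. A minimal sketch of that reading, reusing check_file's fn:

data = util.load_json(fn, cleanup=False)  # rows are [date, value] pairs here
some_branch_rows = [row for row in data if row[0] < 0]  # dates set negative above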
Example No. 59
0
def _rebuild_basic_html(basedir):
    local = abspath(dirname(__file__))
    cwd = os.getcwd()

    if os.path.isdir(basedir):
        html_dir = join(basedir, 'html')
        dvcs = tools.Git(join(basedir, 'repo'))
        return html_dir, dvcs

    os.makedirs(basedir)
    os.chdir(basedir)
    try:
        machine_file = join(basedir, 'asv-machine.json')

        shutil.copyfile(join(local, 'asv-machine.json'), machine_file)

        values = [[x] * 2 for x in
                  [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2]]
        dvcs = tools.generate_test_repo(basedir, values)
        first_tested_commit_hash = dvcs.get_hash('master~14')

        repo_path = dvcs.path
        shutil.move(repo_path, join(basedir, 'repo'))
        dvcs = tools.Git(join(basedir, 'repo'))

        conf = config.Config.from_json({
            'env_dir': join(basedir, 'env'),
            'benchmark_dir': join(local, 'benchmark'),
            'results_dir': join(basedir, 'results_workflow'),
            'html_dir': join(basedir, 'html'),
            'repo': join(basedir, 'repo'),
            'dvcs': 'git',
            'project': 'asv',
            'matrix': {
                "env": {
                    "SOME_TEST_VAR": ["1"]
                }
            },
            'regressions_first_commits': {
                '.*': first_tested_commit_hash
            },
        })

        if WIN:
            # Tell conda to not use hardlinks: on Windows it's not possible
            # to delete hard links to files in use, which causes problems
            # when trying to clean up environments during this test (since
            # the same cache directory may get reused).
            conf.matrix["env"]["CONDA_ALWAYS_COPY"] = ["True"]

        tools.run_asv_with_conf(conf,
                                'run',
                                'ALL',
                                '--show-stderr',
                                '--quick',
                                '--bench=params_examples[a-z0-9_.]*track_',
                                _machine_file=machine_file)

        # Swap CPU info and obtain some results
        info = util.load_json(machine_file, api_version=1)

        # Put in parameter values that need quoting in file names
        info['orangutan']['cpu'] = 'Not /really/ <fast>'
        info['orangutan']['ram'] = '?'
        info['orangutan']['NUL'] = ''

        util.write_json(machine_file, info, api_version=1)

        tools.run_asv_with_conf(conf,
                                'run',
                                'master~10..',
                                '--steps=3',
                                '--show-stderr',
                                '--quick',
                                '--bench=params_examples[a-z0-9_.]*track_',
                                _machine_file=machine_file)

        # Output
        tools.run_asv_with_conf(conf, 'publish')

        shutil.rmtree(join(basedir, 'env'))
    finally:
        os.chdir(cwd)

    return conf.html_dir, dvcs
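A hypothetical example (not part of this excerpt) of how a test could consume the rebuilt output; publish, run inside _rebuild_basic_html, should have produced index.json:

def test_html_smoke():
    html_dir, dvcs = _rebuild_basic_html('/tmp/asv-basic-html')  # hypothetical path
    assert os.path.isfile(os.path.join(html_dir, 'index.json'))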