Example #1

def test_environment_select_autodetect():
    conf = config.Config()
    conf.environment_type = "conda"
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {
        "six": ["1.10"],
    }

    # Check autodetect
    environments = list(environment.get_environments(conf, [":" + PYTHON_VER1]))
    assert len(environments) == 1
    assert environments[0].python == PYTHON_VER1
    assert environments[0].tool_name in ("virtualenv", "conda")

    # Check interaction with exclude
    conf.exclude = [{'environment_type': 'matches nothing'}]
    environments = list(environment.get_environments(conf, [":" + PYTHON_VER1]))
    assert len(environments) == 1

    conf.exclude = [{'environment_type': 'virtualenv|conda'}]
    environments = list(environment.get_environments(conf, [":" + PYTHON_VER1]))
    assert len(environments) == 1

    conf.exclude = [{'environment_type': 'conda'}]
    environments = list(environment.get_environments(conf, ["conda:" + PYTHON_VER1]))
    assert len(environments) == 1
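
A note on the selector strings used above, inferred from this suite: get_environments accepts "tool:python" to pin both the tool and the interpreter, a bare ":python" to autodetect the tool, a bare tool name to use the configured pythons, or a full environment name. A minimal sketch under that assumption, reusing conf and PYTHON_VER1 from the test:

# Sketch only: the selector forms this suite exercises.
for spec in [":" + PYTHON_VER1,       # autodetect the tool for this Python
             "conda:" + PYTHON_VER1,  # pin both tool and Python
             "conda"]:                # pin the tool, use conf.pythons
    for env in environment.get_environments(conf, [spec]):
        assert env.tool_name and env.python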

Example #2

def test_matrix_existing():
    conf = config.Config()
    conf.environment_type = "existing"
    conf.pythons = ["same"]
    conf.matrix = {'foo': ['a', 'b'], 'bar': ['c', 'd']}

    # ExistingEnvironment should ignore the matrix
    environments = list(environment.get_environments(conf, None))
    items = [(env.tool_name, tuple(env.requirements.keys())) for env in environments]
    assert items == [('existing', ())]

    conf.exclude = [{'environment_type': '.*'}]
    environments = list(environment.get_environments(conf, None))
    items = [(env.tool_name, tuple(env.requirements.keys())) for env in environments]
    assert items == [('existing', ())]
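
For contrast with the matrix-driven examples elsewhere on this page, a minimal sketch of the property asserted above: an "existing" environment reports no requirements, whatever conf.matrix says.

env, = environment.get_environments(conf, None)
assert env.tool_name == 'existing'
assert dict(env.requirements) == {}  # conf.matrix is ignored entirely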

Example #3

def test_presence_checks(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7"]
    conf.matrix = {}
    environments = list(environment.get_environments(conf))

    for env in environments:
        env.create()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '3.4'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == '2.7'
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fn = os.path.join(env._path, 'bin', 'pip')
        os.remove(pip_fn)
        env._is_setup = False
        env.create()
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])

Example #4

def test_quick(tmpdir):
    # Check that the quick option works
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
    skip_names = [name for name in b.keys() if name != 'time_examples.TimeWithRepeat.time_it']
    times = b.run_benchmarks(envs[0], quick=True, show_stderr=True, skip=skip_names)

    assert len(times) == 1

    # Check that the benchmark was run only once. The result for quick==False
    # is tested above in test_find_benchmarks
    expected = ["<1>"]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected
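
A hedged recap of the two knobs combined above: quick=True runs each benchmark exactly once, with no repeats, and skip= drops every benchmark except the one under test, which is why the stderr marker "<1>" appears a single time. Sketch, reusing names from the test:

# Sketch: run a single benchmark once, with no repeats.
keep = 'time_examples.TimeWithRepeat.time_it'
times = b.run_benchmarks(envs[0], quick=True, show_stderr=True,
                         skip=[name for name in b.keys() if name != keep])
assert set(times.keys()) == {keep}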

Example #5

def test_matrix_environments(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7", "3.4"]
    conf.matrix = {
        "six": ["1.4", None],
        "colorama": ["0.3.1", "0.3.3"]
    }
    environments = list(environment.get_environments(conf))

    assert len(environments) == 2 * 2 * 2

    # Only test the first two environments, since this is so time
    # consuming
    for env in environments[:2]:
        env.create()

        output = env.run(
            ['-c', 'import six, sys; sys.stdout.write(six.__version__)'],
            valid_return_codes=None)
        if 'six' in env._requirements:
            assert output.startswith(six.text_type(env._requirements['six']))

        output = env.run(
            ['-c', 'import colorama, sys; sys.stdout.write(colorama.__version__)'])
        assert output.startswith(six.text_type(env._requirements['colorama']))
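
The 2 * 2 * 2 expectation above is just the product of the configuration axes: two Pythons, two "six" entries (None means "six not installed", an extra variant rather than an omitted axis), and two "colorama" versions. A sketch of that arithmetic, assuming the conf built above:

import itertools

axes = [conf.pythons] + list(conf.matrix.values())
assert len(list(itertools.product(*axes))) == 8  # 2 pythons * 2 six * 2 colorama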

Example #6

def test_matrix_environments(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7", "3.4"]
    conf.matrix = {
        "six": ["1.4", None],
        "psutil": ["1.2", "2.1"]
    }
    environments = list(environment.get_environments(conf))

    assert len(environments) == 2 * 2 * 2

    # Only test the first two environments, since this is so time
    # consuming
    for env in environments[:2]:
        env.create()

        output = env.run(
            ['-c', 'import six, sys; sys.stdout.write(six.__version__)'])
        if env._requirements['six'] is not None:
            assert output.startswith(six.text_type(env._requirements['six']))

        output = env.run(
            ['-c', 'import psutil, sys; sys.stdout.write(psutil.__version__)'])
        assert output.startswith(six.text_type(env._requirements['psutil']))

Example #7

def _test_run_branches(tmpdir, dvcs, conf, machine_file, range_spec,
                       branches, initial_commit):
    # Find the current head commits for each branch
    commits = [initial_commit]
    for branch in branches:
        commits.append(dvcs.get_hash(branch))

    # Run tests
    tools.run_asv_with_conf(conf, 'run', range_spec, '--quick',
                            _machine_file=machine_file)

    # Check that files for all commits expected were generated
    envs = list(environment.get_environments(conf, None))
    tool_name = envs[0].tool_name

    expected = set(['machine.json'])
    for commit in commits:
        for psver in ['0.3.6', '0.3.7']:
            expected.add('{0}-{1}-py{2[0]}.{2[1]}-colorama{3}-six.json'.format(
                commit[:8], tool_name, sys.version_info, psver))

    result_files = os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))

    if range_spec == 'NEW':
        assert set(result_files) == expected
    elif range_spec == 'ALL':
        assert set(expected).difference(result_files) == set([])
    else:
        raise ValueError()

Example #8

def test_environment_environ_path(environment_type, tmpdir, monkeypatch):
    # Check that virtualenv binary dirs are in the PATH
    conf = config.Config()
    conf.env_dir = six.text_type(tmpdir.join("env"))
    conf.environment_type = environment_type
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}

    env, = environment.get_environments(conf, [])
    env.create()
    output = env.run(['-c', 'import os; print(os.environ["PATH"])'])
    paths = output.strip().split(os.pathsep)
    assert os.path.commonprefix([paths[0], conf.env_dir]) == conf.env_dir

    # Check user-site directory is not in sys.path
    output = env.run(['-c', 'import site; print(site.ENABLE_USER_SITE)'])
    usersite_in_syspath = output.strip()
    assert usersite_in_syspath == "False"

    # Check PYTHONPATH is ignored
    monkeypatch.setenv(str('PYTHONPATH'), str(tmpdir))
    output = env.run(['-c', 'import os; print(os.environ.get("PYTHONPATH", ""))'])
    assert output.strip() == ""

    monkeypatch.setenv(str('ASV_PYTHONPATH'), str("Hello python path"))
    output = env.run(['-c', 'import os; print(os.environ["PYTHONPATH"])'])
    assert output.strip() == "Hello python path"
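
Read together, the four checks above pin down one contract, stated here as an assumption drawn from the asserts: the environment's own binary directory comes first on the child's PATH, the user-site directory is disabled, the caller's PYTHONPATH is dropped, and ASV_PYTHONPATH, when set, is re-exported to the child as PYTHONPATH. A sketch of the unset case, reusing the test's env and monkeypatch:

# Sketch: with ASV_PYTHONPATH unset, the child should see no PYTHONPATH at all.
monkeypatch.delenv('ASV_PYTHONPATH', raising=False)
monkeypatch.setenv('PYTHONPATH', str(tmpdir))
output = env.run(['-c', 'import os; print(os.environ.get("PYTHONPATH", ""))'])
assert output.strip() == ""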

Example #9

def test_build_isolation(tmpdir):
    # build should not fail with build_cache on projects that have pyproject.toml
    tmpdir = six.text_type(tmpdir)

    # Create installable repository with pyproject.toml in it
    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    fn = os.path.join(dvcs.path, 'pyproject.toml')
    with open(fn, 'w') as f:
        f.write('[build-system]\n'
                'requires = ["wheel", "setuptools"]')
    dvcs.add(fn)
    dvcs.commit("Add pyproject.toml")
    commit_hash = dvcs.get_hash("master")

    # Setup config
    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    conf.repo = os.path.abspath(dvcs.path)
    conf.build_cache_size = 8

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()

    # Project installation should succeed
    env.install_project(conf, repo, commit_hash)
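
The pyproject.toml written above is the smallest PEP 518 configuration: a [build-system] table naming the build requirements, which routes the install through pip's isolated-build path. A sketch of the same content as a reusable constant (illustrative only, not part of asv); note that with conf.build_cache_size = 8 the built wheel may also be cached and reused for repeat installs of the same commit.

MINIMAL_PYPROJECT = ('[build-system]\n'
                     'requires = ["wheel", "setuptools"]\n')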

Example #10

def test_matrix_environments(tmpdir, dummy_packages):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = [PYTHON_VER1, PYTHON_VER2]
    conf.matrix = {
        "asv_dummy_test_package_1": [DUMMY1_VERSION, None],
        "asv_dummy_test_package_2": DUMMY2_VERSIONS
    }
    environments = list(environment.get_environments(conf, None))

    assert len(environments) == 2 * 2 * 2

    # Only test the first two environments, since this is so time
    # consuming
    for env in environments[:2]:
        env.create()

        output = env.run(
            ['-c', 'import asv_dummy_test_package_1 as p, sys; sys.stdout.write(p.__version__)'],
            valid_return_codes=None)
        if 'asv_dummy_test_package_1' in env._requirements:
            assert output.startswith(six.text_type(env._requirements['asv_dummy_test_package_1']))

        output = env.run(
            ['-c', 'import asv_dummy_test_package_2 as p, sys; sys.stdout.write(p.__version__)'])
        assert output.startswith(six.text_type(env._requirements['asv_dummy_test_package_2']))

Example #11

    def _test_run(range_spec, branches, expected_commits):
        # Rollback initial results
        shutil.rmtree(results_dir)
        shutil.copytree(template_dir, results_dir)

        args = ["run", "--quick", "--skip-existing-successful",
                "--bench=time_secondary.track_value",
                "-s", "1000"  # large number of steps should be noop
               ]
        if range_spec is not None:
            args.append(range_spec)
        conf.branches = branches
        tools.run_asv_with_conf(conf, *args, _machine_file=machine_file)

        # Check that files for all commits expected were generated
        envs = list(environment.get_environments(conf, None))
        tool_name = envs[0].tool_name

        pyver = conf.pythons[0]
        if pyver.startswith('pypy'):
            pyver = pyver[2:]

        expected = set(['machine.json'])
        for commit in expected_commits:
            for psver in tools.DUMMY2_VERSIONS:
                expected.add('{0}-{1}-py{2}-asv_dummy_test_package_1-asv_dummy_test_package_2{3}.json'.format(
                    commit[:8], tool_name, pyver, psver))

        result_files = os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))

        assert set(result_files) == expected

Example #12

    def _test_run(range_spec, branches, expected_commits):
        # Rollback initial results
        shutil.rmtree(results_dir)
        shutil.copytree(template_dir, results_dir)

        args = ["run", "--quick", "--skip-existing-successful",
                "--bench=time_secondary.track_value"]
        if range_spec is not None:
            args.append(range_spec)
        conf.branches = branches
        tools.run_asv_with_conf(conf, *args, _machine_file=machine_file)

        # Check that files for all commits expected were generated
        envs = list(environment.get_environments(conf, None))
        tool_name = envs[0].tool_name

        expected = set(['machine.json'])
        for commit in expected_commits:
            for psver in ['0.3.6', '0.3.7']:
                expected.add('{0}-{1}-py{2[0]}.{2[1]}-colorama{3}-six.json'.format(
                    commit[:8], tool_name, sys.version_info, psver))

        result_files = os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))

        assert set(result_files) == expected

Example #13

def test_conf_inside_benchmarks_dir(tmpdir):
    # Test that the configuration file can be inside the benchmark suite

    tmpdir = six.text_type(tmpdir)
    benchmark_dir = os.path.join(tmpdir, 'benchmark')

    os.makedirs(benchmark_dir)
    with open(os.path.join(benchmark_dir, '__init__.py'), 'w') as f:
        # Test also benchmark in top-level __init__.py
        f.write("def track_this(): pass")

    with open(os.path.join(benchmark_dir, 'bench.py'), 'w') as f:
        f.write("def track_this(): pass")

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = '.'
    d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
    conf = config.Config.from_json(d)

    # NB. conf_dir == getcwd()
    os.chdir(benchmark_dir)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='track_this')
    assert set(b.keys()) == {'track_this', 'bench.track_this'}

Example #14

def test_matrix_empty():
    conf = config.Config()
    conf.environment_type = ""
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}

    # Check default environment config
    environments = list(environment.get_environments(conf, None))
    items = [env.python for env in environments]
    assert items == [PYTHON_VER1]

Example #16

def test_environment_name_sanitization():
    conf = config.Config()
    conf.environment_type = "conda"
    conf.pythons = ["3.5"]
    conf.matrix = {
        "pip+git+http://github.com/space-telescope/asv.git": [],
    }

    # Check name sanitization
    environments = list(environment.get_environments(conf, []))
    assert len(environments) == 1
    assert environments[0].name == "conda-py3.5-pip+git+http___github.com_space-telescope_asv.git"
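
A sketch of the rule the assertion above encodes: characters that are unsafe in a directory name (here ':' and '/') are mapped to underscores when the requirement string is embedded in the environment name. The helper below is a hypothetical re-implementation for illustration, not asv's actual code:

import re

def _sanitize_fragment(req):
    # Hypothetical: keep a conservative safe set, underscore everything else.
    return re.sub(r'[^A-Za-z0-9_.+-]', '_', req)

assert _sanitize_fragment(
    "pip+git+http://github.com/space-telescope/asv.git"
) == "pip+git+http___github.com_space-telescope_asv.git"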

Example #18

def test_presence_checks(tmpdir, monkeypatch):
    conf = config.Config()

    if WIN:
        # Tell conda to not use hardlinks: on Windows it's not possible
        # to delete hard links to files in use, which causes problem when
        # trying to cleanup environments during this test
        monkeypatch.setenv(str('CONDA_ALWAYS_COPY'), str('True'))

    conf.env_dir = str(tmpdir.join("env"))

    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    environments = list(environment.get_environments(conf, None))

    for env in environments:
        env.create()
        assert env.check_presence()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '0'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == PYTHON_VER1
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fns = [
            os.path.join(env._path, 'bin', 'pip')
        ]
        if WIN:
            pip_fns += [
                os.path.join(env._path, 'bin', 'pip.exe'),
                os.path.join(env._path, 'Scripts', 'pip'),
                os.path.join(env._path, 'Scripts', 'pip.exe')
            ]

        some_removed = False
        for pip_fn in pip_fns:
            if os.path.isfile(pip_fn):
                some_removed = True
                os.remove(pip_fn)
        assert some_removed

        env._is_setup = False
        env.create()
        # pip_fn still names the last path removed above; create() should
        # have restored it
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])

Example #19

def test_environment_select_autodetect():
    conf = config.Config()
    conf.environment_type = "conda"
    conf.pythons = ["3.4"]
    conf.matrix = {
        "six": ["1.4"],
    }

    # Check autodetect
    environments = list(environment.get_environments(conf, [":2.7"]))
    assert len(environments) == 1
    assert environments[0].python == "2.7"
    assert environments[0].tool_name in ("virtualenv", "conda")

Example #21

def test_invalid_benchmark_tree(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['benchmark_dir'] = INVALID_BENCHMARK_DIR
    d['env_dir'] = "env"
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    envs = list(environment.get_environments(conf, None))

    with pytest.raises(util.UserError):
        b = benchmarks.Benchmarks(conf, envs)

Example #22

def test_set_commit_hash(capsys, existing_env_conf):
    tmpdir, local, conf, machine_file = existing_env_conf

    r = repo.get_repo(conf)
    commit_hash = r.get_hash_from_name(r.get_branch_name())

    tools.run_asv_with_conf(conf, 'run', '--set-commit-hash=' + commit_hash, _machine_file=join(tmpdir, 'asv-machine.json'))

    env_name = list(environment.get_environments(conf, None))[0].name
    result_filename = commit_hash[:conf.hash_length] + '-' + env_name + '.json'
    assert result_filename in os.listdir(join('results_workflow', 'orangutan'))

    result_path = join('results_workflow', 'orangutan', result_filename)
    times = results.Results.load(result_path)
    assert times.commit_hash == commit_hash

Example #24

def test_conda_run_executable(tmpdir):
    # test that we can install with pip into a conda environment.
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.environment_type = "conda"
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    environments = list(environment.get_environments(conf, None))

    assert len(environments) == 1 * 1 * 1

    for env in environments:
        env.create()
        env.run_executable('conda', ['info'])

Example #25

def test_environment_select():
    conf = config.Config()
    conf.environment_type = "conda"
    conf.pythons = ["2.7", "3.4"]
    conf.matrix = {
        "six": ["1.4"],
    }
    conf.include = [
        {'environment_type': 'conda', 'python': '1.9'}
    ]

    # Check default environment config
    environments = list(environment.get_environments(conf, None))
    items = set((env.tool_name, env.python) for env in environments)
    assert items == set([('conda', '2.7'), ('conda', '3.4'), ('conda', '1.9')])

    if HAS_PYTHON_27 and HAS_VIRTUALENV:
        # Virtualenv plugin fails on initialization if not available,
        # so these tests pass only if virtualenv is present

        conf.pythons = ["2.7"]

        # Check default python specifiers
        environments = list(environment.get_environments(conf, ["conda", "virtualenv"]))
        items = set((env.tool_name, env.python) for env in environments)
        assert items == set([('conda', '1.9'), ('conda', '2.7'), ('virtualenv', '2.7')])

        # Check specific python specifiers
        environments = list(environment.get_environments(conf, ["conda:3.4", "virtualenv:2.7"]))
        items = set((env.tool_name, env.python) for env in environments)
        assert items == set([('conda', '3.4'), ('virtualenv', '2.7')])

    # Check same specifier
    environments = list(environment.get_environments(conf, ["existing:same", ":same", "existing"]))
    items = [env.tool_name for env in environments]
    assert items == ['existing', 'existing', 'existing']

    # Check autodetect existing
    executable = os.path.relpath(os.path.abspath(sys.executable))
    environments = list(environment.get_environments(conf, ["existing",
                                                            ":same",
                                                            ":" + executable]))
    assert len(environments) == 3
    for env in environments:
        assert env.tool_name == "existing"
        assert env.python == "{0[0]}.{0[1]}".format(sys.version_info)
        assert os.path.normcase(os.path.abspath(env._executable)) == os.path.normcase(os.path.abspath(sys.executable))

    # Select by environment name
    environments = list(environment.get_environments(conf, ["conda-py2.7-six1.4"]))
    assert len(environments) == 1
    assert environments[0].python == "2.7"
    assert environments[0].tool_name == "conda"
    assert environments[0].requirements == {'six': '1.4'}
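
The select-by-name case above works because environment names follow a predictable layout: the tool, then "py" plus the Python version, then each requirement with its version, joined by hyphens. A sketch under that assumption:

# Sketch: reassemble the name the test selects by.
tool, python, reqs = "conda", "2.7", {"six": "1.4"}
name = "-".join([tool, "py" + python] +
                ["{0}{1}".format(req, ver) for req, ver in sorted(reqs.items())])
assert name == "conda-py2.7-six1.4"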

Example #26

def test_pypy_virtualenv(tmpdir):
    # test that we can setup a pypy environment
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.environment_type = "virtualenv"
    conf.pythons = ["pypy"]
    conf.matrix = {}
    environments = list(environment.get_environments(conf, None))

    assert len(environments) == 1

    for env in environments:
        env.create()
        output = env.run(['-c', 'import sys; print(sys.pypy_version_info)'])
        assert output.startswith(six.text_type("(major="))

Example #27

def test_invalid_benchmark_tree(tmpdir):
    tmpdir = str(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['benchmark_dir'] = INVALID_BENCHMARK_DIR
    d['env_dir'] = "env"
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    with pytest.raises(util.UserError):
        benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])

Example #28

def test_presence_checks(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    environments = list(environment.get_environments(conf, None))

    for env in environments:
        env.create()
        assert env.check_presence()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '0'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == PYTHON_VER1
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fns = [
            os.path.join(env._path, 'bin', 'pip')
        ]
        if WIN:
            pip_fns += [
                os.path.join(env._path, 'bin', 'pip.exe'),
                os.path.join(env._path, 'Scripts', 'pip'),
                os.path.join(env._path, 'Scripts', 'pip.exe')
            ]

        some_removed = False
        for pip_fn in pip_fns:
            if os.path.isfile(pip_fn):
                some_removed = True
                os.remove(pip_fn)
        assert some_removed

        env._is_setup = False
        env.create()
        # pip_fn still names the last path removed above; create() should
        # have restored it
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])

Example #29

def test_presence_checks(tmpdir):
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7"]
    conf.matrix = {}
    environments = list(environment.get_environments(conf))

    for env in environments:
        env.create()
        assert env.check_presence()

        # Check env is recreated when info file is clobbered
        info_fn = os.path.join(env._path, 'asv-env-info.json')
        data = util.load_json(info_fn)
        data['python'] = '3.4'
        util.write_json(info_fn, data)
        env._is_setup = False
        env.create()
        data = util.load_json(info_fn)
        assert data['python'] == '2.7'
        env.run(['-c', 'import os'])

        # Check env is recreated if crucial things are missing
        pip_fns = [
            os.path.join(env._path, 'bin', 'pip')
        ]
        if WIN:
            pip_fns += [
                os.path.join(env._path, 'bin', 'pip.exe'),
                os.path.join(env._path, 'Scripts', 'pip'),
                os.path.join(env._path, 'Scripts', 'pip.exe')
            ]

        some_removed = False
        for pip_fn in pip_fns:
            if os.path.isfile(pip_fn):
                some_removed = True
                os.remove(pip_fn)
        assert some_removed

        env._is_setup = False
        env.create()
        # pip_fn still names the last path removed above; create() should
        # have restored it
        assert os.path.isfile(pip_fn)
        env.run(['-c', 'import os'])

Example #31

def test_environment_environ_path(environment_type, tmpdir):
    # Check that virtualenv binary dirs are in the PATH
    conf = config.Config()
    conf.env_dir = six.text_type(tmpdir.join("env"))
    conf.environment_type = environment_type
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}

    env, = environment.get_environments(conf, [])
    env.create()
    output = env.run(['-c', 'import os; print(os.environ["PATH"])'])
    paths = output.strip().split(os.pathsep)
    assert os.path.commonprefix([paths[0], conf.env_dir]) == conf.env_dir

    # Check user-site directory is not in sys.path
    output = env.run(['-c', 'import site; print(site.ENABLE_USER_SITE)'])
    usersite_in_syspath = output.strip()
    assert usersite_in_syspath == "False"

Example #32

def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = os.path.join(tmpdir, "env")
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='example')
    assert len(b) == 5

    b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf)
    assert len(b) == 9

    envs = list(environment.get_environments(conf))
    b = benchmarks.Benchmarks(conf)
    times = b.run_benchmarks(envs[0], profile=True, show_exc=True)

    assert len(times) == 9
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] is None
    assert times[
        'subdir.time_subdir.time_foo']['result'] is not None
    assert times[
        'mem_examples.mem_list']['result'] > 2000
    assert times[
        'time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times[
        'time_secondary.track_value']

    profile_path = os.path.join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

Example #33

def benchmarks_fixture(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    d['branches'] = ["master"]
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    return conf, repo, envs, commit_hash

Example #35

def test_conda_channel_addition(tmpdir,
                                channel_list,
                                expected_channel):
    # test that we can add conda channels to environments
    # and that we respect the specified priority order
    # of channels
    conf = config.Config()
    conf.env_dir = six.text_type(tmpdir.join("env"))
    conf.environment_type = "conda"
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    # these have to be valid channels
    # available for online access
    conf.conda_channels = channel_list
    environments = list(environment.get_environments(conf, None))

    # should have one environment per Python version
    assert len(environments) == 1

    # create the environments
    for env in environments:
        env.create()
        # generate JSON output from conda list
        # and parse to verify added channels
        # for current env
        # (conda info would be more direct, but
        # seems to reflect contents of condarc file,
        # which we are intentionally trying not to modify)
        conda = util.which('conda')
        print("\n**conda being used:", conda)
        out_str = six.text_type(util.check_output([conda,
                                                    'list',
                                                    '-p',
                                                    os.path.normpath(env._path),
                                                    '--json']))
        json_package_list = json.loads(out_str)
        print(json_package_list)
        for installed_package in json_package_list:
            # check only explicitly installed packages
            if installed_package['name'] not in ('python',):
                continue
            print(installed_package)
            assert installed_package['channel'] == expected_channel
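
The channels exercised above come straight from conf.conda_channels; in a real project the same list would live in asv.conf.json. A hedged sketch in the dict form these tests build configs from, with hypothetical channel names:

d = dict(ASV_CONF_JSON)
d['environment_type'] = 'conda'
d['conda_channels'] = ['conda-forge', 'defaults']  # hypothetical, in priority order
conf = config.Config.from_json(d)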

Example #37

def test_environment_env_matrix():
    # (build_vars, non_build_vars, environ_count, build_count)
    configs = [
        ({}, {}, 1, 1),
        ({
            "var1": ["val1"]
        }, {}, 1, 1),
        ({
            "var1": ["val1", "val2", "val3"]
        }, {}, 3, 3),
        ({
            "var1": ["val1", "val2"],
            "var2": ['val3', 'val4']
        }, {}, 4, 4),
        ({
            "var1": ["val1", "val2"],
            "var2": ['val3', None]
        }, {}, 4, 4),
        ({
            "var1": ["val1", "val2"]
        }, {
            "var2": ['val3', None]
        }, 4, 2),
        ({
            "var1": ["val1", "val2"],
            "var2": ['val3', 'val4']
        }, {
            "var3": ['val5', None]
        }, 8, 4),
    ]

    for build_vars, non_build_vars, environ_count, build_count in configs:
        conf = config.Config()

        conf.matrix = {
            "env": build_vars,
            "env_nobuild": non_build_vars,
        }
        environments = list(environment.get_environments(conf, None))

        assert len(environments) == environ_count
        assert len(set(e.dir_name for e in environments)) == build_count
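
The expected counts follow from products over the variable values: every axis contributes to the number of distinct environments (environ_count), but only the build-affecting "env" axis contributes to distinct build directories (build_count, via dir_name). A sketch of that arithmetic for the last configuration in the table:

from functools import reduce

build_vars = {"var1": ["val1", "val2"], "var2": ["val3", "val4"]}
non_build_vars = {"var3": ["val5", None]}

def n_combos(variables):
    return reduce(lambda n, vals: n * len(vals), variables.values(), 1)

assert n_combos(build_vars) * n_combos(non_build_vars) == 8  # environ_count
assert n_combos(build_vars) == 4                             # build_count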

Example #38

def test_find_benchmarks_cwd_imports(tmpdir):
    # Test that files in the directory above the benchmark suite are
    # not importable

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        pass

    with open(os.path.join('benchmark', 'test.py'), 'w') as f:
        f.write("""
try:
    import this_should_really_not_be_here
    raise AssertionError('This should not happen!')
except ImportError:
    pass

def track_this():
    return 0
""")

    with open(os.path.join('this_should_really_not_be_here.py'), 'w') as f:
        f.write("raise AssertionError('Should not be imported!')")

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf,
                                       repo,
                                       envs, [commit_hash],
                                       regex='track_this')
    assert len(b) == 1

Example #39

def test_large_environment_matrix(tmpdir):
    # As seen in issue #169, conda can't handle using really long
    # directory names in its environment.  This creates an environment
    # with many dependencies in order to ensure it still works.

    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))
    conf.pythons = ["2.7"]
    for i in range(25):
        conf.matrix['foo{0}'.format(i)] = []

    environments = list(environment.get_environments(conf))

    for env in environments:
        # Since *actually* installing all the dependencies would make
        # this test run a long time, we only set up the environment,
        # but don't actually install dependencies into it.  This is
        # enough to trigger the bug in #169.
        env.setup()

Example #40

def test_conda_pip_install(tmpdir):
    # test that we can install with pip into a conda environment.
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["3.4"]
    conf.matrix = {
        "pip+colorama": ["0.3.1"]
    }
    environments = list(environment.get_environments(conf))

    assert len(environments) == 1 * 1 * 1

    for env in environments:
        env.create()

        output = env.run(
            ['-c', 'import colorama, sys; sys.stdout.write(colorama.__version__)'])
        assert output.startswith(six.text_type(env._requirements['pip+colorama']))
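
The "pip+" prefix in the matrix key above is what switches the installer: a plain key is installed by the environment's native tool (conda here), while a "pip+"-prefixed key is installed with pip inside that same environment. A sketch of a matrix mixing both styles, with hypothetical package choices:

conf.matrix = {
    "six": ["1.10"],            # installed by conda
    "pip+colorama": ["0.3.1"],  # installed by pip into the conda env
}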

Example #41

def test_matrix_environments(tmpdir):
    try:
        util.which('python2.7')
    except RuntimeError:
        raise RuntimeError(
            "python 2.7 must be installed for this test to pass")

    try:
        util.which('python3.3')
    except RuntimeError:
        raise RuntimeError(
            "python 3.3 must be installed for this test to pass")

    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.pythons = ["2.7", "3.3"]
    conf.matrix = {
        "six": ["1.4", None],
        "psutil": ["1.2", "1.1"]
    }

    environments = list(environment.get_environments(conf))

    assert len(environments) == 2 * 2 * 2

    # Only test the first two environments, since this is so time
    # consuming
    for env in environments[:2]:
        env.setup()
        env.install_requirements()

        output = env.run(
            ['-c', 'import six, sys; sys.stdout.write(six.__version__)'])
        if env._requirements['six'] is not None:
            assert output.startswith(six.text_type(env._requirements['six']))

        output = env.run(
            ['-c', 'import psutil, sys; sys.stdout.write(psutil.__version__)'])
        assert output.startswith(six.text_type(env._requirements['psutil']))

Example #42

def test_large_environment_matrix(tmpdir):
    # As seen in issue #169, conda can't handle using really long
    # directory names in its environment.  This creates an environment
    # with many dependencies in order to ensure it still works.

    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))
    conf.pythons = ["2.7"]
    for i in range(25):
        conf.matrix['foo{0}'.format(i)] = []

    environments = list(environment.get_environments(conf))

    for env in environments:
        # Since *actually* installing all the dependencies would make
        # this test run a long time, we only set up the environment,
        # but don't actually install dependencies into it.  This is
        # enough to trigger the bug in #169.
        env._install_requirements = lambda *a: None
        env.create()

Example #43

def test_conda_pip_install(tmpdir, dummy_packages):
    # test that we can install with pip into a conda environment.
    conf = config.Config()

    conf.env_dir = six.text_type(tmpdir.join("env"))

    conf.environment_type = "conda"
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {
        "pip+asv_dummy_test_package_2": [DUMMY2_VERSIONS[0]]
    }
    environments = list(environment.get_environments(conf, None))

    assert len(environments) == 1 * 1 * 1

    for env in environments:
        env.create()

        output = env.run(
            ['-c', 'import asv_dummy_test_package_2 as p, sys; sys.stdout.write(p.__version__)'])
        assert output.startswith(six.text_type(env._requirements['pip+asv_dummy_test_package_2']))

Example #45

def test_find_benchmarks_cwd_imports(tmpdir):
    # Test that files in the directory above the benchmark suite are
    # not importable

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        pass

    with open(os.path.join('benchmark', 'test.py'), 'w') as f:
        f.write("""
try:
    import this_should_really_not_be_here
    raise AssertionError('This should not happen!')
except ImportError:
    pass

def track_this():
    return 0
""")

    with open(os.path.join('this_should_really_not_be_here.py'), 'w') as f:
        f.write("raise AssertionError('Should not be imported!')")

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='track_this')
    assert len(b) == 1

Example #46

def test_install_success(tmpdir):
    # Check that install_project really installs the package. (gh-805)
    # This may fail if pip in install_command e.g. gets confused by an .egg-info
    # directory in its cwd to think the package is already installed.
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    commit_hash = dvcs.get_branch_hashes()[0]

    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.repo = os.path.abspath(dvcs.path)
    conf.matrix = {}
    conf.build_cache_size = 0

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()
    env.install_project(conf, repo, commit_hash)

    env.run(['-c', 'import asv_test_repo as t, sys; sys.exit(0 if t.dummy_value == 0 else 1)'])

Example #48

def test_import_failure_retry(tmpdir):
    # Test that a different commit is tried on import failure

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        f.write(
            textwrap.dedent("""
        import asv_test_repo

        def time_foo():
            pass

        time_foo.number = asv_test_repo.dummy_value

        if asv_test_repo.dummy_value == 0:
            raise RuntimeError("fail discovery")
        """))

    dvcs = tools.generate_test_repo(tmpdir, [2, 1, 0])

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = dvcs.path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hashes = dvcs.get_branch_hashes()

    b = benchmarks.Benchmarks.discover(conf, repo, envs, commit_hashes)
    assert len(b) == 1
    # Import fails on the newest commit (dummy_value == 0 raises), so
    # discovery falls back to the next commit, where dummy_value == 1.
    assert b['time_foo']['number'] == 1

Example #49

    def _test_run(range_spec, branches, expected_commits):
        # Rollback initial results
        shutil.rmtree(results_dir)
        shutil.copytree(template_dir, results_dir)

        args = [
            "run",
            "--quick",
            "--skip-existing-successful",
            "--bench=time_secondary.track_value",
            "-s",
            "1000"  # large number of steps should be noop
        ]
        if range_spec is not None:
            args.append(range_spec)
        conf.branches = branches
        tools.run_asv_with_conf(conf, *args, _machine_file=machine_file)

        # Check that files for all commits expected were generated
        envs = list(environment.get_environments(conf, None))
        tool_name = envs[0].tool_name

        pyver = conf.pythons[0]
        if pyver.startswith('pypy'):
            pyver = pyver[2:]

        expected = set(['machine.json'])
        for commit in expected_commits:
            for psver in tools.DUMMY2_VERSIONS:
                expected.add(
                    '{0}-{1}-py{2}-asv_dummy_test_package_1-asv_dummy_test_package_2{3}.json'
                    .format(commit[:8], tool_name, pyver, psver))

        result_files = os.listdir(join(tmpdir, 'results_workflow',
                                       'orangutan'))

        assert set(result_files) == expected

Example #50

def get_env():
    env = list(environment.get_environments(conf, None))[0]
    env.create()
    return env

Example #51

def test_code_extraction(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf,
                                       repo,
                                       envs, [commit_hash],
                                       regex=r'^code_extraction\.')

    expected_code = textwrap.dedent("""
    def track_test():
        # module-level 難
        return 0

    def setup():
        # module-level
        pass

    def setup_cache():
        # module-level
        pass
    """).strip()

    bench = b['code_extraction.track_test']
    assert bench['version'] == sha256(
        bench['code'].encode('utf-8')).hexdigest()
    assert bench['code'] == expected_code

    expected_code = textwrap.dedent("""
    int track_pretty_source_test() {
        return 0;
    }

    def setup():
        # module-level
        pass

    def setup_cache():
        # module-level
        pass
    """).strip()

    bench = b['code_extraction.track_pretty_source_test']
    assert bench['version'] == sha256(
        bench['code'].encode('utf-8')).hexdigest()
    assert bench['code'] == expected_code

    expected_code = textwrap.dedent("""
    class MyClass:
        def track_test(self):
            # class-level 難
            return 0

    def setup():
        # module-level
        pass

    class MyClass:
        def setup(self):
            # class-level
            pass

        def setup_cache(self):
            # class-level
            pass
    """).strip()

    bench = b['code_extraction.MyClass.track_test']
    assert bench['version'] == sha256(
        bench['code'].encode('utf-8')).hexdigest()

    if sys.version_info[:2] != (3, 2):
        # Python 3.2 doesn't have __qualname__
        assert bench['code'] == expected_code

Example #52

def test_environment_select():
    conf = config.Config()
    conf.environment_type = "conda"
    conf.pythons = ["2.7", "3.4"]
    conf.matrix = {
        "six": ["1.4"],
    }
    conf.include = [{'environment_type': 'conda', 'python': '1.9'}]

    # Check default environment config
    environments = list(environment.get_environments(conf, None))
    items = set((env.tool_name, env.python) for env in environments)
    assert items == set([('conda', '2.7'), ('conda', '3.4'), ('conda', '1.9')])

    if HAS_PYTHON_27 and HAS_VIRTUALENV:
        # Virtualenv plugin fails on initialization if not available,
        # so these tests pass only if virtualenv is present

        conf.pythons = ["2.7"]

        # Check default python specifiers
        environments = list(
            environment.get_environments(conf, ["conda", "virtualenv"]))
        items = set((env.tool_name, env.python) for env in environments)
        assert items == set([('conda', '1.9'), ('conda', '2.7'),
                             ('virtualenv', '2.7')])

        # Check specific python specifiers
        environments = list(
            environment.get_environments(conf,
                                         ["conda:3.4", "virtualenv:2.7"]))
        items = set((env.tool_name, env.python) for env in environments)
        assert items == set([('conda', '3.4'), ('virtualenv', '2.7')])

    # Check same specifier
    environments = list(
        environment.get_environments(conf,
                                     ["existing:same", ":same", "existing"]))
    items = [env.tool_name for env in environments]
    assert items == ['existing', 'existing', 'existing']

    # Check autodetect existing
    executable = os.path.relpath(os.path.abspath(sys.executable))
    environments = list(
        environment.get_environments(conf,
                                     ["existing", ":same", ":" + executable]))
    assert len(environments) == 3
    for env in environments:
        assert env.tool_name == "existing"
        assert env.python == "{0[0]}.{0[1]}".format(sys.version_info)
        assert os.path.normcase(os.path.abspath(
            env._executable)) == os.path.normcase(
                os.path.abspath(sys.executable))

    # Select by environment name
    environments = list(
        environment.get_environments(conf, ["conda-py2.7-six1.4"]))
    assert len(environments) == 1
    assert environments[0].python == "2.7"
    assert environments[0].tool_name == "conda"
    assert environments[0].requirements == {'six': '1.4'}

Example #53

def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)

    envs = list(environment.get_environments(conf, None))

    b = benchmarks.Benchmarks(conf, repo, envs, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, repo, envs, regex='example')
    assert len(b) == 25

    b = benchmarks.Benchmarks(conf,
                              repo,
                              envs,
                              regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf,
                              repo,
                              envs,
                              regex=[
                                  'time_example_benchmark_1',
                                  'some regexp that does not match anything'
                              ])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, repo, envs, regex='custom')
    assert sorted(b.keys()) == ['custom.time_function', 'custom.track_method']

    b = benchmarks.Benchmarks(conf, repo, envs)
    assert len(b) == 33

    start_timestamp = datetime.datetime.utcnow()

    b = benchmarks.Benchmarks(conf, repo, envs)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1'][
        'result'] != [None]
    assert isinstance(
        times['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0]
        ['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    assert len(times['time_examples.TimeSuite.time_example_benchmark_1']
               ['samples'][0]) in (8, 9, 10)
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception']['result'] == [
        None
    ]
    assert times['subdir.time_subdir.time_foo']['result'] != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on Pypy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list']['result'][0] > 1000
    assert times['time_secondary.track_value']['result'] == [42.0]
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['time_examples.TimeWithBadTimer.time_it']['result'] == [0.0]

    assert times['params_examples.track_param']['params'] == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param']['result'] == [42, 42]

    assert times['params_examples.mem_param']['params'] == [['10', '20'],
                                                            ['2', '3']]
    assert len(times['params_examples.mem_param']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['params'] == [[
        "'a'", "'b'", "'c'"
    ]]
    assert times['params_examples.ParamSuite.track_value']['result'] == [
        1 + 0, 2 + 0, 3 + 0
    ]

    assert isinstance(times['params_examples.TuningTest.time_it']['result'][0],
                      float)

    assert isinstance(times['params_examples.time_skip']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'][0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == [
        500
    ]
    assert times['cache_examples.ClassLevelSetup.track_example2'][
        'result'] == [500]

    assert times['cache_examples.track_cache_foo']['result'] == [42]
    assert times['cache_examples.track_cache_bar']['result'] == [12]
    assert times['cache_examples.track_my_cache_foo']['result'] == [0]

    assert times['cache_examples.ClassLevelSetupFail.track_fail'][
        'result'] is None
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    assert times['cache_examples.ClassLevelCacheTimeout.track_fail'][
        'result'] is None
    assert times['cache_examples.ClassLevelCacheTimeoutSuccess.track_success'][
        'result'] == [0]

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'][
        'stderr'].split() in expected

    # Check run time timestamps
    for name, result in times.items():
        assert result['started_at'] >= start_timestamp
        assert result['ended_at'] >= result['started_at']
        assert result['ended_at'] <= end_timestamp

Example #54

def get_env():
    return list(environment.get_environments(conf, None))[0]

Example #55

def test_find_benchmarks(tmpdir):
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    b = benchmarks.Benchmarks(conf, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, regex='example')
    assert len(b) == 22

    b = benchmarks.Benchmarks(conf, regex='time_example_benchmark_1')
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf,
                              regex=[
                                  'time_example_benchmark_1',
                                  'some regexp that does not match anything'
                              ])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf)
    assert len(b) == 26

    envs = list(environment.get_environments(conf))
    b = benchmarks.Benchmarks(conf)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1'][
        'result'] is not None
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception'][
        'result'] is None
    assert times['subdir.time_subdir.time_foo']['result'] is not None
    assert times['mem_examples.mem_list']['result'] > 1000
    assert times['time_secondary.track_value']['result'] == 42.0
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['params_examples.track_param']['result']['params'] == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param']['result']['result'] == [42, 42]

    assert times['params_examples.mem_param']['result']['params'] == [[
        '10', '20'
    ], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['result'][
        'params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result'][
        'result'] == [1 + 0, 2 + 0, 3 + 0]

    assert isinstance(
        times['params_examples.TuningTest.time_it']['result']['result'][0],
        float)

    assert isinstance(
        times['params_examples.time_skip']['result']['result'][0], float)
    assert isinstance(
        times['params_examples.time_skip']['result']['result'][1], float)
    assert util.is_nan(
        times['params_examples.time_skip']['result']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example'][
        'result'] == 500
    assert times['cache_examples.ClassLevelSetup.track_example2'][
        'result'] == 500

    assert times['cache_examples.track_cache_foo']['result'] == 42
    assert times['cache_examples.track_cache_bar']['result'] == 12
    assert times['cache_examples.track_my_cache_foo']['result'] == 0

    assert times['cache_examples.ClassLevelSetupFail.track_fail'][
        'result'] is None
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = ['setup'] * 2
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'][
        'stderr'].split() == expected