Ejemplo n.º 1
0
def _test_generic_repo(conf, tmpdir, hash_range, master, branch, is_remote=False):
    """Exercise basic Repo operations: checkout, hash ranges, dates, tags."""
    # Reserve a unique path for the working copy; the repo creates it itself.
    work_dir = tempfile.mkdtemp(dir=tmpdir, prefix="workcopy")
    os.rmdir(work_dir)

    if is_remote:
        # Mirroring must fail early when *mirror_dir* exists but is not a mirror.
        if os.path.isdir(conf.project):
            shutil.rmtree(conf.project)
        os.makedirs(join(conf.project, 'hello'))
        with pytest.raises(util.UserError):
            r = repo.get_repo(conf)
        shutil.rmtree(conf.project)

    # basic checkouts
    r = repo.get_repo(conf)

    # The first checkout creates the subrepo.
    r.checkout(work_dir, master)
    assert os.path.exists(join(work_dir, "setup.py"))

    for name in ("README", "untracked"):
        with open(join(work_dir, name), "wb") as fh:
            fh.write(b"foo")

    # A subsequent checkout cleans the working copy again.
    r.checkout(work_dir, branch)
    assert not os.path.exists(join(work_dir, "untracked"))
    with open(join(work_dir, "README"), "rb") as fh:
        assert fh.read(33) == b"This is the asv_test_repo project"

    r.checkout(work_dir, master)

    # Checkout must recover after the VCS control directory is deleted.
    for control in ('.hg', '.git'):
        control_path = os.path.join(work_dir, control)
        if os.path.isdir(control_path):
            shutil.rmtree(control_path)
    r.checkout(work_dir, master)

    commit_hashes = r.get_hashes_from_range(hash_range)
    assert len(commit_hashes) == 4

    # Hashes come back newest-first.
    commit_dates = [r.get_date(h) for h in commit_hashes]
    assert commit_dates == sorted(commit_dates, reverse=True)

    for tag in r.get_tags():
        r.get_date_from_name(tag)
Ejemplo n.º 2
0
def _test_generic_repo(conf, tmpdir, hash_range, master, branch, is_remote=False):
    """Exercise checkout, cleanup-on-checkout, corruption recovery and
    hash/date/tag queries of a generic Repo implementation."""
    # Pick a fresh, non-existing path; the repo object creates it itself.
    work_dir = tempfile.mkdtemp(dir=tmpdir, prefix="workcopy")
    os.rmdir(work_dir)

    if is_remote:
        # Mirroring must fail early if *mirror_dir* exists but is not a mirror.
        if os.path.isdir(conf.project):
            shutil.rmtree(conf.project)
        os.makedirs(join(conf.project, 'hello'))
        with pytest.raises(util.UserError):
            r = repo.get_repo(conf)
        shutil.rmtree(conf.project)

    # basic checkouts
    r = repo.get_repo(conf)

    # Subrepo creation on first checkout.
    r.checkout(work_dir, master)
    assert os.path.exists(join(work_dir, "setup.py"))

    for name in ("README", "untracked"):
        with open(join(work_dir, name), "wb") as fh:
            fh.write(b"foo")

    # The next checkout must clean the working copy.
    r.checkout(work_dir, branch)
    assert not os.path.exists(join(work_dir, "untracked"))
    with open(join(work_dir, "README"), "rb") as fh:
        assert fh.read(33) == b"This is the asv_test_repo project"

    r.checkout(work_dir, master)

    # Corrupt the checkout; the next checkout must recover.
    for control in ('.hg', '.git'):
        control_path = os.path.join(work_dir, control)
        if os.path.isdir(control_path):
            shutil.rmtree(control_path)
    r.checkout(work_dir, master)

    commit_hashes = r.get_hashes_from_range(hash_range)
    assert len(commit_hashes) == 4

    # Hashes are ordered newest-first.
    commit_dates = [r.get_date(h) for h in commit_hashes]
    assert commit_dates == sorted(commit_dates, reverse=True)

    for tag in r.get_tags():
        r.get_date_from_name(tag)
Ejemplo n.º 3
0
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    """Create a results tree with one fake 'time_func' result per commit.

    *values* maps commit name -> measured value (or a dict carrying 'params').
    Returns the generated Config.
    """
    result_dir = join(tmpdir, "results")
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(result_dir)
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"),
                    {'machine': 'tarzan', 'version': 1})

    timestamp = datetime.datetime.utcnow()

    # Random version so each generated dir gets distinct benchmark metadata.
    benchmark_version = sha256(os.urandom(16)).hexdigest()

    params = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        entry = {
            'result': [value],
            'params': [],
            'started_at': timestamp,
            'ended_at': timestamp,
            'stats': None,
            'samples': None,
            'number': None,
        }
        result.add_result("time_func", entry, benchmark_version)
        result.save(result_dir)

    param_names = None
    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    }, api_version=1)
    return conf
Ejemplo n.º 4
0
def test_filter_date_period(tmpdir, dvcs_type):
    """filter_date_period must thin commits down to one per time period."""
    tmpdir = six.text_type(tmpdir)

    dates = [
        datetime.datetime(2001, 1, 1),
        datetime.datetime(2001, 1, 2),
        datetime.datetime(2001, 1, 8),
    ]

    dvcs = tools.generate_repo_from_ops(
        tmpdir, dvcs_type,
        [("commit", idx, stamp) for idx, stamp in enumerate(dates)])
    commits = dvcs.get_branch_hashes()[::-1]
    assert len(commits) == len(dates)

    conf = config.Config()
    conf.dvcs = dvcs_type
    conf.repo = dvcs.path
    r = repo.get_repo(conf)

    day = 60 * 60 * 24

    # Weekly period: only the Jan 1 and Jan 8 commits survive.
    assert r.filter_date_period(commits, 7 * day) == [commits[0], commits[2]]

    # Daily period: every commit is on a distinct day.
    assert r.filter_date_period(commits, day) == commits

    # With old_commits given, no new commit clears the 30-day window.
    assert r.filter_date_period(commits[1:], 30 * day, commits[:1]) == []
Ejemplo n.º 5
0
def test_conf_inside_benchmarks_dir(tmpdir):
    """The configuration file may live inside the benchmark suite itself."""
    tmpdir = six.text_type(tmpdir)
    benchmark_dir = os.path.join(tmpdir, 'benchmark')
    os.makedirs(benchmark_dir)

    # Benchmarks both in the top-level __init__.py and in a module
    # must be discovered.
    for fname in ('__init__.py', 'bench.py'):
        with open(os.path.join(benchmark_dir, fname), 'w') as f:
            f.write("def track_this(): pass")

    d = dict(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = '.'
    d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
    conf = config.Config.from_json(d)

    # NB. conf_dir == getcwd()
    os.chdir(benchmark_dir)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='track_this')
    assert set(b.keys()) == {'track_this', 'bench.track_this'}
Ejemplo n.º 6
0
def test_quick(tmpdir):
    """The quick option must run each benchmark exactly once."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = dict(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))

    b = benchmarks.Benchmarks(conf, repo, envs)
    target = 'time_examples.TimeWithRepeat.time_it'
    skip_names = [name for name in b.keys() if name != target]
    times = b.run_benchmarks(envs[0], quick=True, show_stderr=True,
                             skip=skip_names)

    assert len(times) == 1

    # Quick mode runs the benchmark only once; the quick==False behavior
    # is covered by test_find_benchmarks.
    assert times[target]['stderr'].split() == ["<1>"]
Ejemplo n.º 7
0
def test_quick(tmpdir):
    """The quick option must run each benchmark exactly once."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = dict(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
    target = 'time_examples.TimeWithRepeat.time_it'
    skip_names = [name for name in b.keys() if name != target]
    times = b.run_benchmarks(envs[0], quick=True, show_stderr=True,
                             skip=skip_names)

    assert len(times) == 1

    # Quick mode runs the benchmark only once; the quick==False behavior
    # is covered by test_find_benchmarks.
    assert times[target]['stderr'].split() == ["<1>"]
Ejemplo n.º 8
0
def test_root_ceiling(dvcs_type, tmpdir):
    """git/hg must not fall back to a repository in a parent directory."""
    tmpdir = six.text_type(tmpdir)
    dvcs1 = tools.generate_repo_from_ops(tmpdir, dvcs_type, [("commit", 1)])
    dvcs2 = tools.generate_repo_from_ops(tmpdir, dvcs_type, [("commit", 2)])
    commit1 = dvcs1.get_branch_hashes()[0]
    commit2 = dvcs2.get_branch_hashes()[0]

    conf = config.Config()
    conf.branches = []
    conf.dvcs = dvcs_type
    conf.project = join(tmpdir, "repo")
    conf.repo = dvcs1.path

    r = repo.get_repo(conf)

    # Check out dvcs1 into a directory nested inside the *other* repository.
    workcopy_dir = join(dvcs2.path, "workcopy")
    r.checkout(workcopy_dir, commit1)

    # Corrupt the checkout by deleting its VCS control directory.
    for control in ('.hg', '.git'):
        control_path = os.path.join(workcopy_dir, control)
        if os.path.isdir(control_path):
            shutil.rmtree(control_path)

    # commit2 exists only in dvcs2, the parent repo; the operation must
    # fail rather than silently use the parent repository.
    with pytest.raises(Exception):
        r.checkout(workcopy_dir, commit2)
Ejemplo n.º 9
0
def test_no_such_name_error(dvcs_type, tmpdir):
    """Unknown names raise NoSuchNameError; real failures must not."""
    tmpdir = six.text_type(tmpdir)
    dvcs = tools.generate_test_repo(tmpdir, values=[0], dvcs_type=dvcs_type)

    conf = config.Config()
    conf.branches = []
    conf.dvcs = dvcs_type
    conf.project = "project"
    conf.repo = dvcs.path

    r = repo.get_repo(conf)

    # None resolves to the head of the default branch.
    assert r.get_hash_from_name(None) == dvcs.get_hash(r._default_branch)
    with pytest.raises(repo.NoSuchNameError):
        r.get_hash_from_name("badbranch")

    if dvcs_type == "git":
        # A corrupted repository must raise something other than
        # NoSuchNameError (or AssertionError).
        util.long_path_rmtree(join(dvcs.path, ".git"))
        with pytest.raises(Exception) as excinfo:
            r.get_hash_from_name(None)
        assert excinfo.type not in (AssertionError, repo.NoSuchNameError)
    elif dvcs_type == "hg":
        # hglib caches repository state, so the same check cannot be done
        pass
Ejemplo n.º 10
0
def test_root_ceiling(dvcs_type, tmpdir):
    """git/hg must not look for a repository in parent directories."""
    tmpdir = six.text_type(tmpdir)
    dvcs1 = tools.generate_repo_from_ops(tmpdir, dvcs_type, [("commit", 1)])
    dvcs2 = tools.generate_repo_from_ops(tmpdir, dvcs_type, [("commit", 2)])
    commit1 = dvcs1.get_branch_hashes()[0]
    commit2 = dvcs2.get_branch_hashes()[0]

    conf = config.Config()
    conf.branches = []
    conf.dvcs = dvcs_type
    conf.project = join(tmpdir, "repo")
    conf.repo = dvcs1.path

    r = repo.get_repo(conf)

    # Working copy nested inside another repository entirely.
    workcopy_dir = join(dvcs2.path, "workcopy")
    r.checkout(workcopy_dir, commit1)

    # Delete the control directory to corrupt the checkout.
    for control in ('.hg', '.git'):
        control_path = os.path.join(workcopy_dir, control)
        if os.path.isdir(control_path):
            shutil.rmtree(control_path)

    # commit2 is not in dvcs1 — the checkout must fail instead of
    # resolving against the enclosing parent repository.
    with pytest.raises(Exception):
        r.checkout(workcopy_dir, commit2)
Ejemplo n.º 11
0
def test_no_such_name_error(dvcs_type, tmpdir):
    """NoSuchNameError is raised for bad names only, not for real errors."""
    tmpdir = six.text_type(tmpdir)
    dvcs = tools.generate_test_repo(tmpdir, values=[0], dvcs_type=dvcs_type)

    conf = config.Config()
    conf.branches = []
    conf.dvcs = dvcs_type
    conf.project = "project"
    conf.repo = dvcs.path

    r = repo.get_repo(conf)

    # A None name maps to the default branch head.
    assert r.get_hash_from_name(None) == dvcs.get_hash(r._default_branch)
    with pytest.raises(repo.NoSuchNameError):
        r.get_hash_from_name("badbranch")

    if dvcs_type == "git":
        # Corruption must surface as a different exception type.
        util.long_path_rmtree(join(dvcs.path, ".git"))
        with pytest.raises(Exception) as excinfo:
            r.get_hash_from_name(None)
        assert excinfo.type not in (AssertionError, repo.NoSuchNameError)
    elif dvcs_type == "hg":
        # hglib seems to do some caching, so this doesn't work
        pass
Ejemplo n.º 12
0
def test_install_env_matrix_values(tmpdir):
    """'env' matrix vars must reach the build; 'env_nobuild' vars must not."""
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    commit_hash = dvcs.get_branch_hashes()[0]

    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.repo = os.path.abspath(dvcs.path)
    conf.matrix = {
        'env': {'SOME_ASV_TEST_BUILD_VALUE': '1'},
        'env_nobuild': {'SOME_ASV_TEST_NON_BUILD_VALUE': '1'},
    }

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()
    env.install_project(conf, repo, commit_hash)

    # The build-time value must have been seen during the build ...
    env.run([
        '-c', 'import asv_test_repo.build_time_env as t, sys; '
        'sys.exit(0 if t.env["SOME_ASV_TEST_BUILD_VALUE"] == "1" else 1)'
    ])

    # ... while the non-build value must not leak into the build step.
    env.run([
        '-c', 'import asv_test_repo.build_time_env as t, sys; '
        'sys.exit(0 if "SOME_ASV_TEST_NON_BUILD_VALUE" not in t.env else 1)'
    ])
Ejemplo n.º 13
0
def test_conf_inside_benchmarks_dir(tmpdir):
    """The config file may be placed inside the benchmark suite directory."""
    tmpdir = six.text_type(tmpdir)
    benchmark_dir = os.path.join(tmpdir, 'benchmark')
    os.makedirs(benchmark_dir)

    # Also cover a benchmark defined in the top-level __init__.py.
    for fname in ('__init__.py', 'bench.py'):
        with open(os.path.join(benchmark_dir, fname), 'w') as f:
            f.write("def track_this(): pass")

    d = dict(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = '.'
    d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
    conf = config.Config.from_json(d)

    # NB. conf_dir == getcwd()
    os.chdir(benchmark_dir)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='track_this')
    assert set(b.keys()) == {'track_this', 'bench.track_this'}
Ejemplo n.º 14
0
def test_build_isolation(tmpdir):
    """Builds with build_cache enabled must work on pyproject.toml projects."""
    tmpdir = six.text_type(tmpdir)

    # Installable repository whose build goes through a pyproject.toml.
    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    fn = os.path.join(dvcs.path, 'pyproject.toml')
    with open(fn, 'w') as f:
        f.write('[build-system]\n'
                'requires = ["wheel", "setuptools"]')
    dvcs.add(fn)
    dvcs.commit("Add pyproject.toml")
    commit_hash = dvcs.get_hash("master")

    # Configuration with a non-zero build cache.
    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    conf.repo = os.path.abspath(dvcs.path)
    conf.build_cache_size = 8

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()

    # Installation must succeed despite the build cache being active.
    env.install_project(conf, repo, commit_hash)
Ejemplo n.º 15
0
def test_build_isolation(tmpdir):
    """build_cache must not break builds of projects using pyproject.toml."""
    tmpdir = six.text_type(tmpdir)

    # Create an installable repository carrying a pyproject.toml.
    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    fn = os.path.join(dvcs.path, 'pyproject.toml')
    with open(fn, 'w') as f:
        f.write('[build-system]\n'
                'requires = ["wheel", "setuptools"]')
    dvcs.add(fn)
    dvcs.commit("Add pyproject.toml")
    commit_hash = dvcs.get_hash("master")

    # Config with the build cache switched on.
    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.matrix = {}
    conf.repo = os.path.abspath(dvcs.path)
    conf.build_cache_size = 8

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()

    # The project must install cleanly.
    env.install_project(conf, repo, commit_hash)
Ejemplo n.º 16
0
def test_install_success(tmpdir):
    """install_project must really install the package (gh-805).

    pip can be confused by a stale .egg-info directory in its cwd into
    thinking the package is already installed.
    """
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    commit_hash = dvcs.get_branch_hashes()[0]

    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.repo = os.path.abspath(dvcs.path)
    conf.matrix = {}
    conf.build_cache_size = 0

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()
    env.install_project(conf, repo, commit_hash)

    # The installed module must be importable and expose the repo's value.
    env.run([
        '-c',
        'import asv_test_repo as t, sys; sys.exit(0 if t.dummy_value == 0 else 1)'
    ])
Ejemplo n.º 17
0
Archivo: tools.py Proyecto: philpep/asv
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    """Populate a results directory with one fake benchmark per commit.

    *values* maps commit name -> value (or a dict with a 'params' entry).
    Returns the Config pointing at the generated tree.
    """
    result_dir = join(tmpdir, "results")
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(result_dir)
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    util.write_json(join(machine_dir, "machine.json"),
                    {'machine': 'tarzan', 'version': 1})

    timestamp = datetime.datetime.utcnow()

    # Random benchmark version: distinct metadata per generated dir.
    benchmark_version = sha256(os.urandom(16)).hexdigest()

    params = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        entry = {
            'result': [value],
            'params': [],
            'started_at': timestamp,
            'ended_at': timestamp,
            'stats': None,
            'samples': None,
            'number': None,
        }
        result.add_result("time_func", entry, benchmark_version)
        result.save(result_dir)

    param_names = None
    if params:
        param_names = ["param{}".format(k) for k in range(len(params))]

    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": param_names or [],
            "version": benchmark_version,
        }
    }, api_version=1)
    return conf
Ejemplo n.º 18
0
def _test_branches(conf, branch_commits):
    """Each configured branch must contain all of its expected commits."""
    r = repo.get_repo(conf)

    assert len(conf.branches) == 2

    for branch in conf.branches:
        found = r.get_branch_commits(branch)
        for expected in branch_commits[branch]:
            assert expected in found
Ejemplo n.º 19
0
def _test_branches(conf, branch_commits):
    """Verify get_branch_commits covers the expected commits per branch."""
    r = repo.get_repo(conf)

    assert len(conf.branches) == 2

    for branch in conf.branches:
        listed = r.get_branch_commits(branch)
        missing = [c for c in branch_commits[branch] if c not in listed]
        assert not missing
Ejemplo n.º 20
0
def _test_generic_repo(conf, tmpdir, hash_range, master, branch, is_remote=False):
    """Exercise checkout, corruption recovery, and hash/date/tag queries."""
    # Reserve a fresh path; the repo object creates the directory itself.
    work_dir = tempfile.mkdtemp(dir=tmpdir, prefix="workcopy")
    os.rmdir(work_dir)

    if is_remote:
        # Mirroring must fail early if *mirror_dir* exists but is not a mirror.
        if os.path.isdir(conf.project):
            shutil.rmtree(conf.project)
        os.makedirs(join(conf.project, 'hello'))
        with pytest.raises(util.UserError):
            r = repo.get_repo(conf)
        shutil.rmtree(conf.project)

    # basic checkouts
    r = repo.get_repo(conf)

    for rev in (master, branch, master):
        r.checkout(work_dir, rev)

    # Checkout must recover after the control directory is deleted.
    for control in ('.hg', '.git'):
        control_path = os.path.join(work_dir, control)
        if os.path.isdir(control_path):
            shutil.rmtree(control_path)
    r.checkout(work_dir, master)

    commit_hashes = r.get_hashes_from_range(hash_range)
    assert len(commit_hashes) == 4

    # Hashes come back newest-first.
    commit_dates = [r.get_date(h) for h in commit_hashes]
    assert commit_dates == sorted(commit_dates, reverse=True)

    for tag in r.get_tags():
        r.get_date_from_name(tag)
Ejemplo n.º 21
0
 def _generate_result_dir(values, commits_without_result=None):
     """Build a result dir with one commit per value, optionally leaving
     the commits at the given indices without results.

     Uses tmpdir/dvcs_type from the enclosing scope.
     """
     dvcs = tools.generate_repo_from_ops(
         tmpdir, dvcs_type, [("commit", i) for i in range(len(values))])
     commits = list(reversed(dvcs.get_branch_hashes()))
     skipped = {commits[i] for i in (commits_without_result or [])}
     commit_values = {c: v for c, v in zip(commits, values) if c not in skipped}
     conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
     repo = get_repo(conf)
     return conf, repo, commits
Ejemplo n.º 22
0
 def _generate_result_dir(values, commits_without_result=None):
     """Create a result dir for *values*; indices in *commits_without_result*
     get no result entry. Relies on tmpdir/dvcs_type from the outer scope.
     """
     dvcs = tools.generate_repo_from_ops(
         tmpdir, dvcs_type, [("commit", i) for i in range(len(values))])
     commits = list(reversed(dvcs.get_branch_hashes()))
     omit = {commits[i] for i in (commits_without_result or [])}
     commit_values = {c: v for c, v in zip(commits, values) if c not in omit}
     conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
     repo = get_repo(conf)
     return conf, repo, commits
Ejemplo n.º 23
0
def test_regression_multiple_branches(dvcs_type, tmpdir):
    """A regression on the stable branch is reported against that branch."""
    tmpdir = six.text_type(tmpdir)
    if dvcs_type == "git":
        master = "master"
    elif dvcs_type == "hg":
        master = "default"
    # One commit on each branch, then 4 rounds at value 1 on stable,
    # then 5 rounds where stable regresses to value 2.
    ops = [
        ("commit", 1),
        ("checkout", "stable", master),
        ("commit", 1),
        ("checkout", master),
    ]
    ops += 4 * [
        ("commit", 1),
        ("checkout", "stable"),
        ("commit", 1),
        ("checkout", master),
    ]
    ops += 5 * [
        ("commit", 1),
        ("checkout", "stable"),
        ("commit", 2),
        ("checkout", master),
    ]
    dvcs = tools.generate_repo_from_ops(tmpdir, dvcs_type, ops)

    branches = {
        name: list(reversed(dvcs.get_branch_hashes(name)))
        for name in (master, "stable")
    }
    commit_values = {}
    for branch_name, values in ((master, 10 * [1]),
                                ("stable", 5 * [1] + 5 * [2])):
        commit_values.update(zip(branches[branch_name], values))

    conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
    conf.branches = [master, "stable"]
    tools.run_asv_with_conf(conf, "publish")
    repo = get_repo(conf)

    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    graph_path = join('graphs', 'branch-stable', 'machine-tarzan',
                      'time_func.json')
    # The regression occurs at the 5th commit of the stable branch.
    revision = repo.get_revisions(commit_values.keys())[branches["stable"][5]]
    expected = {
        'regressions': [[
            'time_func', graph_path, {'branch': 'stable'},
            None, 2.0, 1.0, [[None, revision, 1.0, 2.0]]
        ]]
    }
    assert regressions == expected
Ejemplo n.º 24
0
def test_repo_git_annotated_tag_date(tmpdir):
    """An annotated tag's date equals the date of the commit it points to."""
    tmpdir = six.text_type(tmpdir)

    dvcs = tools.generate_test_repo(tmpdir, list(range(5)), dvcs_type='git')

    conf = config.Config()
    conf.project = 'sometest'
    conf.repo = dvcs.path

    r = repo.get_repo(conf)
    tag_date = r.get_date('tag1')
    commit_date = r.get_date(r.get_hash_from_name('tag1'))
    assert tag_date == commit_date
Ejemplo n.º 25
0
def test_repo_git_annotated_tag_date(tmpdir):
    """get_date for an annotated tag must match the tagged commit's date."""
    tmpdir = six.text_type(tmpdir)

    dvcs = tools.generate_test_repo(tmpdir, list(range(5)), dvcs_type='git')

    conf = config.Config()
    conf.project = 'sometest'
    conf.repo = dvcs.path

    r = repo.get_repo(conf)
    by_tag_name = r.get_date('tag1')
    by_commit_hash = r.get_date(r.get_hash_from_name('tag1'))
    assert by_tag_name == by_commit_hash
Ejemplo n.º 26
0
def two_branch_repo_case(request, tmpdir):
    r"""Build a two-branch repository with cross merges.

    Used to check that revision walking follows the first parent across
    merges. The revision graph looks like this:

        @  Revision 6 (default)
        |
        | o  Revision 5 (stable)
        | |
        | o  Merge master
        |/|
        o |  Revision 4
        | |
        o |  Merge stable
        |\|
        o |  Revision 3
        | |
        | o  Revision 2
        |/
        o  Revision 1

    Returns (dvcs, master, repo, conf).
    """
    dvcs_type = request.param
    tmpdir = six.text_type(tmpdir)
    if dvcs_type == "git":
        master = "master"
    elif dvcs_type == "hg":
        master = "default"
    ops = [
        ("commit", 1),
        ("checkout", "stable", master),
        ("commit", 2),
        ("checkout", master),
        ("commit", 3),
        ("merge", "stable"),
        ("commit", 4),
        ("checkout", "stable"),
        ("merge", master, "Merge master"),
        ("commit", 5),
        ("checkout", master),
        ("commit", 6),
    ]
    dvcs = tools.generate_repo_from_ops(tmpdir, dvcs_type, ops)

    conf = config.Config()
    conf.branches = [master, "stable"]
    conf.repo = dvcs.path
    conf.project = join(tmpdir, "repo")
    return dvcs, master, repo.get_repo(conf), conf
Ejemplo n.º 27
0
def two_branch_repo_case(request, tmpdir):
    r"""Create a repository with master/stable branches merged both ways.

    Verifies that merge traversal follows the first parent. Graph:

        @  Revision 6 (default)
        |
        | o  Revision 5 (stable)
        | |
        | o  Merge master
        |/|
        o |  Revision 4
        | |
        o |  Merge stable
        |\|
        o |  Revision 3
        | |
        | o  Revision 2
        |/
        o  Revision 1

    Returns (dvcs, master, repo, conf).
    """
    dvcs_type = request.param
    tmpdir = six.text_type(tmpdir)
    if dvcs_type == "git":
        master = "master"
    elif dvcs_type == "hg":
        master = "default"
    history = [
        ("commit", 1),
        ("checkout", "stable", master),
        ("commit", 2),
        ("checkout", master),
        ("commit", 3),
        ("merge", "stable"),
        ("commit", 4),
        ("checkout", "stable"),
        ("merge", master, "Merge master"),
        ("commit", 5),
        ("checkout", master),
        ("commit", 6),
    ]
    dvcs = tools.generate_repo_from_ops(tmpdir, dvcs_type, history)

    conf = config.Config()
    conf.branches = [master, "stable"]
    conf.repo = dvcs.path
    conf.project = join(tmpdir, "repo")
    return dvcs, master, repo.get_repo(conf), conf
Ejemplo n.º 28
0
def _test_generic_repo(conf, tmpdir, hash_range, master, branch, is_remote=False):
    """Smoke-test checkout, corruption recovery and hash/date/tag queries."""
    # Reserve a unique, non-existing working-copy path.
    work_dir = tempfile.mkdtemp(dir=tmpdir, prefix="workcopy")
    os.rmdir(work_dir)

    if is_remote:
        # Mirroring must fail early if *mirror_dir* exists but is not a mirror.
        if os.path.isdir(conf.project):
            shutil.rmtree(conf.project)
        os.makedirs(join(conf.project, 'hello'))
        with pytest.raises(util.UserError):
            r = repo.get_repo(conf)
        shutil.rmtree(conf.project)

    # basic checkouts
    r = repo.get_repo(conf)

    for rev in (master, branch, master):
        r.checkout(work_dir, rev)

    # Recover after the VCS control directory is wiped out.
    for control in ('.hg', '.git'):
        control_path = os.path.join(work_dir, control)
        if os.path.isdir(control_path):
            shutil.rmtree(control_path)
    r.checkout(work_dir, master)

    commit_hashes = r.get_hashes_from_range(hash_range)
    assert len(commit_hashes) == 4

    # Newest commits come first.
    commit_dates = [r.get_date(h) for h in commit_hashes]
    assert commit_dates == sorted(commit_dates, reverse=True)

    for tag in r.get_tags():
        r.get_date_from_name(tag)
Ejemplo n.º 29
0
def test_set_commit_hash(capsys, existing_env_conf):
    """'asv run --set-commit-hash' records results under the given hash."""
    tmpdir, local, conf, machine_file = existing_env_conf

    r = repo.get_repo(conf)
    commit_hash = r.get_hash_from_name(r.get_branch_name())

    tools.run_asv_with_conf(conf, 'run', '--set-commit-hash=' + commit_hash,
                            _machine_file=join(tmpdir, 'asv-machine.json'))

    env_name = list(environment.get_environments(conf, None))[0].name
    result_filename = commit_hash[:conf.hash_length] + '-' + env_name + '.json'
    machine_results = join('results_workflow', 'orangutan')
    assert result_filename in os.listdir(machine_results)

    # The saved result file must carry the forced commit hash.
    saved = results.Results.load(join(machine_results, result_filename))
    assert saved.commit_hash == commit_hash
Ejemplo n.º 30
0
def test_set_commit_hash(capsys, existing_env_conf):
    """Results from --set-commit-hash are stored under that commit hash."""
    tmpdir, local, conf, machine_file = existing_env_conf

    r = repo.get_repo(conf)
    commit_hash = r.get_hash_from_name(r.get_branch_name())

    tools.run_asv_with_conf(conf, 'run', '--set-commit-hash=' + commit_hash,
                            _machine_file=join(tmpdir, 'asv-machine.json'))

    env_name = list(environment.get_environments(conf, None))[0].name
    result_filename = commit_hash[:conf.hash_length] + '-' + env_name + '.json'
    machine_results = join('results_workflow', 'orangutan')
    assert result_filename in os.listdir(machine_results)

    # The stored result must reference the forced commit hash.
    loaded = results.Results.load(join(machine_results, result_filename))
    assert loaded.commit_hash == commit_hash
Ejemplo n.º 31
0
def test_invalid_benchmark_tree(tmpdir):
    """Collecting an invalid benchmark tree must raise util.UserError."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['benchmark_dir'] = INVALID_BENCHMARK_DIR
    d['env_dir'] = "env"
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))

    with pytest.raises(util.UserError):
        # Construction itself must fail; the return value (previously bound
        # to an unused local) is irrelevant.
        benchmarks.Benchmarks(conf, repo, envs)
Ejemplo n.º 32
0
def test_invalid_benchmark_tree(tmpdir):
    """An invalid benchmark tree must raise util.UserError on collection."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    d = {}
    d.update(ASV_CONF_JSON)
    d['benchmark_dir'] = INVALID_BENCHMARK_DIR
    d['env_dir'] = "env"
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))

    with pytest.raises(util.UserError):
        # Construction itself must fail; dropping the previously unused
        # `b = ...` binding (flake8 F841).
        benchmarks.Benchmarks(conf, repo, envs)
Ejemplo n.º 33
0
def _test_branches(conf, branch_commits, require_describe=False):
    """Shared checks: branch commit listing and hash<->name round-trips."""
    r = repo.get_repo(conf)

    assert len(conf.branches) == 2

    for branch in conf.branches:
        branch_hashes = r.get_branch_commits(branch)

        for expected in branch_commits[branch]:
            assert expected in branch_hashes

            name = r.get_name_from_hash(expected)
            if require_describe:
                assert name is not None
            if name is None:
                continue
            # The name must map back to the same commit and appear in
            # the decorated hash string.
            assert r.get_hash_from_name(name) == expected
            assert name in r.get_decorated_hash(expected)
Ejemplo n.º 34
0
def test_regression_multiple_branches(dvcs_type, tmpdir):
    """Regressions must be attributed to the branch they occur on.

    Builds a two-branch history (master + "stable") where only the
    "stable" branch steps its benchmark value from 1 to 2, then checks
    that ``publish`` reports exactly one regression, tagged 'stable'.
    """
    tmpdir = six.text_type(tmpdir)
    # The default branch is named differently under git and mercurial.
    if dvcs_type == "git":
        master = "master"
    elif dvcs_type == "hg":
        master = "default"
    # History: fork "stable" off the first commit, then interleave
    # commits on both branches; the final 5 "stable" commits carry
    # value 2 (the regression), everything else carries value 1.
    dvcs = tools.generate_repo_from_ops(
        tmpdir, dvcs_type, [
            ("commit", 1),
            ("checkout", "stable", master),
            ("commit", 1),
            ("checkout", master),
        ] + 4 * [
            ("commit", 1),
            ("checkout", "stable"),
            ("commit", 1),
            ("checkout", master),
        ] + 5 * [
            ("commit", 1),
            ("checkout", "stable"),
            ("commit", 2),
            ("checkout", master),
        ],
    )
    commit_values = {}
    # Oldest-first commit lists per branch.
    branches = dict(
        (branch, list(reversed(dvcs.get_branch_hashes(branch))))
        for branch in (master, "stable")
    )
    # Expected benchmark value per commit, mirroring the ops above.
    for branch, values in (
        (master, 10 * [1]),
        ("stable", 5 * [1] + 5 * [2]),
    ):
        for commit, value in zip(branches[branch], values):
            commit_values[commit] = value
    conf = tools.generate_result_dir(tmpdir, dvcs, commit_values)
    conf.branches = [master, "stable"]
    tools.run_asv_with_conf(conf, "publish")
    repo = get_repo(conf)
    regressions = util.load_json(join(conf.html_dir, "regressions.json"))
    graph_path = join('graphs', 'branch-stable', 'machine-tarzan', 'time_func.json')
    # Regression occur on 5th commit of stable branch
    revision = repo.get_revisions(commit_values.keys())[branches["stable"][5]]
    expected = {'regressions': [['time_func', graph_path, {'branch': 'stable'}, None,
                                 [[[None, revision, 1.0, 2.0]], 2.0, 1.0]]]}
    assert regressions == expected
Ejemplo n.º 35
0
def _test_branches(conf, branch_commits, require_describe=False):
    """Verify branch commit membership and name/hash round-tripping."""
    repository = repo.get_repo(conf)

    assert len(conf.branches) == 2

    for branch in conf.branches:
        commits_on_branch = repository.get_branch_commits(branch)

        for commit in branch_commits[branch]:
            assert commit in commits_on_branch

            # A human-readable name (if any) must round-trip to the
            # same hash and show up in the decorated form.
            name = repository.get_name_from_hash(commit)
            if require_describe:
                assert name is not None
            if name is not None:
                assert repository.get_hash_from_name(name) == commit
                assert name in repository.get_decorated_hash(commit)
Ejemplo n.º 36
0
def test_installed_commit_hash(tmpdir):
    """Check tracking of the installed commit hash in an environment.

    ``installed_commit_hash`` and the ``ASV_COMMIT`` env var must be set
    by ``install_project``, survive re-creating the environment object,
    be invalidated by a configuration change, and be cleared (hash only,
    not ``ASV_COMMIT``) by ``_uninstall_project``.
    """
    # Fix: the six.text_type conversion was duplicated; once is enough.
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    commit_hash = dvcs.get_branch_hashes()[0]

    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.repo = os.path.abspath(dvcs.path)
    conf.matrix = {}
    conf.build_cache_size = 0  # no build cache for this test

    repo = get_repo(conf)

    def get_env():
        # Re-create the environment object to check state persistence.
        return list(environment.get_environments(conf, None))[0]

    env = get_env()
    env.create()

    # Check updating installed_commit_hash (use `is None`, not `== None`)
    assert env.installed_commit_hash is None
    assert env._env_vars.get('ASV_COMMIT') is None
    env.install_project(conf, repo, commit_hash)
    assert env.installed_commit_hash == commit_hash
    assert env._env_vars.get('ASV_COMMIT') == commit_hash

    env = get_env()
    assert env.installed_commit_hash == commit_hash
    assert env._env_vars.get('ASV_COMMIT') == commit_hash

    # Configuration change results to reinstall
    env._project = "something"
    assert env.installed_commit_hash is None

    # Uninstall resets hash (but not ASV_COMMIT)
    env = get_env()
    env._uninstall_project()
    assert env.installed_commit_hash is None
    assert env._env_vars.get('ASV_COMMIT') is not None

    env = get_env()
    assert env.installed_commit_hash is None
    assert env._env_vars.get('ASV_COMMIT') is None
Ejemplo n.º 37
0
def _test_branches(conf, branch_commits):
    """Check BranchCache commit listing and reverse branch lookup."""
    r = repo.get_repo(conf)
    branch_cache = BranchCache(conf, r)

    assert len(conf.branches) == 2

    # Map every expected commit to the branch it was found on...
    commit_branches = {}
    for branch in conf.branches:
        cached = branch_cache.get_branch_commits(branch)
        for commit in branch_commits[branch]:
            assert commit in cached
            commit_branches[commit] = branch

    # ...then verify the reverse lookup agrees.
    for commit, branch in commit_branches.items():
        assert branch in branch_cache.get_branches(commit)
Ejemplo n.º 38
0
def test_invalid_benchmark_tree(tmpdir):
    """Discovery over an invalid benchmark dir must raise UserError."""
    tmpdir = str(tmpdir)
    os.chdir(tmpdir)

    conf = config.Config.from_json(dict(
        ASV_CONF_JSON,
        benchmark_dir=INVALID_BENCHMARK_DIR,
        env_dir="env",
        repo=tools.generate_test_repo(tmpdir, [0]).path,
    ))

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    with pytest.raises(util.UserError):
        benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
Ejemplo n.º 39
0
def test_installed_commit_hash(tmpdir):
    """Check tracking of the installed commit hash in an environment.

    ``installed_commit_hash`` and the ``ASV_COMMIT`` env var must be set
    by ``install_project``, survive re-creating the environment object,
    be invalidated by a configuration change, and be cleared (hash only,
    not ``ASV_COMMIT``) by ``_uninstall_project``.
    """
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    commit_hash = dvcs.get_branch_hashes()[0]

    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.repo = os.path.abspath(dvcs.path)
    conf.matrix = {}
    conf.build_cache_size = 0  # no build cache for this test

    repo = get_repo(conf)

    def get_env():
        # Re-create the environment object to check state persistence.
        return list(environment.get_environments(conf, None))[0]

    env = get_env()
    env.create()

    # Check updating installed_commit_hash (use `is None`, not `== None`)
    assert env.installed_commit_hash is None
    assert env._env_vars.get('ASV_COMMIT') is None
    env.install_project(conf, repo, commit_hash)
    assert env.installed_commit_hash == commit_hash
    assert env._env_vars.get('ASV_COMMIT') == commit_hash

    env = get_env()
    assert env.installed_commit_hash == commit_hash
    assert env._env_vars.get('ASV_COMMIT') == commit_hash

    # Configuration change results to reinstall
    env._project = "something"
    assert env.installed_commit_hash is None

    # Uninstall resets hash (but not ASV_COMMIT)
    env = get_env()
    env._uninstall_project()
    assert env.installed_commit_hash is None
    assert env._env_vars.get('ASV_COMMIT') is not None

    env = get_env()
    assert env.installed_commit_hash is None
    assert env._env_vars.get('ASV_COMMIT') is None
Ejemplo n.º 40
0
def _test_branches(conf, branch_commits):
    """Exercise BranchCache: per-branch commits and commit->branch lookup."""
    branch_cache = BranchCache(conf, repo.get_repo(conf))

    assert len(conf.branches) == 2

    commit_branches = {}

    for branch in conf.branches:
        commits = branch_cache.get_branch_commits(branch)

        for commit in branch_commits[branch]:
            assert commit in commits
            commit_branches[commit] = branch

    # Every commit must report the branch it was recorded under.
    for commit, branch in commit_branches.items():
        assert branch in branch_cache.get_branches(commit)
Ejemplo n.º 41
0
def _test_generic_repo(conf, tmpdir, hash_range, master, branch):
    """Smoke-test basic repo operations: checkout, hash ranges, dates, tags."""
    workcopy_dir = join(tmpdir, "workcopy")

    r = repo.get_repo(conf)

    # Switch back and forth between the two branches.
    for ref in (master, branch, master):
        r.checkout(workcopy_dir, ref)

    hashes = r.get_hashes_from_range(hash_range)
    assert len(hashes) == 4

    # Hashes come newest-first, so their dates must be descending.
    dates = [r.get_date(h) for h in hashes]
    assert dates == sorted(dates, reverse=True)

    for tag in r.get_tags():
        r.get_date_from_name(tag)
Ejemplo n.º 42
0
Archivo: tools.py Proyecto: wrwrwr/asv
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    """Synthesize an asv results directory for testing.

    Parameters
    ----------
    tmpdir : str
        Base directory; "results" and "html" subdirectories are created
        inside it.
    dvcs : test repository object
        Repository whose commits the fabricated results are attached to.
    values : dict
        Maps commit name -> benchmark value for the single benchmark
        "time_func"; a dict value supplies parameterized results via its
        "params" key.
    branches : list, optional
        Branch list for the generated Config; defaults to [None].

    Returns
    -------
    conf : Config
        Configuration pointing at the generated results/html dirs.
    """
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    # Single dummy machine named "tarzan".
    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
        'version': 1,
    })

    timestamp = datetime.datetime.utcnow()

    # One Results file per commit, all carrying the same benchmark name.
    params = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        result.add_result("time_func", value, timestamp, timestamp)
        result.save(result_dir)

    # Minimal benchmarks.json describing the single benchmark.
    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": params or [],
        }
    },
                    api_version=1)
    return conf
Ejemplo n.º 43
0
def benchmarks_fixture(tmpdir):
    """Set up a benchmark suite plus repo; return (conf, repo, envs, hash)."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    conf_dict = dict(ASV_CONF_JSON)
    conf_dict['env_dir'] = "env"
    conf_dict['benchmark_dir'] = 'benchmark'
    conf_dict['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf_dict['branches'] = ["master"]
    conf = config.Config.from_json(conf_dict)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    return conf, repo, envs, commit_hash
Ejemplo n.º 44
0
def benchmarks_fixture(tmpdir):
    """Create a throwaway benchmark suite; return (conf, repo, envs, hash)."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    overrides = {
        'env_dir': "env",
        'benchmark_dir': 'benchmark',
        'repo': tools.generate_test_repo(tmpdir, [0]).path,
        'branches': ["master"],
    }
    d = {}
    d.update(ASV_CONF_JSON)
    d.update(overrides)
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    return conf, repo, envs, commit_hash
Ejemplo n.º 45
0
def test_find_benchmarks_cwd_imports(tmpdir):
    # Test that files in the directory above the benchmark suite are
    # not importable

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    # Benchmark package whose module import-checks the poison file.
    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        pass

    with open(os.path.join('benchmark', 'test.py'), 'w') as f:
        f.write("""
try:
    import this_should_really_not_be_here
    raise AssertionError('This should not happen!')
except ImportError:
    pass

def track_this():
    return 0
""")

    # Poison module next to (above) the benchmark dir; it must NOT be
    # importable from inside the suite.
    with open(os.path.join('this_should_really_not_be_here.py'), 'w') as f:
        f.write("raise AssertionError('Should not be imported!')")

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [[0, 1]]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    # Discovery succeeds (no AssertionError raised by the poison file)
    # and finds exactly the one track_this benchmark.
    b = benchmarks.Benchmarks.discover(conf,
                                       repo,
                                       envs, [commit_hash],
                                       regex='track_this')
    assert len(b) == 1
Ejemplo n.º 46
0
Archivo: tools.py Proyecto: craig8/asv
def generate_result_dir(tmpdir, dvcs, values, branches=None):
    """Synthesize an asv results directory for testing.

    Parameters
    ----------
    tmpdir : str
        Base directory; "results" and "html" subdirectories are created
        inside it.
    dvcs : test repository object
        Repository whose commits the fabricated results are attached to.
    values : dict
        Maps commit name -> benchmark value for the single benchmark
        "time_func"; a dict value supplies parameterized results via its
        "params" key.
    branches : list, optional
        Branch list for the generated Config; defaults to [None].

    Returns
    -------
    conf : Config
        Configuration pointing at the generated results/html dirs.
    """
    result_dir = join(tmpdir, "results")
    os.makedirs(result_dir)
    html_dir = join(tmpdir, "html")
    machine_dir = join(result_dir, "tarzan")
    os.makedirs(machine_dir)

    if branches is None:
        branches = [None]

    conf = config.Config.from_json({
        'results_dir': result_dir,
        'html_dir': html_dir,
        'repo': dvcs.path,
        'project': 'asv',
        'branches': branches or [None],
    })
    repo = get_repo(conf)

    # Single dummy machine named "tarzan".
    util.write_json(join(machine_dir, "machine.json"), {
        'machine': 'tarzan',
    })

    timestamp = datetime.datetime.utcnow()

    # One Results file per commit, all for the same benchmark name.
    params = None
    for commit, value in values.items():
        if isinstance(value, dict):
            params = value["params"]
        result = Results({"machine": "tarzan"}, {}, commit,
                         repo.get_date_from_name(commit), "2.7", None)
        result.add_result("time_func", value, timestamp, timestamp)
        result.save(result_dir)

    # Minimal benchmarks.json describing the single benchmark.
    util.write_json(join(result_dir, "benchmarks.json"), {
        "time_func": {
            "name": "time_func",
            "params": params or [],
            "param_names": params or [],
        }
    }, api_version=1)
    return conf
Ejemplo n.º 47
0
def test_repo(tmpdir):
    """Clone the real asv repository and exercise basic repo operations.

    NOTE(review): requires network access to github.com.
    """
    conf = config.Config()

    conf.project = six.text_type(tmpdir.join("repo"))
    conf.repo = "https://github.com/spacetelescope/asv.git"

    r = repo.get_repo(conf)
    # NOTE(review): presumably an older Repo API — checkout() without a
    # work dir and get_date_from_tag(); other tests in this file use
    # checkout(dir, ref) and get_date_from_name().  TODO confirm against
    # the Repo class actually in use.
    r.checkout("master")
    r.checkout("gh-pages")
    r.checkout("master")

    hashes = r.get_hashes_from_range("ae0c27b65741..e6f382a704f7")
    assert len(hashes) == 4

    # Hashes come newest-first, so their dates must be descending.
    dates = [r.get_date(hash) for hash in hashes]
    assert dates == sorted(dates)[::-1]

    tags = r.get_tags()
    for tag in tags:
        r.get_date_from_tag(tag)
Ejemplo n.º 48
0
    def __init__(self, conf: Config, python: str, requirements: dict) -> None:
        """
        Build a nox-backed ASV environment.

        Checks out the project at ``conf.nox_setup_commit`` (to get a
        noxfile) and computes the nox environment path before delegating
        to the parent constructor.

        Parameters
        ----------
        conf: Config instance

        python : str
            Version of Python. Must be of the form "MAJOR.MINOR".

        requirements : dict
            Dictionary mapping a PyPI package name to a version
            identifier string.

        """
        # Local import: nox is only needed when this environment type is used.
        from nox.sessions import _normalize_path

        # Need to checkout the project BEFORE the benchmark run - to access a noxfile.
        self.project_temp_checkout = TemporaryDirectory(
            prefix="nox_asv_checkout_")
        repo = get_repo(conf)
        repo.checkout(self.project_temp_checkout.name, conf.nox_setup_commit)
        self.noxfile_rel_path = conf.noxfile_rel_path
        self.setup_noxfile = (Path(self.project_temp_checkout.name) /
                              self.noxfile_rel_path)
        self.nox_session_name = conf.nox_session_name

        # Some duplication of parent code - need these attributes BEFORE
        #  running inherited code.
        self._python = python
        self._requirements = requirements
        self._env_dir = conf.env_dir

        # Prepare the actual environment path, to override self._path.
        # NOTE(review): self.hashname and _get_nox_session_name come from
        # the parent/environment machinery — not visible in this file.
        nox_envdir = str(Path(self._env_dir).absolute() / self.hashname)
        nox_friendly_name = self._get_nox_session_name(python)
        self._nox_path = Path(_normalize_path(nox_envdir, nox_friendly_name))

        # For storing any extra conda requirements from asv.conf.json.
        self._extra_reqs_path = self._nox_path / "asv-extra-reqs.yaml"

        super().__init__(conf, python, requirements)
Ejemplo n.º 49
0
def test_find_benchmarks_cwd_imports(tmpdir):
    # Test that files in the directory above the benchmark suite are
    # not importable

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    # Benchmark package whose module import-checks the poison file.
    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        pass

    with open(os.path.join('benchmark', 'test.py'), 'w') as f:
        f.write("""
try:
    import this_should_really_not_be_here
    raise AssertionError('This should not happen!')
except ImportError:
    pass

def track_this():
    return 0
""")

    # Poison module above the benchmark dir; must NOT be importable
    # from inside the suite.
    with open(os.path.join('this_should_really_not_be_here.py'), 'w') as f:
        f.write("raise AssertionError('Should not be imported!')")

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    # Discovery succeeds and finds exactly the one track_this benchmark.
    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='track_this')
    assert len(b) == 1
Ejemplo n.º 50
0
def _test_generic_repo(conf,
                       tmpdir,
                       hash_range="ae0c27b65741..e6f382a704f7",
                       master="master",
                       branch="gh-pages"):
    """Basic repo smoke test: checkouts, hash ranges, dates and tags."""
    workcopy_dir = six.text_type(tmpdir.join("workcopy"))

    r = repo.get_repo(conf)

    # Switch back and forth between the two branches.
    for ref in (master, branch, master):
        r.checkout(workcopy_dir, ref)

    hashes = r.get_hashes_from_range(hash_range)
    assert len(hashes) == 4

    # Dates must be monotonically decreasing (newest commit first).
    dates = [r.get_date(h) for h in hashes]
    assert dates == sorted(dates, reverse=True)

    for tag in r.get_tags():
        r.get_date_from_name(tag)
Ejemplo n.º 51
0
def test_install_success(tmpdir):
    """install_project must really install the package (gh-805).

    This may fail if pip in install_command e.g. gets confused by an
    .egg-info directory in its cwd to think the package is already
    installed.
    """
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')
    commit_hash = dvcs.get_branch_hashes()[0]

    conf = config.Config()
    for attr, value in [
        ("env_dir", os.path.join(tmpdir, "env")),
        ("pythons", [PYTHON_VER1]),
        ("repo", os.path.abspath(dvcs.path)),
        ("matrix", {}),
        ("build_cache_size", 0),
    ]:
        setattr(conf, attr, value)

    repo = get_repo(conf)

    env = list(environment.get_environments(conf, None))[0]
    env.create()
    env.install_project(conf, repo, commit_hash)

    # The installed package must be importable and carry dummy_value == 0.
    env.run(['-c', 'import asv_test_repo as t, sys; sys.exit(0 if t.dummy_value == 0 else 1)'])
Ejemplo n.º 52
0
def test_import_failure_retry(tmpdir):
    # Test that a different commit is tried on import failure

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    # Benchmark module that fails to import when the project's
    # dummy_value is 0 (i.e. at the newest commit below).
    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        f.write(
            textwrap.dedent("""
        import asv_test_repo

        def time_foo():
            pass

        time_foo.number = asv_test_repo.dummy_value

        if asv_test_repo.dummy_value == 0:
            raise RuntimeError("fail discovery")
        """))

    # Three commits with dummy_value 2, 1, 0 (oldest to newest).
    dvcs = tools.generate_test_repo(tmpdir, [2, 1, 0])

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = dvcs.path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hashes = dvcs.get_branch_hashes()

    # Discovery at the newest commit (value 0) fails, so the next
    # commit (value 1) must be used — visible via time_foo.number.
    b = benchmarks.Benchmarks.discover(conf, repo, envs, commit_hashes)
    assert len(b) == 1
    assert b['time_foo']['number'] == 1
Ejemplo n.º 53
0
def test_import_failure_retry(tmpdir):
    # Test that a different commit is tried on import failure

    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    # Benchmark module that fails to import when dummy_value is 0,
    # which is the case at the newest commit generated below.
    os.makedirs('benchmark')
    with open(os.path.join('benchmark', '__init__.py'), 'w') as f:
        f.write(textwrap.dedent("""
        import asv_test_repo

        def time_foo():
            pass

        time_foo.number = asv_test_repo.dummy_value

        if asv_test_repo.dummy_value == 0:
            raise RuntimeError("fail discovery")
        """))

    # Three commits with dummy_value 2, 1, 0 (oldest to newest).
    dvcs = tools.generate_test_repo(tmpdir, [2, 1, 0])

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = dvcs.path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hashes = dvcs.get_branch_hashes()

    # Discovery at the failing commit falls back to the previous one,
    # whose dummy_value (1) shows up in time_foo.number.
    b = benchmarks.Benchmarks.discover(conf, repo, envs, commit_hashes)
    assert len(b) == 1
    assert b['time_foo']['number'] == 1
Ejemplo n.º 54
0
def test_publish(tmpdir):
    """End-to-end check of `asv publish` output over a two-branch history.

    Synthesizes results from the bundled RESULT_DIR fixture, attaches
    them to a generated git history with a "some-branch" fork, publishes
    with and without the extra branch configured, and verifies the
    generated HTML/JSON artifacts.
    """
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    result_dir = join(tmpdir, 'sample_results')
    os.makedirs(result_dir)
    os.makedirs(join(result_dir, 'cheetah'))

    # Synthesize history with two branches that both have commits
    result_files = [fn for fn in os.listdir(join(RESULT_DIR, 'cheetah'))
                    if fn.endswith('.json') and fn != 'machine.json']
    result_files.sort()
    master_values = list(range(len(result_files)*2//3))
    branch_values = list(range(len(master_values), len(result_files)))
    dvcs = tools.generate_test_repo(tmpdir, master_values, 'git',
                                    [('master~6', 'some-branch', branch_values)])

    # Copy and modify result files, fixing commit hashes and setting result
    # dates to distinguish the two branches
    master_commits = dvcs.get_branch_hashes('master')
    only_branch = [x for x in dvcs.get_branch_hashes('some-branch')
                   if x not in master_commits]
    commits = master_commits + only_branch
    for k, item in enumerate(zip(result_files, commits)):
        fn, commit = item
        src = join(RESULT_DIR, 'cheetah', fn)
        # Result filenames embed the short hash in their first 8 chars.
        dst = join(result_dir, 'cheetah', commit[:8] + fn[8:])
        data = util.load_json(src, cleanup=False)
        data['commit_hash'] = commit
        util.write_json(dst, data)

    shutil.copyfile(join(RESULT_DIR, 'benchmarks.json'),
                    join(result_dir, 'benchmarks.json'))
    shutil.copyfile(join(RESULT_DIR, 'cheetah', 'machine.json'),
                    join(result_dir, 'cheetah', 'machine.json'))

    # Publish the synthesized data
    conf = config.Config.from_json(
        {'benchmark_dir': BENCHMARK_DIR,
         'results_dir': result_dir,
         'html_dir': join(tmpdir, 'html'),
         'repo': dvcs.path,
         'project': 'asv'})

    tools.run_asv_with_conf(conf, 'publish')

    # Check output: default config publishes only "master", so no
    # some-branch graph directories may exist.
    assert isfile(join(tmpdir, 'html', 'index.html'))
    assert isfile(join(tmpdir, 'html', 'index.json'))
    assert isfile(join(tmpdir, 'html', 'asv.js'))
    assert isfile(join(tmpdir, 'html', 'asv.css'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython', 'arch-x86_64',
                          'branch-some-branch'))
    assert not isdir(join(tmpdir, 'html', 'graphs', 'Cython-null', 'arch-x86_64',
                          'branch-some-branch'))
    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master']

    repo = get_repo(conf)
    revision_to_hash = dict((r, h) for h, r in six.iteritems(repo.get_revisions(commits)))

    def check_file(branch, cython):
        # Validate one published graph file: correct commits per branch
        # and strictly increasing revisions.
        fn = join(tmpdir, 'html', 'graphs', cython, 'arch-x86_64', 'branch-' + branch,
                  'cpu-Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                  'machine-cheetah', 'numpy-1.8', 'os-Linux (Fedora 20)', 'python-2.7', 'ram-8.2G',
                  'time_coordinates.time_latitude.json')
        data = util.load_json(fn, cleanup=False)
        data_commits = [revision_to_hash[x[0]] for x in data]
        if branch == "master":
            assert all(c in master_commits for c in data_commits)
        else:
            # Must contains commits from some-branch
            assert any(c in only_branch for c in data_commits)
            # And commits from master
            assert any(c in master_commits for c in data_commits)

        # Check that revisions are strictly increasing
        assert all(x[0] < y[0] for x, y in zip(data, data[1:]))

    check_file("master", "Cython")
    check_file("master", "Cython-null")

    # Publish with branches set in the config
    conf.branches = ['master', 'some-branch']
    tools.run_asv_with_conf(conf, 'publish')

    # Check output
    check_file("master", "Cython")
    check_file("master", "Cython-null")
    check_file("some-branch", "Cython")
    check_file("some-branch", "Cython-null")

    index = util.load_json(join(tmpdir, 'html', 'index.json'))
    assert index['params']['branch'] == ['master', 'some-branch']
    assert index['params']['Cython'] == ['', None]
    assert index['params']['ram'] == ['8.2G', 8804682956.8]

    # Every (cython, branch) combination must appear in graph_param_list,
    # plus one extra entry with the numeric ram value.
    expected_graph_list = [{'Cython': cython, 'arch': 'x86_64',
                            'branch': branch,
                            'cpu': 'Intel(R) Core(TM) i5-2520M CPU @ 2.50GHz (4 cores)',
                            'machine': 'cheetah',
                            'numpy': '1.8',
                            'os': 'Linux (Fedora 20)',
                            'python': '2.7',
                            'ram': '8.2G'}
                            for cython in ["", None] for branch in ["master", "some-branch"]]
    d = dict(expected_graph_list[0])
    d['ram'] = 8804682956.8
    expected_graph_list.append(d)

    assert len(index['graph_param_list']) == len(expected_graph_list)
    for item in expected_graph_list:
        assert item in index['graph_param_list']
Ejemplo n.º 55
0
def test_custom_commands(tmpdir):
    # check custom install/uninstall/build commands work
    tmpdir = six.text_type(tmpdir)

    dvcs = generate_test_repo(tmpdir, [0], dvcs_type='git')

    # Helper scripts implementing the three custom commands.
    build_py = os.path.abspath(os.path.join(tmpdir, 'build.py'))
    install_py = os.path.abspath(os.path.join(tmpdir, 'install.py'))
    uninstall_py = os.path.abspath(os.path.join(tmpdir, 'uninstall.py'))

    conf = config.Config()
    conf.env_dir = os.path.join(tmpdir, "env")
    conf.pythons = [PYTHON_VER1]
    conf.repo = os.path.abspath(dvcs.path)
    conf.matrix = {}
    conf.build_cache_size = 0

    conf.build_command = ["python {0} {{build_cache_dir}}".format(quote(build_py))]
    conf.install_command = ["python {0} {{env_dir}} {{build_cache_dir}}".format(quote(install_py))]
    conf.uninstall_command = ["python {0} {{env_dir}}".format(quote(uninstall_py))]

    # build: writes a 'cached' marker file into the build cache dir,
    # also checking the ASV_* environment variables match the argv.
    with open(build_py, 'wb') as f:
        f.write(b"import os, sys\n"
                b"assert sys.argv[1] == os.environ['ASV_BUILD_CACHE_DIR']\n"
                b"f = open(os.path.join(os.environ['ASV_BUILD_CACHE_DIR'], 'cached'), 'wb')\n"
                b"f.write(b'data')\n"
                b"f.close()\n")

    # install: copies the cached marker into the env as 'installed'.
    with open(install_py, 'wb') as f:
        f.write(b"import os, sys, shutil\n"
                b"assert sys.argv[1] == os.environ['ASV_ENV_DIR']\n"
                b"assert sys.argv[2] == os.environ['ASV_BUILD_CACHE_DIR']\n"
                b"shutil.copyfile(os.path.join(os.environ['ASV_BUILD_CACHE_DIR'], 'cached'),\n"
                b"                os.path.join(os.environ['ASV_ENV_DIR'], 'installed'))\n")

    # uninstall: removes the 'installed' marker if present.
    with open(uninstall_py, 'wb') as f:
        f.write(b"import os, sys\n"
                b"assert sys.argv[1] == os.environ['ASV_ENV_DIR']\n"
                b"fn = os.path.join(os.environ['ASV_ENV_DIR'], 'installed')\n"
                b"if os.path.isfile(fn): os.unlink(fn)\n")

    def get_env():
        # Fresh environment object for the current conf.
        env = list(environment.get_environments(conf, None))[0]
        env.create()
        return env

    env = get_env()
    repo = get_repo(conf)
    commit_hash = dvcs.get_branch_hashes()[0]

    cache_dir = os.path.join(env._path, 'asv-build-cache')
    cache_file = os.path.join(cache_dir, commit_hash, 'cached')
    install_file = os.path.join(env._path, 'installed')

    # Project installation should succeed with cache size 0,
    # and not leave cache files around
    env.install_project(conf, repo, commit_hash)
    assert os.path.isfile(install_file)
    assert not os.listdir(cache_dir)
    env._set_installed_commit_hash(None)

    # It should succeed with nonzero cache size
    conf.build_cache_size = 1
    env = get_env()
    env.install_project(conf, repo, commit_hash)

    assert os.path.isfile(cache_file)
    assert os.path.isfile(install_file)

    # Explicitly check uninstall works
    env._uninstall_project()
    assert os.path.isfile(cache_file)
    assert not os.path.isfile(install_file)

    # Check reinstall uses cache and doesn't call build command
    conf.build_command = ['python -c "import sys; sys.exit(1)"']
    env = get_env()
    env.install_project(conf, repo, commit_hash)

    assert os.path.isfile(install_file)
    assert os.path.isfile(cache_file)

    # Bad install command should cause a failure
    conf.install_command = ['python -c "import sys; sys.exit(1)"']
    env = get_env()
    with pytest.raises(util.ProcessError):
        env.install_project(conf, repo, commit_hash)
Ejemplo n.º 56
0
def test_code_extraction(tmpdir):
    """Discovered benchmarks must carry their source code and a version
    hash derived from it (sha256 of the code)."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex=r'^code_extraction\.')

    # Module-level benchmark: its code plus the module-level setup
    # functions are extracted together.
    expected_code = textwrap.dedent("""
    def track_test():
        # module-level 難
        return 0

    def setup():
        # module-level
        pass

    def setup_cache():
        # module-level
        pass
    """).strip()

    bench = b['code_extraction.track_test']
    assert bench['version'] == sha256(bench['code'].encode('utf-8')).hexdigest()
    assert bench['code'] == expected_code

    # Class-level benchmark: the class body plus module- and class-level
    # setup functions are extracted.
    expected_code = textwrap.dedent("""
    class MyClass:
        def track_test(self):
            # class-level 難
            return 0

    def setup():
        # module-level
        pass

    class MyClass:
        def setup(self):
            # class-level
            pass

        def setup_cache(self):
            # class-level
            pass
    """).strip()

    bench = b['code_extraction.MyClass.track_test']
    assert bench['version'] == sha256(bench['code'].encode('utf-8')).hexdigest()

    if sys.version_info[:2] != (3, 2):
        # Python 3.2 doesn't have __qualname__
        assert bench['code'] == expected_code
Ejemplo n.º 57
0
def test_code_extraction(tmpdir):
    """Discovered benchmarks must carry their source code (including
    pretty_source overrides) and a sha256-of-code version hash."""
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)
    envs = list(environment.get_environments(conf, None))
    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    b = benchmarks.Benchmarks.discover(conf,
                                       repo,
                                       envs, [commit_hash],
                                       regex=r'^code_extraction\.')

    # Module-level benchmark: code plus module-level setup functions.
    expected_code = textwrap.dedent("""
    def track_test():
        # module-level 難
        return 0

    def setup():
        # module-level
        pass

    def setup_cache():
        # module-level
        pass
    """).strip()

    bench = b['code_extraction.track_test']
    assert bench['version'] == sha256(
        bench['code'].encode('utf-8')).hexdigest()
    assert bench['code'] == expected_code

    # Benchmark providing a pretty_source attribute: the displayed code
    # is the override (here C-like source), not the Python body.
    expected_code = textwrap.dedent("""
    int track_pretty_source_test() {
        return 0;
    }

    def setup():
        # module-level
        pass

    def setup_cache():
        # module-level
        pass
    """).strip()

    bench = b['code_extraction.track_pretty_source_test']
    assert bench['version'] == sha256(
        bench['code'].encode('utf-8')).hexdigest()
    assert bench['code'] == expected_code

    # Class-level benchmark: class body plus module- and class-level setup.
    expected_code = textwrap.dedent("""
    class MyClass:
        def track_test(self):
            # class-level 難
            return 0

    def setup():
        # module-level
        pass

    class MyClass:
        def setup(self):
            # class-level
            pass

        def setup_cache(self):
            # class-level
            pass
    """).strip()

    bench = b['code_extraction.MyClass.track_test']
    assert bench['version'] == sha256(
        bench['code'].encode('utf-8')).hexdigest()

    if sys.version_info[:2] != (3, 2):
        # Python 3.2 doesn't have __qualname__
        assert bench['code'] == expected_code
Ejemplo n.º 58
0
def test_find_benchmarks(tmpdir):
    """Discover benchmarks in BENCHMARK_DIR, run them, and verify results.

    First exercises regex-based filtering of the benchmark set (by count
    and by key), then runs the full suite once with profiling enabled and
    asserts on recorded results, parameterization, stderr output, error
    codes, profile data and run timestamps.
    """
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    # Copy the benchmark fixture tree into a fresh repo working dir.
    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)

    envs = list(environment.get_environments(conf, None))

    # --- regex filtering of the discovered benchmark set ---
    b = benchmarks.Benchmarks(conf, repo, envs, regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks(conf, repo, envs, regex='example')
    assert len(b) == 25

    b = benchmarks.Benchmarks(conf,
                              repo,
                              envs,
                              regex='time_example_benchmark_1')
    assert len(b) == 2

    # A list of regexes selects the union of matches.
    b = benchmarks.Benchmarks(conf,
                              repo,
                              envs,
                              regex=[
                                  'time_example_benchmark_1',
                                  'some regexp that does not match anything'
                              ])
    assert len(b) == 2

    b = benchmarks.Benchmarks(conf, repo, envs, regex='custom')
    assert sorted(b.keys()) == ['custom.time_function', 'custom.track_method']

    # No regex: everything is discovered.
    b = benchmarks.Benchmarks(conf, repo, envs)
    assert len(b) == 33

    start_timestamp = datetime.datetime.utcnow()

    # --- run the full suite once, with profiling ---
    b = benchmarks.Benchmarks(conf, repo, envs)
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times['time_examples.TimeSuite.time_example_benchmark_1'][
        'result'] != [None]
    assert isinstance(
        times['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0]
        ['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    assert len(times['time_examples.TimeSuite.time_example_benchmark_1']
               ['samples'][0]) in (8, 9, 10)
    # Benchmarks that raise exceptions should have a time of "None"
    assert times['time_secondary.TimeSecondary.time_exception']['result'] == [
        None
    ]
    assert times['subdir.time_subdir.time_foo']['result'] != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on Pypy, since asizeof
        # is CPython-only
        assert times['mem_examples.mem_list']['result'][0] > 1000
    assert times['time_secondary.track_value']['result'] == [42.0]
    assert 'profile' in times['time_secondary.track_value']
    assert 'stderr' in times['time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['time_examples.TimeWithBadTimer.time_it']['result'] == [0.0]

    assert times['params_examples.track_param']['params'] == [[
        "<class 'benchmark.params_examples.ClassOne'>",
        "<class 'benchmark.params_examples.ClassTwo'>"
    ]]
    assert times['params_examples.track_param']['result'] == [42, 42]

    assert times['params_examples.mem_param']['params'] == [['10', '20'],
                                                            ['2', '3']]
    assert len(times['params_examples.mem_param']['result']) == 2 * 2

    assert times['params_examples.ParamSuite.track_value']['params'] == [[
        "'a'", "'b'", "'c'"
    ]]
    assert times['params_examples.ParamSuite.track_value']['result'] == [
        1 + 0, 2 + 0, 3 + 0
    ]

    assert isinstance(times['params_examples.TuningTest.time_it']['result'][0],
                      float)

    assert isinstance(times['params_examples.time_skip']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'][0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == [
        500
    ]
    assert times['cache_examples.ClassLevelSetup.track_example2'][
        'result'] == [500]

    assert times['cache_examples.track_cache_foo']['result'] == [42]
    assert times['cache_examples.track_cache_bar']['result'] == [12]
    assert times['cache_examples.track_my_cache_foo']['result'] == [0]

    # PEP 8: comparisons to None must use `is`, not `==`.
    assert times['cache_examples.ClassLevelSetupFail.track_fail'][
        'result'] is None
    assert 'raise RuntimeError()' in times[
        'cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    assert times['cache_examples.ClassLevelCacheTimeout.track_fail'][
        'result'] is None
    assert times['cache_examples.ClassLevelCacheTimeoutSuccess.track_success'][
        'result'] == [0]

    # The captured profile must be loadable by pstats.
    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split(
    ) == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup'] * 2, ['setup'] * 3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it'][
        'stderr'].split() in expected

    # Check run time timestamps
    for name, result in times.items():
        assert result['started_at'] >= start_timestamp
        assert result['ended_at'] >= result['started_at']
        assert result['ended_at'] <= end_timestamp
Ejemplo n.º 59
0
def test_find_benchmarks(tmpdir):
    """Discover benchmarks via Benchmarks.discover, run them, and verify.

    Exercises regex filtering with the commit-hash-based discover() API
    (including pretty_name handling), then runs the full suite once with
    profiling and asserts on results, parameterization, stderr output,
    error codes, profile data and run timestamps.
    """
    tmpdir = six.text_type(tmpdir)
    os.chdir(tmpdir)

    # Copy the benchmark fixture tree into a fresh repo working dir.
    shutil.copytree(BENCHMARK_DIR, 'benchmark')

    d = {}
    d.update(ASV_CONF_JSON)
    d['env_dir'] = "env"
    d['benchmark_dir'] = 'benchmark'
    d['repo'] = tools.generate_test_repo(tmpdir, [0]).path
    conf = config.Config.from_json(d)

    repo = get_repo(conf)

    envs = list(environment.get_environments(conf, None))

    commit_hash = repo.get_hash_from_name(repo.get_branch_name())

    # --- regex filtering of the discovered benchmark set ---
    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='secondary')
    assert len(b) == 3

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                                       regex='example')
    assert len(b) == 25

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                              regex='time_example_benchmark_1')
    assert len(b) == 2

    # A list of regexes selects the union of matches.
    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash],
                              regex=['time_example_benchmark_1',
                                     'some regexp that does not match anything'])
    assert len(b) == 2

    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash], regex='custom')
    assert sorted(b.keys()) == ['custom.time_function', 'custom.track_method',
                                'named.track_custom_pretty_name']
    # pretty_name is only present when the benchmark explicitly sets one.
    assert 'pretty_name' not in b['custom.track_method']
    assert b['custom.time_function']['pretty_name'] == 'My Custom Function'
    assert b['named.track_custom_pretty_name']['pretty_name'] == 'this.is/the.answer'

    # No regex: everything is discovered.
    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
    assert len(b) == 35

    assert 'named.OtherSuite.track_some_func' in b

    start_timestamp = datetime.datetime.utcnow()

    # --- run the full suite once, with profiling ---
    b = benchmarks.Benchmarks.discover(conf, repo, envs, [commit_hash])
    times = b.run_benchmarks(envs[0], profile=True, show_stderr=True)

    end_timestamp = datetime.datetime.utcnow()

    assert len(times) == len(b)
    assert times[
        'time_examples.TimeSuite.time_example_benchmark_1']['result'] != [None]
    assert isinstance(times['time_examples.TimeSuite.time_example_benchmark_1']['stats'][0]['std'], float)
    # The exact number of samples may vary if the calibration is not fully accurate
    assert len(times['time_examples.TimeSuite.time_example_benchmark_1']['samples'][0]) >= 5
    # Benchmarks that raise exceptions should have a time of "None"
    assert times[
        'time_secondary.TimeSecondary.time_exception']['result'] == [None]
    assert times[
        'subdir.time_subdir.time_foo']['result'] != [None]
    if not ON_PYPY:
        # XXX: the memory benchmarks don't work on Pypy, since asizeof
        # is CPython-only
        assert times[
            'mem_examples.mem_list']['result'][0] > 1000
    assert times[
        'time_secondary.track_value']['result'] == [42.0]
    assert 'profile' in times[
        'time_secondary.track_value']
    assert 'stderr' in times[
        'time_examples.time_with_warnings']
    assert times['time_examples.time_with_warnings']['errcode'] != 0

    assert times['time_examples.TimeWithBadTimer.time_it']['result'] == [0.0]

    assert times['params_examples.track_param']['params'] == [["<class 'benchmark.params_examples.ClassOne'>",
                                                               "<class 'benchmark.params_examples.ClassTwo'>"]]
    assert times['params_examples.track_param']['result'] == [42, 42]

    assert times['params_examples.mem_param']['params'] == [['10', '20'], ['2', '3']]
    assert len(times['params_examples.mem_param']['result']) == 2*2

    assert times['params_examples.ParamSuite.track_value']['params'] == [["'a'", "'b'", "'c'"]]
    assert times['params_examples.ParamSuite.track_value']['result'] == [1+0, 2+0, 3+0]

    assert isinstance(times['params_examples.TuningTest.time_it']['result'][0], float)

    assert isinstance(times['params_examples.time_skip']['result'][0], float)
    assert isinstance(times['params_examples.time_skip']['result'][1], float)
    assert util.is_nan(times['params_examples.time_skip']['result'][2])

    assert times['peakmem_examples.peakmem_list']['result'][0] >= 4 * 2**20

    assert times['cache_examples.ClassLevelSetup.track_example']['result'] == [500]
    assert times['cache_examples.ClassLevelSetup.track_example2']['result'] == [500]

    assert times['cache_examples.track_cache_foo']['result'] == [42]
    assert times['cache_examples.track_cache_bar']['result'] == [12]
    assert times['cache_examples.track_my_cache_foo']['result'] == [0]

    # PEP 8: comparisons to None must use `is`, not `==`.
    assert times['cache_examples.ClassLevelSetupFail.track_fail']['result'] is None
    assert 'raise RuntimeError()' in times['cache_examples.ClassLevelSetupFail.track_fail']['stderr']

    assert times['cache_examples.ClassLevelCacheTimeout.track_fail']['result'] is None
    assert times['cache_examples.ClassLevelCacheTimeoutSuccess.track_success']['result'] == [0]

    # The captured profile must be loadable by pstats.
    profile_path = join(tmpdir, 'test.profile')
    with open(profile_path, 'wb') as fd:
        fd.write(times['time_secondary.track_value']['profile'])
    pstats.Stats(profile_path)

    # Check for running setup on each repeat (one extra run from profile)
    # The output would contain error messages if the asserts in the benchmark fail.
    expected = ["<%d>" % j for j in range(1, 12)]
    assert times['time_examples.TimeWithRepeat.time_it']['stderr'].split() == expected

    # Calibration of iterations should not rerun setup
    expected = (['setup']*2, ['setup']*3)
    assert times['time_examples.TimeWithRepeatCalibrate.time_it']['stderr'].split() in expected

    # Check run time timestamps
    for name, result in times.items():
        assert result['started_at'] >= start_timestamp
        assert result['ended_at'] >= result['started_at']
        assert result['ended_at'] <= end_timestamp