Example #1
def pytest_generate_tests(metafunc):
    """Generate the test on the fly to take --benchmark into account.
    """
    PARAMETRIZATION = {
        'benchmark_dataset_simu':
        lambda benchmarks: [(benchmark, dataset_class)
                            for benchmark in benchmarks for dataset_class in
                            list_benchmark_datasets(benchmark)
                            if dataset_class.name.lower() == 'simulated'],
        'benchmark_dataset':
        lambda benchmarks: [(benchmark, dataset_class)
                            for benchmark in benchmarks for dataset_class in
                            list_benchmark_datasets(benchmark)],
        'benchmark_solver':
        lambda benchmarks: [(benchmark, solver) for benchmark in benchmarks
                            for solver in list_benchmark_solvers(benchmark)]
    }
    for params, func in PARAMETRIZATION.items():
        # Only parametrize the tests that request one of these fixtures.
        if params in metafunc.fixturenames:
            benchmarks = metafunc.config.getoption("benchmark")
            if benchmarks is None or len(benchmarks) == 0:
                # Default to all benchmarks in the test directory when
                # --benchmark is not passed on the command line.
                benchmarks = TEST_BENCHMARK_DIR.glob('*/')
            benchmarks = [Path(b).resolve() for b in benchmarks]
            benchmarks.sort()
            metafunc.parametrize(params, func(benchmarks), ids=class_ids)
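
This hook reads a --benchmark option from pytest's configuration; that option has to be registered elsewhere, typically in a pytest_addoption hook in conftest.py. A minimal sketch of such a registration, assuming a repeatable directory argument (the actual benchopt conftest may differ):

def pytest_addoption(parser):
    # Hypothetical registration of the --benchmark option read via
    # metafunc.config.getoption("benchmark") above (sketch only).
    parser.addoption(
        "--benchmark", action="append", default=[],
        help="Restrict the tests to the given benchmark directory. "
             "Can be passed several times.")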
Example #2
def validate_dataset_patterns(benchmark, dataset_patterns):
    """Check that all provided patterns match at least one dataset"""

    # List all dataset strings.
    datasets = list_benchmark_datasets(benchmark)
    all_datasets = []
    for dataset_class in datasets:
        for dataset_parameters in product_param(dataset_class.parameters):
            all_datasets.append(
                dataset_class._get_parametrized_name(**dataset_parameters))

    _validate_patterns(all_datasets, dataset_patterns, name_type='dataset')
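
_validate_patterns itself is not shown in this snippet; a hypothetical stand-in with the behaviour the caller relies on (every requested pattern must match at least one known name), using shell-style matching:

from fnmatch import fnmatch


def _validate_patterns(all_names, patterns, name_type='dataset'):
    # Hypothetical helper: fail early when a requested pattern matches
    # nothing, e.g. because of a typo in a dataset selection.
    for pattern in patterns:
        if not any(fnmatch(name, f"*{pattern}*") for name in all_names):
            raise ValueError(
                f"Pattern '{pattern}' does not match any {name_type}.")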
Example #3
def test_solver(benchmark_solver):

    benchmark_name, solver_class = benchmark_solver
    if not solver_class.is_installed():
        pytest.skip("Solver is not installed")

    # Skip test_solver for julia on OSX as it throws a segfault.
    # See issue #64.
    if 'julia' in solver_class.name.lower() and sys.platform == 'darwin':
        pytest.skip('Julia causes segfault on OSX for now.')

    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class.get_instance()

    datasets = list_benchmark_datasets(benchmark_name)
    simulated_dataset = [d for d in datasets if d.name.lower() == 'simulated']

    assert len(simulated_dataset) == 1, (
        "All benchmarks need to implement a simulated dataset for "
        "testing purposes")

    dataset_class = simulated_dataset[0]
    dataset = dataset_class.get_instance()

    scale, data = dataset.get_data()
    objective.set_data(**data)

    solver = solver_class.get_instance()
    solver.set_objective(**objective.to_dict())
    stop_val = 5000 if solver_class.stop_strategy == 'iteration' else 1e-15
    solver.run(stop_val)
    beta_hat_i = solver.get_result()

    assert beta_hat_i.shape == (scale, )

    val_star = objective(beta_hat_i)

    for _ in range(100):
        eps = 1e-5 * np.random.randn(scale)
        val_eps = objective(beta_hat_i + eps)
        diff = val_eps - val_star
        assert diff > 0
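
The final loop is a local-optimality check: small random perturbations around a minimizer of the objective should not decrease its value. A self-contained illustration of the same check on a toy quadratic objective (illustrative only, not benchopt code):

import numpy as np

# f(beta) = ||beta - beta_star||^2 is minimized exactly at beta_star, so any
# non-zero perturbation must increase its value.
rng = np.random.RandomState(42)
beta_star = rng.randn(10)


def toy_objective(beta):
    return np.sum((beta - beta_star) ** 2)


beta_hat = beta_star.copy()          # stand-in for the solver's output
val_star = toy_objective(beta_hat)

for _ in range(100):
    eps = 1e-5 * rng.randn(10)
    assert toy_objective(beta_hat + eps) - val_star > 0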
Example #4
def test_solver(benchmark_name, solver_class):

    if solver_class.install_cmd == 'pip':
        for package in solver_class.requirements_import:
            pytest.importorskip(package)
    elif not solver_class.is_installed():
        pytest.skip("Solver is not installed")

    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class()

    datasets = list_benchmark_datasets(benchmark_name)
    simulated_dataset = [d for d in datasets if d.name.lower() == 'simulated']

    assert len(simulated_dataset) == 1, (
        "All benchmarks need to implement a simulated dataset for "
        "testing purposes")

    dataset_class = simulated_dataset[0]
    dataset = dataset_class()

    scale, data = dataset.get_data()
    objective.set_data(**data)

    solver = solver_class()
    solver.set_objective(**objective.to_dict())
    sample = 1000 if solver_class.sampling_strategy == 'iteration' else 1e-15
    solver.run(sample)
    beta_hat_i = solver.get_result()

    assert beta_hat_i.shape == (scale, )

    val_star = objective(beta_hat_i)

    for _ in range(100):
        eps = 1e-9 * np.random.randn(scale)
        val_eps = objective(beta_hat_i + eps)
        diff = val_eps - val_star
        assert diff > 0
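
Compared to Example #3, the pip branch relies on pytest.importorskip, which imports each module listed in requirements_import and skips the test when the import fails. A minimal standalone illustration of that behaviour:

import pytest


def test_needs_numpy():
    # Skips this test entirely when numpy cannot be imported; otherwise the
    # imported module is returned for direct use.
    np = pytest.importorskip("numpy")
    assert np.zeros(3).shape == (3,)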
Example #5
import pytest
import numpy as np

from benchopt.base import SAMPLING_STRATEGIES

from benchopt.util import get_all_benchmarks
from benchopt.util import get_benchmark_objective
from benchopt.util import list_benchmark_solvers
from benchopt.util import list_benchmark_datasets


BENCHMARKS = get_all_benchmarks()
SOLVERS = [(benchmark, solver) for benchmark in BENCHMARKS
           for solver in list_benchmark_solvers(benchmark)]
DATASETS = [(benchmark, dataset) for benchmark in BENCHMARKS
            for dataset in list_benchmark_datasets(benchmark)]


def class_ids(parameter):
    if hasattr(parameter, 'name'):
        return parameter.name.lower()
    return None


@pytest.mark.parametrize('benchmark_name, dataset_class', DATASETS,
                         ids=class_ids)
def test_benchmark_objective(benchmark_name, dataset_class):
    """Check that the objective function and the datasets are well defined."""
    objective_class = get_benchmark_objective(benchmark_name)
    objective = objective_class()
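
This example also defines class_ids, plugged into parametrize through ids= above: objects with a name attribute contribute their lowercased name to the test ID, and returning None lets pytest fall back to its default ID for the other parameters. A tiny illustration (FakeDataset is hypothetical):

class FakeDataset:
    name = 'Simulated'


assert class_ids(FakeDataset) == 'simulated'     # becomes the test ID fragment
assert class_ids('/path/to/benchmark') is None   # pytest generates a default ID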
Example #6
from benchopt.base import SAMPLING_STRATEGIES

from benchopt.util import get_all_benchmarks
from benchopt.util import get_benchmark_objective
from benchopt.util import list_benchmark_solvers
from benchopt.util import list_benchmark_datasets
from benchopt.util import create_venv, delete_venv


BENCHMARKS = get_all_benchmarks()
BENCH_AND_SOLVERS = [
    (benchmark, solver) for benchmark in BENCHMARKS
    for solver in list_benchmark_solvers(benchmark)]
BENCH_AND_DATASETS = [
    (benchmark, dataset_class) for benchmark in BENCHMARKS
    for dataset_class in list_benchmark_datasets(benchmark)]
BENCH_AND_SIMULATED = [
    (benchmark, dataset_class) for benchmark in BENCHMARKS
    for dataset_class in list_benchmark_datasets(benchmark)
    if dataset_class.name.lower() == 'simulated']


def class_ids(parameter):
    if hasattr(parameter, 'name'):
        return parameter.name.lower()
    return None


# Set up and clean a test env to install/uninstall all the solvers and check
# that they are correctly configured.
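
A minimal sketch of the kind of setup/teardown fixture this comment announces, assuming create_venv and delete_venv each take a single environment-name argument (their real signatures may differ):

import pytest

TEST_ENV_NAME = "benchopt_test_env"   # hypothetical name


@pytest.fixture(scope='module')
def test_env():
    # Create a dedicated virtualenv for the install tests and make sure it is
    # removed afterwards, even when a test fails.
    create_venv(TEST_ENV_NAME)
    try:
        yield TEST_ENV_NAME
    finally:
        delete_venv(TEST_ENV_NAME)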