from pathlib import Path

from benchopt.benchmark import Benchmark
# Helper imports assumed to come from benchopt's utils modules.
from benchopt.utils.conda_env_cmd import create_conda_env
from benchopt.utils.shell_cmd import _run_shell_in_conda_env


def test(benchmark, env_name, pytest_args):
    benchmark = Benchmark(benchmark)

    from benchopt.tests import __file__ as _bench_test_module
    _bench_test_module = Path(_bench_test_module).parent

    # Default to verbose output when no extra pytest args are given, then
    # prepend the options needed to run the generic benchmark tests.
    if len(pytest_args) == 0:
        pytest_args = ('-vl',)
    pytest_args = ' '.join((
        "-p benchopt.tests.fixtures", f"--rootdir {_bench_test_module}",
        *pytest_args
    ))

    env_option = ''
    if env_name is not None:
        create_conda_env(env_name, with_pytest=True)
        if _run_shell_in_conda_env("pytest --version", env_name=env_name) != 0:
            raise ModuleNotFoundError(
                f"pytest is not installed in conda env {env_name}.\n"
                f"Please run `conda install -n {env_name} pytest` to test the "
                "benchmark in this environment."
            )
        objective = benchmark.get_benchmark_objective()
        if not objective.is_installed():
            objective.install(env_name=env_name)
        env_option = f'--test-env {env_name}'

    _bench_test_file = _bench_test_module / "test_benchmarks.py"
    cmd = (
        f'pytest {pytest_args} {_bench_test_file} '
        f'--benchmark {benchmark.benchmark_dir} {env_option} '
        # Make sure to not modify sys.path to add test file from current env
        # in sub conda env as there might be different python versions.
        '--import-mode importlib'
    )
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
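
# Usage sketch (illustrative, not part of benchopt): `test` is expected to
# back the `benchopt test` CLI command, but it can also be called directly.
# The benchmark path "./my_benchmark" and env name "bench_test_env" are
# hypothetical placeholders; the SystemExit raised above carries the pytest
# return status, so wrap the call when scripting it.
if __name__ == "__main__":
    try:
        test("./my_benchmark", env_name="bench_test_env",
             pytest_args=("-k", "solver"))
    except SystemExit as exc:
        print("benchmark tests", "failed" if exc.code else "passed")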
from pathlib import Path

from benchopt.benchmark import Benchmark

# Default benchmark used throughout the test suite.
TEST_BENCHMARK_DIR = Path(__file__).parent / 'test_benchmarks'
DUMMY_BENCHMARK_PATH = TEST_BENCHMARK_DIR / 'dummy_benchmark'

# Patterns to select specific datasets or solvers.
SELECT_ONE_SIMULATED = r'simulated*500*rho=0\]'
SELECT_ONE_PGD = r'python-pgd*step_size=1\]'

try:
    DUMMY_BENCHMARK = Benchmark(DUMMY_BENCHMARK_PATH)
    TEST_OBJECTIVE = DUMMY_BENCHMARK.get_benchmark_objective()
    TEST_SOLVER = [
        s for s in DUMMY_BENCHMARK.list_benchmark_solvers()
        if s.name == "Test-Solver"
    ][0]
    TEST_DATASET = [
        d for d in DUMMY_BENCHMARK.list_benchmark_datasets()
        if d.name == "Test-Dataset"
    ][0]
except Exception:
    # Fall back to None so importing this module never fails, e.g. when the
    # dummy benchmark cannot be loaded in the current installation.
    DUMMY_BENCHMARK = None
    TEST_OBJECTIVE = None
    TEST_SOLVER = None
    TEST_DATASET = None
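
# Usage sketch (illustrative, not part of benchopt's test suite): these
# constants are presumably imported by the pytest tests, which can skip
# themselves when the dummy benchmark failed to load. The test name below
# is hypothetical.
import pytest


def test_dummy_benchmark_is_loadable():
    if DUMMY_BENCHMARK is None:
        pytest.skip("dummy benchmark could not be loaded")
    assert TEST_SOLVER.name == "Test-Solver"
    assert TEST_DATASET.name == "Test-Dataset"
    assert TEST_OBJECTIVE is not None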