def test(benchmark, env_name, pytest_args):
    pytest_args = ' '.join(pytest_args)
    if len(pytest_args) == 0:
        pytest_args = '-vl'

    env_option = ''
    if env_name is not None:
        create_conda_env(env_name, with_pytest=True)
        if _run_shell_in_conda_env("pytest --version",
                                   env_name=env_name) != 0:
            raise ModuleNotFoundError(
                f"pytest is not installed in conda env {env_name}.\n"
                f"Please run `conda install -n {env_name} pytest` to test the "
                "benchmark in this environment."
            )
        env_option = f'--test-env {env_name}'

    from benchopt.tests import __file__ as _bench_test_module
    BENCHMARK_TEST_FILE = (
        Path(_bench_test_module).parent / "test_benchmarks.py"
    )

    cmd = (
        f'pytest {pytest_args} {BENCHMARK_TEST_FILE} '
        f'--benchmark {benchmark} {env_option} '
        # Make sure to not modify sys.path to add test file from current env
        # in sub conda env as there might be different python versions.
        '--import-mode importlib'
    )
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
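# Hedged illustration (not from the source): with a hypothetical benchmark
# folder ``./benchmark_lasso`` and test env ``benchopt_test_env``, the `test`
# command above would hand a shell command along these lines to
# `_run_shell_in_conda_env`:
example_cmd = (
    "pytest -vl /path/to/benchopt/tests/test_benchmarks.py "
    "--benchmark ./benchmark_lasso --test-env benchopt_test_env "
    "--import-mode importlib"
)
# The `--import-mode importlib` flag keeps pytest from adding the test file's
# directory to sys.path, so the sub conda env can run a different Python
# version than the one that launched the command.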
def run(benchmark, solver_names, forced_solvers, dataset_names,
        objective_filters, max_runs, n_repetitions, timeout,
        recreate=False, local=True, no_plot=False):
    """Run a benchmark in a separate conda env where the deps will be
    installed."""
    # Check that the dataset/solver patterns match actual datasets/solvers
    validate_dataset_patterns(benchmark, dataset_names)
    validate_solver_patterns(benchmark, solver_names + forced_solvers)

    if local:
        run_benchmark(benchmark, solver_names, forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs, n_repetitions=n_repetitions,
                      timeout=timeout, plot_result=not no_plot)
        return

    benchmark_name = Path(benchmark).resolve().name
    env_name = f"benchopt_{benchmark_name}"
    create_conda_env(env_name, recreate=recreate)

    # Install the required datasets
    install_required_datasets(benchmark, dataset_names, env_name=env_name)

    # Get the solvers and install them
    install_required_solvers(benchmark, solver_names,
                             forced_solvers=forced_solvers,
                             env_name=env_name)

    # Run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    objective_option = ' '.join(['-p ' + p for p in objective_filters])
    cmd = (f"benchopt run {benchmark} --local "
           f"--n-repetitions {n_repetitions} "
           f"--max-runs {max_runs} --timeout {timeout} "
           f"{solvers_option} {forced_solvers_option} "
           f"{datasets_option} {objective_option} "
           f"{'--no-plot' if no_plot else ''} ")
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
def test_env_name(request):
    global _TEST_ENV_NAME

    if _TEST_ENV_NAME is None:
        env_name = request.config.getoption("--test-env")
        recreate = request.config.getoption("--recreate")
        if env_name is None:
            env_name = f"_benchopt_test_env_{uuid.uuid4()}"
            request.addfinalizer(delete_test_env)

        _TEST_ENV_NAME = env_name
        create_conda_env(_TEST_ENV_NAME, recreate=recreate)

    return _TEST_ENV_NAME
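# Sketch of the conftest.py plumbing the fixture above relies on. This is an
# assumption about how the ``--test-env`` and ``--recreate`` options could be
# declared, not a copy of the project's actual conftest.
def pytest_addoption(parser):
    parser.addoption("--test-env", default=None,
                     help="Run the benchmark tests in this conda env.")
    parser.addoption("--recreate", action="store_true", default=False,
                     help="Recreate the test conda env before running.")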
def test(benchmark_dir, env_name, pytest_args):
    pytest_args = ' '.join(pytest_args)
    if len(pytest_args) == 0:
        pytest_args = '-vl'

    env_option = ''
    if env_name is not None:
        create_conda_env(env_name, with_pytest=True)
        env_option = f'--test-env {env_name}'

    cmd = (f'pytest {pytest_args} {BENCHMARK_TEST_FILE} '
           f'--benchmark {benchmark_dir} {env_option}')
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
def test(benchmark_dir, env_name, pytest_args):
    pytest_args = ' '.join(pytest_args)
    if len(pytest_args) == 0:
        pytest_args = '-vl'

    env_option = ''
    if env_name is not None:
        create_conda_env(env_name, with_pytest=True)
        if _run_shell_in_conda_env("pytest --version",
                                   env_name=env_name) != 0:
            raise ModuleNotFoundError(
                f"pytest is not installed in conda env {env_name}.\n"
                f"Please run `conda install -n {env_name} pytest` to test the "
                "benchmark in this environment."
            )
        env_option = f'--test-env {env_name}'

    cmd = (f'pytest {pytest_args} {BENCHMARK_TEST_FILE} '
           f'--benchmark {benchmark_dir} {env_option}')
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
def run(benchmark, solver_names, forced_solvers, dataset_names,
        objective_filters, max_runs, n_repetitions, timeout,
        recreate=False, no_plot=False, pdb=False, env_name=False):
    """Run a benchmark in a separate conda env where the deps will be
    installed."""
    # Check that the dataset/solver patterns match actual datasets/solvers
    validate_benchmark(benchmark)
    validate_dataset_patterns(benchmark, dataset_names)
    validate_solver_patterns(benchmark, solver_names + forced_solvers)

    # If env_name is False, the flag `--local` has been used (default) so
    # run in the current environment.
    if env_name is False:
        run_benchmark(benchmark, solver_names, forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs, n_repetitions=n_repetitions,
                      timeout=timeout, plot_result=not no_plot, pdb=pdb)
        return

    # If env_name is True, the flag `--env` has been used. Create a conda env
    # specific to the benchmark. Else, use the <env_name> value.
    if env_name is True:
        benchmark_name = Path(benchmark).resolve().name
        env_name = f"benchopt_{benchmark_name}"
    create_conda_env(env_name, recreate=recreate)

    # Install the required datasets
    install_required_datasets(benchmark, dataset_names, env_name=env_name)

    # Get the solvers and install them
    install_required_solvers(benchmark, solver_names,
                             forced_solvers=forced_solvers,
                             env_name=env_name)

    # Run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    objective_option = ' '.join(['-p ' + p for p in objective_filters])
    cmd = (
        rf"benchopt run {benchmark} --local --n-repetitions {n_repetitions} "
        rf"--max-runs {max_runs} --timeout {timeout} "
        rf"{solvers_option} {forced_solvers_option} "
        rf"{datasets_option} {objective_option} "
        rf"{'--no-plot' if no_plot else ''} "
        rf"{'--pdb' if pdb else ''} ".replace('\\', '\\\\'))
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
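# Hedged usage note for the variant above: `env_name` acts as a tri-state
# switch. These calls are illustrative only; argument values are hypothetical.
#
#   run(benchmark, ..., env_name=False)     # `--local` (default): current env
#   run(benchmark, ..., env_name=True)      # `--env`: dedicated "benchopt_<name>" env
#   run(benchmark, ..., env_name="my_env")  # run inside the named conda env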
def run(benchmark, solver_names, forced_solvers, dataset_names,
        objective_filters, max_runs, n_repetitions, timeout,
        recreate=False, plot=True, pdb=False, env_name='False'):
    from benchopt.runner import run_benchmark

    # Check that the dataset/solver patterns match actual datasets/solvers
    benchmark = Benchmark(benchmark)
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names + forced_solvers)

    # If env_name is 'False', the flag `--local` has been used (default) so
    # run in the current environment.
    if env_name == 'False':
        run_benchmark(benchmark, solver_names, forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs, n_repetitions=n_repetitions,
                      timeout=timeout, plot_result=plot, pdb=pdb)
        return

    # If env_name is 'True', the flag `--env` has been used. Create a conda
    # env specific to the benchmark. Else, use the <env_name> value.
    if env_name == 'True':
        env_name = f"benchopt_{benchmark.name}"
    create_conda_env(env_name, recreate=recreate)

    # Install the required datasets
    benchmark.install_required_datasets(dataset_names, env_name=env_name)

    # Get the solvers and install them
    benchmark.install_required_solvers(solver_names,
                                       forced_solvers=forced_solvers,
                                       env_name=env_name)

    # Run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    objective_option = ' '.join(['-p ' + p for p in objective_filters])
    cmd = (rf"benchopt run --local {benchmark.benchmark_dir} "
           rf"--n-repetitions {n_repetitions} "
           rf"--max-runs {max_runs} --timeout {timeout} "
           rf"{solvers_option} {forced_solvers_option} "
           rf"{datasets_option} {objective_option} "
           rf"{'--plot' if plot else '--no-plot'} "
           rf"{'--pdb' if pdb else ''} ".replace('\\', '\\\\'))
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
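# Hedged illustration (hypothetical paths and patterns, not from the source):
# for a benchmark folder ``./benchmark_lasso`` run with one solver and one
# dataset pattern, the command string assembled above would look roughly like:
example_cmd = (
    "benchopt run --local ./benchmark_lasso --n-repetitions 5 "
    "--max-runs 100 --timeout 1000 -s sklearn -d simulated --plot"
)
# The options the user passed to the outer `benchopt run` call are forwarded,
# with `--local` added so the inner invocation executes directly inside the
# dedicated conda env instead of recursing into env creation.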