def info(benchmark, solver_names, dataset_names, env_name='False',
         verbose=False):

    # benchmark
    benchmark = Benchmark(benchmark)
    print(f"Info regarding the benchmark '{benchmark.name}'")

    # validate solvers and datasets
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names)

    # get solvers and datasets in the benchmark
    all_solvers = benchmark.get_solvers()
    all_datasets = benchmark.get_datasets()

    # enable verbosity if any environment was provided
    if env_name is not None and env_name != 'False':
        verbose = True

    # conda env check only in verbose case
    if verbose:
        # Check conda env name
        env_name = check_conda_env(env_name, benchmark.name)

        # check conda environment validity
        check_benchopt = _run_shell_in_conda_env(
            "benchopt --version", env_name=env_name, capture_stdout=True
        )
        if check_benchopt != 0:
            warnings.warn(
                f"Environment '{env_name}' does not exist "
                "or is not configured for benchopt, "
                "benchmark requirement availability will not be checked, "
                "see the command `benchopt install`.",
                UserWarning
            )
            env_name = None
        else:
            print("Checking benchmark requirement availability "
                  f"in env '{env_name}'.")

        print("Note: you can install all dependencies from a benchmark "
              "with the command `benchopt install`.")

    # enable verbosity if any solver/dataset are specified in input
    if dataset_names or solver_names:
        verbose = True

    # print information
    print("-" * 10)

    if not dataset_names and not solver_names:
        dataset_names = ['all']
        solver_names = ['all']

    if dataset_names:
        print("# DATASETS", flush=True)
        print_info(dataset_names, all_datasets, env_name, verbose)

    if solver_names:
        print("# SOLVERS", flush=True)
        print_info(solver_names, all_solvers, env_name, verbose)
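
# Usage sketch (illustrative only, kept as a comment): the benchmark path,
# patterns and conda env name below are hypothetical placeholders, not values
# taken from this repository.
#
#     info(
#         './my_benchmark/',
#         solver_names=['python-pgd*'],
#         dataset_names=['simulated*'],
#         env_name='my_conda_env',  # a real env name turns on the
#                                   # `benchopt --version` requirement check
#     )
#
# With the default env_name='False' and empty pattern lists, the function
# simply lists every dataset and solver found in the benchmark.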
TEST_BENCHMARK_DIR = Path(__file__).parent / 'test_benchmarks'
DUMMY_BENCHMARK_PATH = TEST_BENCHMARK_DIR / 'dummy_benchmark'

# Pattern to select specific datasets or solvers.
SELECT_ONE_SIMULATED = r'simulated*500*rho=0]'
SELECT_ONE_PGD = r'python-pgd*step_size=1]'
SELECT_ONE_OBJECTIVE = r'dummy*reg=0.1]'

try:
    DUMMY_BENCHMARK = Benchmark(DUMMY_BENCHMARK_PATH)
    TEST_OBJECTIVE = DUMMY_BENCHMARK.get_benchmark_objective()
    TEST_SOLVER = [
        s for s in DUMMY_BENCHMARK.get_solvers() if s.name == "Test-Solver"
    ][0]
    TEST_DATASET = [
        d for d in DUMMY_BENCHMARK.get_datasets() if d.name == "Test-Dataset"
    ][0]
except Exception:
    DUMMY_BENCHMARK = None
    TEST_OBJECTIVE = None
    TEST_SOLVER = None
    TEST_DATASET = None


class CaptureRunOutput(object):
    """Context to capture run cmd output and files."""

    def __init__(self):
        self.out = SuppressStd()
        self.output = None
        self.result_files = []
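
# Usage sketch (illustrative only, kept as a comment): it assumes
# CaptureRunOutput completes the context-manager protocol further down in
# this module (setting `self.output` on exit), and `run_cmd` is a
# hypothetical stand-in for whatever command a test exercises.
#
#     def test_select_one_pgd():
#         assert DUMMY_BENCHMARK is not None, "dummy benchmark not loaded"
#         with CaptureRunOutput() as out:
#             run_cmd([str(DUMMY_BENCHMARK_PATH), '-s', SELECT_ONE_PGD])
#         assert 'python-pgd' in out.output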