Example #1
def info(benchmark,
         solver_names,
         dataset_names,
         env_name='False',
         verbose=False):

    # benchmark
    benchmark = Benchmark(benchmark)
    print(f"Info regarding the benchmark '{benchmark.name}'")

    # validate solvers and datasets
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names)

    # get solvers and datasets in the benchmark
    all_solvers = benchmark.get_solvers()
    all_datasets = benchmark.get_datasets()

    # enable verbosity if any environment was provided
    if env_name is not None and env_name != 'False':
        verbose = True

    # conda env check only in verbose case
    if verbose:
        # Check conda env name
        env_name = check_conda_env(env_name, benchmark.name)
        # check conda environment validity
        check_benchopt = _run_shell_in_conda_env("benchopt --version",
                                                 env_name=env_name,
                                                 capture_stdout=True)
        if check_benchopt != 0:
            warnings.warn(
                f"Environment '{env_name}' does not exist "
                "or is not configured for benchopt, "
                "benchmark requirement availability will not be checked, "
                "see the command `benchopt install`.", UserWarning)
            env_name = None
        else:
            print("Checking benchmark requirement availability "
                  f"in env '{env_name}'.")
            print("Note: you can install all dependencies from a benchmark "
                  "with the command `benchopt install`.")
    # enable verbosity if any solvers/datasets are specified as input
    if dataset_names or solver_names:
        verbose = True

    # print information
    print("-" * 10)

    if not dataset_names and not solver_names:
        dataset_names = ['all']
        solver_names = ['all']

    if dataset_names:
        print("# DATASETS", flush=True)
        print_info(dataset_names, all_datasets, env_name, verbose)

    if solver_names:
        print("# SOLVERS", flush=True)
        print_info(solver_names, all_solvers, env_name, verbose)
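
A minimal usage sketch of the `info` command above; the benchmark path and the
solver/dataset names are hypothetical. Passing an environment name turns on
verbosity, so requirement availability is checked in that conda env.

# Hypothetical direct call: list the datasets/solvers of a local benchmark
# and check requirement availability in the conda env 'my_env'.
info("./benchmark_lasso",
     solver_names=["python-pgd"],
     dataset_names=["simulated"],
     env_name="my_env")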
Example #2
def install(benchmark, solver_names, dataset_names, force=False,
            recreate=False, env_name='False', confirm=False, quiet=False):

    # Check that the dataset/solver patterns match actual datasets/solvers
    benchmark = Benchmark(benchmark)
    print(f"Installing '{benchmark.name}' requirements")
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names)

    # Get a list of all conda envs
    default_conda_env, conda_envs = list_conda_envs()

    # If env_name is False (default), install in the current environment.
    if env_name == 'False':
        env_name = None
        # incompatible with the 'recreate' flag to avoid messing with the
        # user environment
        if recreate:
            msg = "Cannot recreate conda env without using options " + \
                "'-e/--env' or '--env-name'."
            raise RuntimeError(msg)

        # check whether a conda environment is currently activated
        if default_conda_env is not None:
            # ask for user confirmation to install in current conda env
            if not confirm:
                click.confirm(
                    f"Install in the current env '{default_conda_env}'?",
                    abort=True
                )
        else:
            raise RuntimeError("No conda environment is activated.")
    else:
        # If env_name is True, the flag `--env` has been used. Create a conda
        # env specific to the benchmark. Else, use the <env_name> value.
        if env_name == 'True':
            env_name = f"benchopt_{benchmark.name}"
        else:
            # check provided <env_name>
            # (to avoid empty name like `--env-name ""`)
            if len(env_name) == 0:
                raise RuntimeError("Empty environment name.")
            # avoid recreating the 'base' conda env
            if env_name == 'base' and recreate:
                raise RuntimeError(
                    "Impossible to recreate 'base' conda environment."
                )

        # create environment if necessary
        create_conda_env(env_name, recreate=recreate, quiet=quiet)

    # install requirements
    print("# Install", flush=True)
    benchmark.install_all_requirements(
        include_solvers=solver_names, include_datasets=dataset_names,
        env_name=env_name, force=force, quiet=quiet,
    )
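
A short usage sketch for `install`; the benchmark path and component names are
hypothetical. The string sentinel env_name='True' corresponds to the `-e/--env`
flag and creates a dedicated conda env named after the benchmark, matching the
`benchopt install -e <benchmark_dir>` command strings built in the later examples.

# Hypothetical direct call: create (or reuse) the env 'benchopt_<benchmark.name>'
# and install the matching solver/dataset requirements in it.
install("./benchmark_lasso",
        solver_names=["python-pgd"],
        dataset_names=["simulated"],
        env_name="True")  # sentinel set by the `-e/--env` flag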
Example #3
def run(config_file=None, **kwargs):
    if config_file is not None:
        with open(config_file, "r") as f:
            config = yaml.safe_load(f)
    else:
        config = {}

    # XXX - Remove old and deprecated objective filters in version 1.3
    (benchmark, solver_names, forced_solvers, dataset_names, objective_filters,
     max_runs, n_repetitions, timeout, n_jobs, slurm, plot, html, pdb,
     do_profile, env_name, output, deprecated_objective_filters,
     old_objective_filters) = _get_run_args(kwargs, config)

    if len(old_objective_filters):
        warnings.warn(
            'Using the -p option is deprecated, use -o instead',
            FutureWarning,
        )
        objective_filters = old_objective_filters

    if len(deprecated_objective_filters):
        warnings.warn(
            'Using the --objective-filters option is deprecated, '
            'use --objective instead', FutureWarning)
        objective_filters = deprecated_objective_filters

    # Create the Benchmark object
    benchmark = Benchmark(benchmark)

    # If env_name is False, the flag `--local` has been used (default) so
    # run in the current environment.
    if env_name == 'False':

        print("Benchopt is running")
        if slurm is not None:
            print("Running on SLURM")
            set_slurm_launch()

        from benchopt.runner import run_benchmark

        if do_profile:
            from benchopt.utils.profiling import use_profile
            use_profile()  # needs to be called before validate_solver_patterns

        # Check that the dataset/solver patterns match actual datasets/solvers
        benchmark.validate_dataset_patterns(dataset_names)
        benchmark.validate_objective_filters(objective_filters)
        # pyyaml returns tuples: make sure everything is a list
        benchmark.validate_solver_patterns(
            list(solver_names) + list(forced_solvers))

        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs,
                      n_repetitions=n_repetitions,
                      timeout=timeout,
                      n_jobs=n_jobs,
                      slurm=slurm,
                      plot_result=plot,
                      html=html,
                      pdb=pdb,
                      output=output)

        print_stats()  # print profiling stats (does nothing if not profiling)

        return

    default_conda_env, all_conda_envs = list_conda_envs()

    # If env_name is True, the flag `--env` has been used. Create a conda env
    # specific to the benchmark (if not existing).
    # Else, use the <env_name> value.

    # check whether a conda environment is currently activated
    if default_conda_env is None:
        raise RuntimeError(
            "No conda environment is activated. "
            "You should be in a conda environment to use "
            "'benchopt run' with options '-e/--env' or '--env-name'.")

    if env_name == 'True':
        env_name = f"benchopt_{benchmark.name}"
        install_cmd = f"`benchopt install -e {benchmark.benchmark_dir}`"
    else:
        # check provided <env_name>
        # (to avoid empty name like `--env-name ""`)
        if len(env_name) == 0:
            raise RuntimeError("Empty environment name.")

        install_cmd = (f"`benchopt install --env-name {env_name} "
                       f"{benchmark.benchmark_dir}`")

    # check if the environment exists
    if env_name not in all_conda_envs:
        raise RuntimeError(
            f"The default env '{env_name}' for benchmark {benchmark.name} "
            f"does not exist. Make sure to run {install_cmd} to create the "
            "benchmark and install the dependencies.")

    print(f"Launching benchopt in env {env_name}")

    # check if environment was set up with benchopt
    benchopt_version, is_editable = get_benchopt_version_in_env(env_name)
    if benchopt_version is None:
        raise RuntimeError(
            f"benchopt is not installed in env '{env_name}', "
            "see the command `benchopt install` to setup the environment.")
    # check against running version
    from benchopt import __version__ as benchopt_version_running
    _, is_editable_running = get_benchopt_requirement()
    if (benchopt_version_running != benchopt_version
            and not (is_editable_running and is_editable)):
        warnings.warn(
            f"Benchopt running version ({benchopt_version_running}) "
            f"and version in env {env_name} ({benchopt_version}) differ")

    # run the command in the conda env
    solvers_option = ' '.join([f"-s '{s}'" for s in solver_names])
    forced_solvers_option = ' '.join([f"-f '{s}'" for s in forced_solvers])
    datasets_option = ' '.join([f"-d '{d}'" for d in dataset_names])
    objective_option = ' '.join([f"-o '{o}'" for o in objective_filters])
    cmd = (rf"benchopt run --local {benchmark.benchmark_dir} "
           rf"--n-repetitions {n_repetitions} "
           rf"--max-runs {max_runs} --timeout {timeout} "
           rf"--n-jobs {n_jobs} {'--slurm' if slurm else ''} "
           rf"{solvers_option} {forced_solvers_option} "
           rf"{datasets_option} {objective_option} "
           rf"{'--plot' if plot else '--no-plot'} "
           rf"{'--html' if html else '--no-html'} "
           rf"{'--pdb' if pdb else ''} "
           rf"--output {output}".replace('\\', '\\\\'))
    raise SystemExit(
        _run_shell_in_conda_env(cmd, env_name=env_name, capture_stdout=False)
        != 0)
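
When an environment is requested, this version of `run` does not execute the
benchmark itself: it re-invokes the CLI inside the target conda env with the
`--local` flag. With hypothetical values (one solver, one dataset, default plot
and html flags, no SLURM, no pdb), the `cmd` string built above expands to
roughly the following.

# Rough shape of the re-executed command, with hypothetical values.
cmd = ("benchopt run --local ./benchmark_lasso --n-repetitions 5 "
       "--max-runs 100 --timeout 100 --n-jobs 1 -s 'python-pgd' "
       "-d 'simulated' --plot --html --output out")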
Example #4
def run(benchmark,
        solver_names,
        forced_solvers,
        dataset_names,
        objective_filters,
        max_runs,
        n_repetitions,
        timeout,
        recreate=False,
        plot=True,
        pdb=False,
        env_name='False'):

    # Check that the dataset/solver patterns match actual datasets/solvers
    benchmark = Benchmark(benchmark)
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names + forced_solvers)

    # If env_name is False, the flag `--local` has been used (default) so
    # run in the current environment.
    if env_name == 'False':
        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs,
                      n_repetitions=n_repetitions,
                      timeout=timeout,
                      plot_result=plot,
                      pdb=pdb)
        return

    # If env_name is True, the flag `--env` has been used. Create a conda env
    # specific to the benchmark. Else, use the <env_name> value.
    if env_name == 'True':
        env_name = f"benchopt_{benchmark.name}"
    create_conda_env(env_name, recreate=recreate)

    # install the required datasets
    benchmark.install_required_datasets(dataset_names, env_name=env_name)

    # Get the solvers and install them
    benchmark.install_required_solvers(solver_names,
                                       forced_solvers=forced_solvers,
                                       env_name=env_name)

    # run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    objective_option = ' '.join(['-p ' + p for p in objective_filters])
    cmd = (rf"benchopt run --local {benchmark.benchmark_dir} "
           rf"--n-repetitions {n_repetitions} "
           rf"--max-runs {max_runs} --timeout {timeout} "
           rf"{solvers_option} {forced_solvers_option} "
           rf"{datasets_option} {objective_option} "
           rf"{'--plot' if plot else '--no-plot'} "
           rf"{'--pdb' if pdb else ''} ".replace('\\', '\\\\'))
    raise SystemExit(
        _run_shell_in_conda_env(cmd, env_name=env_name, capture_stdout=False)
        != 0)
Example #5
def run(benchmark, solver_names, forced_solvers, dataset_names,
        objective_filters, max_runs, n_repetitions, timeout,
        plot=True, html=True, pdb=False, do_profile=False,
        env_name='False', old_objective_filters=None):
    if len(old_objective_filters):
        warnings.warn(
            'Using the -p option is deprecated, use -o instead',
            FutureWarning,
        )
        objective_filters = old_objective_filters

    from benchopt.runner import run_benchmark

    if do_profile:
        from benchopt.utils.profiling import use_profile
        use_profile()  # needs to be called before validate_solver_patterns

    # Check that the dataset/solver patterns match actual datasets/solvers
    benchmark = Benchmark(benchmark)
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names + forced_solvers)
    benchmark.validate_objective_filters(objective_filters)

    # If env_name is False, the flag `--local` has been used (default) so
    # run in the current environment.
    if env_name == 'False':
        run_benchmark(
            benchmark, solver_names, forced_solvers,
            dataset_names=dataset_names,
            objective_filters=objective_filters,
            max_runs=max_runs, n_repetitions=n_repetitions,
            timeout=timeout, plot_result=plot, html=html, pdb=pdb
        )

        print_stats()  # print profiling stats (does nothing if not profiling)

        return

    _, all_conda_envs = list_conda_envs()
    # If env_name is True, the flag `--env` has been used. Create a conda env
    # specific to the benchmark (if not existing).
    # Else, use the <env_name> value.
    if env_name == 'True':
        env_name = f"benchopt_{benchmark.name}"
        install_cmd = f"`benchopt install -e {benchmark.benchmark_dir}`"
    else:
        # check provided <env_name>
        # (to avoid empty name like `--env-name ""`)
        if len(env_name) == 0:
            raise RuntimeError("Empty environment name.")

        install_cmd = (
            f"`benchopt install --env-name {env_name} "
            f"{benchmark.benchmark_dir}`"
        )

    # check if the environment exists
    if env_name not in all_conda_envs:
        raise RuntimeError(
            f"The default env '{env_name}' for benchmark {benchmark.name} "
            f"does not exist. Make sure to run {install_cmd} to create the "
            "benchmark and install the dependencies."
        )

    # check if environment was set up with benchopt
    if get_benchopt_version_in_env(env_name) is None:
        raise RuntimeError(
            f"benchopt is not installed in env '{env_name}', "
            "see the command `benchopt install` to setup the environment."
        )

    # run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join([f"-f '{s}'" for s in forced_solvers])
    datasets_option = ' '.join([f"-d '{d}'" for d in dataset_names])
    objective_option = ' '.join([f"-p '{p}'" for p in objective_filters])
    cmd = (
        rf"benchopt run --local {benchmark.benchmark_dir} "
        rf"--n-repetitions {n_repetitions} "
        rf"--max-runs {max_runs} --timeout {timeout} "
        rf"{solvers_option} {forced_solvers_option} "
        rf"{datasets_option} {objective_option} "
        rf"{'--plot' if plot else '--no-plot'} "
        rf"{'--html' if html else '--no-html'} "
        rf"{'--pdb' if pdb else ''} "
        .replace('\\', '\\\\')
    )
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)
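
A hedged sketch of a direct call to this older `run` variant, with hypothetical
benchmark and filter values, exercising the deprecated `-p` path: a non-empty
old_objective_filters list triggers the FutureWarning and overrides
objective_filters before the benchmark is run in the current environment.

# Hypothetical call: the deprecated -p filter is still honored, with a warning.
run("./benchmark_lasso",
    solver_names=["python-pgd"], forced_solvers=[],
    dataset_names=["simulated"], objective_filters=[],
    max_runs=100, n_repetitions=5, timeout=100,
    env_name="False",                     # run locally, in the current env
    old_objective_filters=["reg=0.5"])    # deprecated -p value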