Example #1
def install(benchmark, solver_names, dataset_names, force=False,
            recreate=False, env_name='False', confirm=False, quiet=False):

    # Check that the dataset/solver patterns match actual datasets/solvers
    benchmark = Benchmark(benchmark)
    print(f"Installing '{benchmark.name}' requirements")
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names)

    # Get a list of all conda envs
    default_conda_env, conda_envs = list_conda_envs()

    # If env_name is 'False' (default), install in the current environment.
    if env_name == 'False':
        env_name = None
        # This is incompatible with the 'recreate' flag, to avoid messing
        # with the user environment.
        if recreate:
            msg = "Cannot recreate conda env without using options " + \
                "'-e/--env' or '--env-name'."
            raise RuntimeError(msg)

        # check if any current conda environment
        if default_conda_env is not None:
            # ask for user confirmation to install in current conda env
            if not confirm:
                click.confirm(
                    f"Install in the current env '{default_conda_env}'?",
                    abort=True
                )
        else:
            raise RuntimeError("No conda environment is activated.")
    else:
        # If env_name is 'True', the flag `--env` has been used. Create a conda
        # env specific to the benchmark. Else, use the <env_name> value.
        if env_name == 'True':
            env_name = f"benchopt_{benchmark.name}"
        else:
            # check provided <env_name>
            # (to avoid empty name like `--env-name ""`)
            if len(env_name) == 0:
                raise RuntimeError("Empty environment name.")
            # avoid recreating the 'base' conda env
            if env_name == 'base' and recreate:
                raise RuntimeError(
                    "Impossible to recreate 'base' conda environment."
                )

        # create environment if necessary
        create_conda_env(env_name, recreate=recreate, quiet=quiet)

    # install requirements
    print("# Install", flush=True)
    benchmark.install_all_requirements(
        include_solvers=solver_names, include_datasets=dataset_names,
        env_name=env_name, force=force, quiet=quiet,
    )
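
A minimal usage sketch (not part of the original snippet; the benchmark path
and environment name below are hypothetical): `env_name` is a string sentinel
coming from the CLI layer, so direct calls might look like this.

    # Hypothetical calls; './benchmark_lasso' and 'my_env' are made-up names.
    install('./benchmark_lasso', solver_names=[], dataset_names=[])   # current conda env
    install('./benchmark_lasso', solver_names=[], dataset_names=[],
            env_name='True')                  # dedicated env 'benchopt_<benchmark>'
    install('./benchmark_lasso', solver_names=[], dataset_names=[],
            env_name='my_env', recreate=True, quiet=True)   # named env, recreated
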
Example #2
def check_conda_env(env_name, benchmark_name=None):
    """Return name of valid and existing conda environment.

    Parameters
    ----------
    env_name : str | None
        Expected name of conda environment to be used.
        If 'False', the name of the currently activated (default) conda
        environment is returned.
        If 'True', the benchmark-specific conda environment name, i.e.
        "benchopt_{benchmark_name}", is returned.
        If None, None is returned.
        Otherwise, 'env_name' is returned.
        In every case except None, the existence of the conda environment
        is checked.
    benchmark_name : str | None
        Name of the benchmark that will be used.
        Unused unless env_name=='True'.

    Returns
    -------
    env_name : str | None
        Name of valid conda environment or None.
    """
    # Check conda env (if relevant)
    if env_name is not None:

        # Get a list of all conda envs
        default_conda_env, conda_envs = list_conda_envs()

        # If env_name is 'False' (default), check availability
        # in the current environment.
        if env_name == 'False':
            # check if any current conda environment
            if default_conda_env is not None:
                env_name = default_conda_env
            else:
                raise RuntimeError("No conda environment is activated.")
        else:
            # If env_name is 'True', the flag `--env` has been used.
            # Check the conda env dedicated to the benchmark.
            # Else, use the <env_name> value.
            if env_name == 'True':
                env_name = f"benchopt_{benchmark_name}"
            else:
                # check provided <env_name>
                # (to avoid empty name like `--env-name ""`)
                if len(env_name) == 0:
                    raise RuntimeError("Empty environment name.")
                if env_name not in conda_envs:
                    raise RuntimeError(
                        f"{env_name} is not an existing conda environment.")
    # output
    return env_name
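
A minimal usage sketch following the docstring above (hypothetical values,
'benchmark_lasso' is a made-up benchmark name):

    # 'True'  -> benchmark-specific env name, checked against existing conda envs
    env = check_conda_env('True', benchmark_name='benchmark_lasso')
    # 'False' -> name of the currently activated conda env
    env = check_conda_env('False')
    # None    -> no check is performed, None is returned
    env = check_conda_env(None)
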
Example #3
def complete_conda_envs(ctx, param, incomplete):
    "Auto-completion for env-names."
    _, all_envs = list_conda_envs()
    return propose_from_list(all_envs, incomplete)
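
The (ctx, param, incomplete) signature matches click's shell-completion
callbacks, so a plausible wiring, assuming click >= 8, would be the following
(this wiring is an assumption, it is not shown in the source):

    import click

    @click.command()
    @click.option('--env-name', shell_complete=complete_conda_envs)  # assumed wiring
    def cli(env_name):
        ...
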
Example #4
def run(config_file=None, **kwargs):
    if config_file is not None:
        with open(config_file, "r") as f:
            config = yaml.safe_load(f)
    else:
        config = {}

    # XXX - Remove old and deprecated objective filters in version 1.3
    (benchmark, solver_names, forced_solvers, dataset_names, objective_filters,
     max_runs, n_repetitions, timeout, n_jobs, slurm, plot, html, pdb,
     do_profile, env_name, output, deprecated_objective_filters,
     old_objective_filters) = _get_run_args(kwargs, config)

    if len(old_objective_filters):
        warnings.warn(
            'Using the -p option is deprecated, use -o instead',
            FutureWarning,
        )
        objective_filters = old_objective_filters

    if len(deprecated_objective_filters):
        warnings.warn(
            'Using the --objective-filters option is deprecated, '
            'use --objective instead', FutureWarning)
        objective_filters = deprecated_objective_filters

    # Create the Benchmark object
    benchmark = Benchmark(benchmark)

    # If env_name is 'False', the flag `--local` has been used (default), so
    # run in the current environment.
    if env_name == 'False':

        print("Benchopt is running")
        if slurm is not None:
            print("Running on SLURM")
            set_slurm_launch()

        from benchopt.runner import run_benchmark

        if do_profile:
            from benchopt.utils.profiling import use_profile
            use_profile()  # needs to be called before validate_solver_patterns

        # Check that the dataset/solver patterns match actual datasets/solvers
        benchmark.validate_dataset_patterns(dataset_names)
        benchmark.validate_objective_filters(objective_filters)
        # pyyaml returns tuples: make sure everything is a list
        benchmark.validate_solver_patterns(
            list(solver_names) + list(forced_solvers))

        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs,
                      n_repetitions=n_repetitions,
                      timeout=timeout,
                      n_jobs=n_jobs,
                      slurm=slurm,
                      plot_result=plot,
                      html=html,
                      pdb=pdb,
                      output=output)

        print_stats()  # print profiling stats (does nothing if not profiling)

        return

    default_conda_env, all_conda_envs = list_conda_envs()

    # If env_name is 'True', the flag `--env` has been used: use the conda env
    # dedicated to the benchmark. Else, use the <env_name> value.

    # check if any current conda environment
    if default_conda_env is None:
        raise RuntimeError(
            "No conda environment is activated. "
            "You should be in a conda environment to use "
            "'benchopt run' with options '-e/--env' or '--env-name'.")

    if env_name == 'True':
        env_name = f"benchopt_{benchmark.name}"
        install_cmd = f"`benchopt install -e {benchmark.benchmark_dir}`"
    else:
        # check provided <env_name>
        # (to avoid empty name like `--env-name ""`)
        if len(env_name) == 0:
            raise RuntimeError("Empty environment name.")

        install_cmd = (f"`benchopt install --env-name {env_name} "
                       f"{benchmark.benchmark_dir}`")

    # check if the environment exists
    if env_name not in all_conda_envs:
        raise RuntimeError(
            f"The default env '{env_name}' for benchmark {benchmark.name} "
            f"does not exist. Make sure to run {install_cmd} to create the "
            "benchmark and install the dependencies.")

    print(f"Launching benchopt in env {env_name}")

    # check if environment was set up with benchopt
    benchopt_version, is_editable = get_benchopt_version_in_env(env_name)
    if benchopt_version is None:
        raise RuntimeError(
            f"benchopt is not installed in env '{env_name}', "
            "see the command `benchopt install` to setup the environment.")
    # check against running version
    from benchopt import __version__ as benchopt_version_running
    _, is_editable_running = get_benchopt_requirement()
    if (benchopt_version_running != benchopt_version
            and not (is_editable_running and is_editable)):
        warnings.warn(
            f"Benchopt running version ({benchopt_version_running}) "
            f"and version in env {env_name} ({benchopt_version}) differ")

    # run the command in the conda env
    solvers_option = ' '.join([f"-s '{s}'" for s in solver_names])
    forced_solvers_option = ' '.join([f"-f '{s}'" for s in forced_solvers])
    datasets_option = ' '.join([f"-d '{d}'" for d in dataset_names])
    objective_option = ' '.join([f"-o '{o}'" for o in objective_filters])
    cmd = (rf"benchopt run --local {benchmark.benchmark_dir} "
           rf"--n-repetitions {n_repetitions} "
           rf"--max-runs {max_runs} --timeout {timeout} "
           rf"--n-jobs {n_jobs} {'--slurm' if slurm else ''} "
           rf"{solvers_option} {forced_solvers_option} "
           rf"{datasets_option} {objective_option} "
           rf"{'--plot' if plot else '--no-plot'} "
           rf"{'--html' if html else '--no-html'} "
           rf"{'--pdb' if pdb else ''} "
           rf"--output {output}".replace('\\', '\\\\'))
    raise SystemExit(
        _run_shell_in_conda_env(cmd, env_name=env_name, capture_stdout=False)
        != 0)
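
For illustration only, with made-up option values the command string assembled
above would resemble the following shell invocation (benchmark path, solver,
dataset and output names are hypothetical):

    benchopt run --local ./benchmark_lasso --n-repetitions 5 --max-runs 100 \
        --timeout 1000 --n-jobs 1  -s 'python-pgd' -f 'gd' -d 'simulated' \
        -o '*' --plot --no-html  --output results
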
Example #5
def run(benchmark, solver_names, forced_solvers, dataset_names,
        objective_filters, max_runs, n_repetitions, timeout,
        plot=True, html=True, pdb=False, do_profile=False,
        env_name='False', old_objective_filters=None):
    if len(old_objective_filters):
        warnings.warn(
            'Using the -p option is deprecated, use -o instead',
            FutureWarning,
        )
        objective_filters = old_objective_filters

    from benchopt.runner import run_benchmark

    if do_profile:
        from benchopt.utils.profiling import use_profile
        use_profile()  # needs to be called before validate_solver_patterns

    # Check that the dataset/solver patterns match actual datasets/solvers
    benchmark = Benchmark(benchmark)
    benchmark.validate_dataset_patterns(dataset_names)
    benchmark.validate_solver_patterns(solver_names + forced_solvers)
    benchmark.validate_objective_filters(objective_filters)

    # If env_name is 'False', the flag `--local` has been used (default), so
    # run in the current environment.
    if env_name == 'False':
        run_benchmark(
            benchmark, solver_names, forced_solvers,
            dataset_names=dataset_names,
            objective_filters=objective_filters,
            max_runs=max_runs, n_repetitions=n_repetitions,
            timeout=timeout, plot_result=plot, html=html, pdb=pdb
        )

        print_stats()  # print profiling stats (does nothing if not profiling)

        return

    _, all_conda_envs = list_conda_envs()
    # If env_name is 'True', the flag `--env` has been used: use the conda env
    # dedicated to the benchmark. Else, use the <env_name> value.
    if env_name == 'True':
        env_name = f"benchopt_{benchmark.name}"
        install_cmd = f"`benchopt install -e {benchmark.benchmark_dir}`"
    else:
        # check provided <env_name>
        # (to avoid empty name like `--env-name ""`)
        if len(env_name) == 0:
            raise RuntimeError("Empty environment name.")

        install_cmd = (
            f"`benchopt install --env-name {env_name} "
            f"{benchmark.benchmark_dir}`"
        )

    # check if the environment exists
    if env_name not in all_conda_envs:
        raise RuntimeError(
            f"The default env '{env_name}' for benchmark {benchmark.name} "
            f"does not exist. Make sure to run {install_cmd} to create the "
            "benchmark and install the dependencies."
        )

    # check if environment was set up with benchopt
    if get_benchopt_version_in_env(env_name) is None:
        raise RuntimeError(
            f"benchopt is not installed in env '{env_name}', "
            "see the command `benchopt install` to setup the environment."
        )

    # run the command in the conda env
    solvers_option = ' '.join([f"-s '{s}'" for s in solver_names])
    forced_solvers_option = ' '.join([f"-f '{s}'" for s in forced_solvers])
    datasets_option = ' '.join([f"-d '{d}'" for d in dataset_names])
    objective_option = ' '.join([f"-p '{p}'" for p in objective_filters])
    cmd = (
        rf"benchopt run --local {benchmark.benchmark_dir} "
        rf"--n-repetitions {n_repetitions} "
        rf"--max-runs {max_runs} --timeout {timeout} "
        rf"{solvers_option} {forced_solvers_option} "
        rf"{datasets_option} {objective_option} "
        rf"{'--plot' if plot else '--no-plot'} "
        rf"{'--html' if html else '--no-html'} "
        rf"{'--pdb' if pdb else ''} "
        .replace('\\', '\\\\')
    )
    raise SystemExit(_run_shell_in_conda_env(
        cmd, env_name=env_name, capture_stdout=False
    ) != 0)