Example #1
def run(benchmark,
        solver_names,
        forced_solvers,
        dataset_names,
        objective_filters,
        max_runs,
        n_repetitions,
        timeout,
        recreate=False,
        local=True,
        no_plot=False):
    """Run a benchmark in a separate conda env where the deps will be installed
    """

    # Check that the dataset/solver patterns match actual datasets/solvers
    validate_dataset_patterns(benchmark, dataset_names)
    validate_solver_patterns(benchmark, solver_names + forced_solvers)

    if local:
        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs,
                      n_repetitions=n_repetitions,
                      timeout=timeout,
                      plot_result=not no_plot)
        return

    benchmark_name = Path(benchmark).resolve().name
    env_name = f"benchopt_{benchmark_name}"
    create_conda_env(env_name, recreate=recreate)

    # Install the required datasets
    install_required_datasets(benchmark, dataset_names, env_name=env_name)

    # Get the solvers and install them
    install_required_solvers(benchmark,
                             solver_names,
                             forced_solvers=forced_solvers,
                             env_name=env_name)

    # run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    objective_option = ' '.join(['-p ' + p for p in objective_filters])
    cmd = (f"benchopt run {benchmark} --local --n-repetitions {n_repetitions} "
           f"--max-runs {max_runs} --timeout {timeout} "
           f"{solvers_option} {forced_solvers_option} "
           f"{datasets_option} {objective_option} "
           f"{'--no-plot' if no_plot else ''} ")
    raise SystemExit(
        _run_shell_in_conda_env(cmd, env_name=env_name, capture_stdout=False)
        != 0)
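For reference, here is a minimal sketch of how this `run` function could be called for a purely local run. It assumes `run` is directly callable in this form (in the real CLI it is typically wrapped by a command-line parser) and uses placeholder values for the benchmark path and filters.

# Minimal sketch with hypothetical arguments: local=True runs in the current
# environment, so the conda-env creation and install steps above are skipped.
run(
    "./benchmark_lasso",          # assumed path to a cloned benchmark
    solver_names=["Python-PGD"],
    forced_solvers=[],
    dataset_names=["Simulated"],
    objective_filters=[],
    max_runs=100,
    n_repetitions=5,
    timeout=100,
    local=True,
)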
Example #2
def run(benchmarks, solver_names, forced_solvers, max_samples, repetition):
    """Run benchmark."""

    all_benchmarks = get_all_benchmarks()
    if benchmarks == 'all':
        benchmarks = all_benchmarks

    check_benchmarks(benchmarks, all_benchmarks)

    for benchmark in benchmarks:
        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      max_samples=max_samples,
                      n_rep=repetition)
Example #3
def run(benchmark, solver_names, forced_solvers, dataset_names, max_samples,
        recreate, local, repetition):
    """Run a benchmark in a separate venv where the solvers will be installed
    """

    if local:
        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      dataset_names,
                      max_samples=max_samples,
                      n_rep=repetition)
        return

    # Create the virtual env
    create_venv(benchmark, recreate=recreate)

    # Install the required datasets
    install_required_datasets(benchmark, dataset_names, env_name=benchmark)

    # Get the solvers and install them
    solvers = list_benchmark_solvers(benchmark)
    exclude = get_benchmark_setting(benchmark, 'exclude_solvers')
    solvers = filter_solvers(solvers,
                             solver_names=solver_names,
                             forced_solvers=forced_solvers,
                             exclude=exclude)
    install_solvers(solvers=solvers,
                    forced_solvers=forced_solvers,
                    env_name=benchmark)

    # run the command in the virtual env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    cmd = (f"benchopt run -l --max-samples {max_samples} -n {repetition} "
           f"{solvers_option} {forced_solvers_option} {datasets_option} "
           f"{benchmark}")
    raise SystemExit(
        _run_bash_in_env(cmd, env_name=benchmark, capture_stdout=False))
Example #4
def run(benchmark,
        solver_names,
        forced_solvers,
        dataset_names,
        objective_filters,
        max_runs,
        n_repetitions,
        timeout,
        recreate=False,
        no_plot=False,
        pdb=False,
        env_name=False):
    """Run a benchmark in a separate conda env where the deps will be installed
    """

    # Check that the dataset/solver patterns match actual datasets/solvers
    validate_benchmark(benchmark)
    validate_dataset_patterns(benchmark, dataset_names)
    validate_solver_patterns(benchmark, solver_names + forced_solvers)

    # If env_name is False, the flag `--local` has been used (default), so
    # run in the current environment.
    if env_name is False:
        run_benchmark(benchmark,
                      solver_names,
                      forced_solvers,
                      dataset_names=dataset_names,
                      objective_filters=objective_filters,
                      max_runs=max_runs,
                      n_repetitions=n_repetitions,
                      timeout=timeout,
                      plot_result=not no_plot,
                      pdb=pdb)
        return

    # If env_name is True, the flag `--env` has been used: create a conda env
    # specific to the benchmark. Otherwise, use the given <env_name> value.
    if env_name is True:
        benchmark_name = Path(benchmark).resolve().name
        env_name = f"benchopt_{benchmark_name}"
    create_conda_env(env_name, recreate=recreate)

    # Install the required datasets
    install_required_datasets(benchmark, dataset_names, env_name=env_name)

    # Get the solvers and install them
    install_required_solvers(benchmark,
                             solver_names,
                             forced_solvers=forced_solvers,
                             env_name=env_name)

    # run the command in the conda env
    solvers_option = ' '.join(['-s ' + s for s in solver_names])
    forced_solvers_option = ' '.join(['-f ' + s for s in forced_solvers])
    datasets_option = ' '.join(['-d ' + d for d in dataset_names])
    objective_option = ' '.join(['-p ' + p for p in objective_filters])
    cmd = (
        rf"benchopt run {benchmark} --local --n-repetitions {n_repetitions} "
        rf"--max-runs {max_runs} --timeout {timeout} "
        rf"{solvers_option} {forced_solvers_option} "
        rf"{datasets_option} {objective_option} "
        rf"{'--no-plot' if no_plot else ''} "
        rf"{'--pdb' if pdb else ''} ".replace('\\', '\\\\'))
    raise SystemExit(
        _run_shell_in_conda_env(cmd, env_name=env_name, capture_stdout=False)
        != 0)
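The comments above describe a three-way convention for `env_name`: `False` runs in the current environment, `True` creates a benchmark-specific env named `benchopt_<benchmark_name>`, and a string selects a conda env by name. Below is a hedged sketch of the three call patterns, with placeholder arguments and the same caveat that `run` may be wrapped by a CLI parser in the real code.

# Placeholder arguments shared by the three hypothetical invocations below.
common = dict(solver_names=[], forced_solvers=[], dataset_names=[],
              objective_filters=[], max_runs=100, n_repetitions=5, timeout=100)

run("./benchmark_lasso", env_name=False, **common)     # run in the current environment
run("./benchmark_lasso", env_name=True, **common)      # create/use "benchopt_benchmark_lasso"
run("./benchmark_lasso", env_name="my_env", **common)  # create/use the conda env "my_env"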
Example #5
"""

import os
from pathlib import Path
import matplotlib.pyplot as plt
from benchopt import run_benchmark
from benchopt.viz import plot_benchmark, PLOT_KINDS

BENCHMARK_PATH = Path(os.getcwd()).parent / 'benchmarks' / 'lasso'

try:
    df = run_benchmark(
        str(BENCHMARK_PATH),
        ['Python-PGD*use_acceleration=False', 'R-PGD', 'Julia-PGD'],
        dataset_names=['Simulated*n_samples=100,n_features=500*'],
        objective_filters=['reg=0.5'],
        max_runs=100,
        timeout=100,
        n_repetitions=5,
        plot_result=False,
        show_progress=False)
except RuntimeError:
    raise RuntimeError(
        "This example can only work when Lasso benchmark is cloned in the "
        "example folder. Please run:\n"
        "$ git clone https://github.com/benchopt/benchmark_lasso "
        f"{BENCHMARK_PATH.resolve()}")

kinds = list(PLOT_KINDS.keys())
figs = plot_benchmark(df, benchmark=str(BENCHMARK_PATH), kinds=kinds)
plt.show()
Example #6
"""

import os
from pathlib import Path
import matplotlib.pyplot as plt
from benchopt import run_benchmark
from benchopt.viz import plot_benchmark, PLOT_KINDS

BENCHMARK_PATH = Path(os.getcwd()).parent / 'benchmarks' / 'logreg_l2'

try:
    df = run_benchmark(
        str(BENCHMARK_PATH), ['sklearn', 'lightning'],
        dataset_names=['Simulated*n_samples=200,n_features=500*'],
        max_runs=100,
        timeout=20,
        n_repetitions=3,
        plot_result=False,
        show_progress=False)
except RuntimeError:
    raise RuntimeError(
        "This example can only work when Lasso benchmark is cloned in the "
        "example folder. Please run:\n"
        "$ git clone https://github.com/benchopt/benchmark_logreg_l2 "
        f"{BENCHMARK_PATH.resolve()}")

kinds = list(PLOT_KINDS.keys())
figs = plot_benchmark(df, benchmark=str(BENCHMARK_PATH), kinds=kinds)
plt.show()
Example #7
import os
from pathlib import Path
import matplotlib.pyplot as plt
from benchopt import run_benchmark
from benchopt.benchmark import Benchmark
from benchopt.tests import SELECT_ONE_SIMULATED
from benchopt.plotting import plot_benchmark, PLOT_KINDS

BENCHMARK_PATH = Path(os.getcwd()).parent / 'benchmarks' / 'lasso'

try:
    df = run_benchmark(
        Benchmark(BENCHMARK_PATH),
        ['Python-PGD[^-]*use_acceleration=False', 'R-PGD', 'Julia-PGD'],
        dataset_names=[SELECT_ONE_SIMULATED],
        objective_filters=['reg=0.5'],
        max_runs=100,
        timeout=100,
        n_repetitions=5,
        plot_result=False,
        show_progress=False)
except RuntimeError:
    raise RuntimeError(
        "This example can only work when Lasso benchmark is cloned in the "
        "example folder. Please run:\n"
        "$ git clone https://github.com/benchopt/benchmark_lasso "
        f"{BENCHMARK_PATH.resolve()}")

kinds = list(PLOT_KINDS.keys())
figs = plot_benchmark(df, benchmark=Benchmark(BENCHMARK_PATH), kinds=kinds)
plt.show()
Example #8
from pathlib import Path
import matplotlib.pyplot as plt
from benchopt import run_benchmark
from benchopt.benchmark import Benchmark
from benchopt.plotting import plot_benchmark, PLOT_KINDS

BENCHMARK_PATH = (Path().resolve().parent / 'benchmarks' /
                  'benchmark_logreg_l2')

try:
    save_file = run_benchmark(
        Benchmark(BENCHMARK_PATH),
        ['sklearn[liblinear]', 'sklearn[newton-cg]', 'lightning'],
        dataset_names=['Simulated*[n_features=500,n_samples=200]'],
        objective_filters=['L2 Logistic Regression[lmbd=1.0]'],
        max_runs=100,
        timeout=20,
        n_repetitions=15,
        plot_result=False,
        show_progress=True)

except RuntimeError:
    raise RuntimeError(
        "This example can only work when Logreg-l2 benchmark is cloned in a "
        "`benchmarks` folder. Please run:\n"
        "$ git clone https://github.com/benchopt/benchmark_logreg_l2 "
        f"{BENCHMARK_PATH.resolve()}")

kinds = list(PLOT_KINDS.keys())
figs = plot_benchmark(save_file,
                      benchmark=Benchmark(BENCHMARK_PATH),
                      kinds=kinds,
                      html=False)
plt.show()
Example #9
from pathlib import Path
import matplotlib.pyplot as plt
from benchopt import run_benchmark
from benchopt.benchmark import Benchmark
from benchopt.plotting import plot_benchmark, PLOT_KINDS

BENCHMARK_PATH = Path().resolve().parent / 'benchmarks' / 'benchmark_lasso'

if not BENCHMARK_PATH.exists():
    raise RuntimeError(
        "This example can only work when Lasso benchmark is cloned in the "
        "example folder. Please run:\n"
        "$ git clone https://github.com/benchopt/benchmark_lasso "
        f"{BENCHMARK_PATH.resolve()}")

save_file = run_benchmark(
    Benchmark(BENCHMARK_PATH), ['Python-PGD[use_acceleration=False]', 'R-PGD'],
    dataset_names=["Simulated[n_features=5000,n_samples=100,rho=0]"],
    objective_filters=['*[fit_intercept=False,reg=0.5]'],
    max_runs=100,
    timeout=100,
    n_repetitions=5,
    plot_result=False,
    show_progress=False)

kinds = list(PLOT_KINDS.keys())
figs = plot_benchmark(save_file,
                      benchmark=Benchmark(BENCHMARK_PATH),
                      kinds=kinds,
                      html=False)
plt.show()
Example #10
from glob import glob

from benchopt import run_benchmark

# Collect every benchmark module and run them one by one.
benchs = glob("benchmarks/*/bench_*.py")
for b in benchs:
    # Convert 'benchmarks/<name>/bench_<x>.py' into the dotted module name
    # 'benchmarks.<name>.bench_<x>' before handing it to run_benchmark.
    b = b.replace('/', '.').replace('.py', '')
    run_benchmark(b)