def test_convergence_plot_options(options):
    """Smoke test: convergence_plot runs with the parametrized plot options."""
    problems = get_benchmark_problems("example")
    # Cap both stopping criteria so each optimizer quits quickly.
    early_stop = {
        "stopping_max_criterion_evaluations": 10,
        "stopping_max_iterations": 10,
    }
    optimizers = {
        name: {"algorithm": algo, "algo_options": early_stop}
        for name, algo in [("lbfgsb", "scipy_lbfgsb"), ("nm", "scipy_neldermead")]
    }
    results = run_benchmark(
        problems,
        optimizers,
        n_cores=1,  # must be 1 for the test to work
        logging_directory="logging",
    )

    convergence_plot(
        problems=problems,
        results=results,
        problem_subset=["bard_good_start"],
        **options,
    )
    plt.close()
# --- Exemplo n.º 2 ---
def test_run_benchmark_dict_options():
    """run_benchmark accepts a dict mapping custom names to optimizer configs."""
    all_problems = get_benchmark_problems("more_wild")
    # Restrict to the first two problems to keep the test fast.
    names = list(all_problems)[:2]
    subset = {key: all_problems[key] for key in names}

    optimize_options = {
        "default_lbfgsb": "scipy_lbfgsb",
        "tuned_lbfgsb": {
            "algorithm": "scipy_lbfgsb",
            "algo_options": {"convergence.relative_criterion_tolerance": 1e-10},
        },
    }

    result = run_benchmark(problems=subset, optimize_options=optimize_options)

    # Result keys are (problem_name, optimizer_name) pairs.
    expected_keys = {
        (problem, optimizer)
        for problem in ("linear_full_rank_good_start", "linear_full_rank_bad_start")
        for optimizer in ("default_lbfgsb", "tuned_lbfgsb")
    }
    assert set(result) == expected_keys
# --- Exemplo n.º 3 ---
def test_run_benchmark_failing():
    """A failing problem warns and stores the error text instead of a solution."""
    all_problems = get_benchmark_problems("more_wild")
    failing_name = "jennrich_sampson"
    # Run only the problem that is expected to fail.
    failing = {failing_name: all_problems[failing_name]}

    with pytest.warns():
        result = run_benchmark(
            problems=failing,
            optimize_options=["scipy_lbfgsb"],
        )

    # On failure the "solution" entry holds a string (the error), not a result.
    assert isinstance(result[(failing_name, "scipy_lbfgsb")]["solution"], str)
# --- Exemplo n.º 4 ---
def test_custom_benchmarks(raw_problems, internal_criterion_func):
    """Custom benchmark problems run through run_benchmark like built-in ones."""
    problems = get_benchmark_problems_custom(
        raw_problems,
        internal_criterion_func,
    )

    result = run_benchmark(problems=problems, optimize_options=["scipy_lbfgsb"])

    expected_keys = {
        (name, "scipy_lbfgsb")
        for name in ("linear_full_rank_good_start", "rosenbrock_good_start")
    }
    assert set(result) == expected_keys
# --- Exemplo n.º 5 ---
def test_run_benchmark_list_options():
    """run_benchmark accepts a plain list of algorithm names."""
    all_problems = get_benchmark_problems("example")
    # Restrict to the first two problems to keep the test fast.
    names = list(all_problems)[:2]
    subset = {key: all_problems[key] for key in names}
    algorithms = ["scipy_lbfgsb", "scipy_neldermead"]

    result = run_benchmark(problems=subset, optimize_options=algorithms)

    # One result entry per (problem, algorithm) combination.
    expected_keys = {
        (problem, algo)
        for problem in ("helical_valley_good_start", "rosenbrock_good_start")
        for algo in algorithms
    }
    assert set(result) == expected_keys
# --- Exemplo n.º 6 ---
def test_run_benchmark_list_options(tmpdir):
    """List-of-algorithms run with logging into a temporary directory."""
    # NOTE(review): this test shares its name with another
    # test_run_benchmark_list_options in this file; when both are collected the
    # later definition shadows the earlier one. Consider renaming this one
    # (e.g. ..._with_logging) — confirm against the test suite layout.
    all_problems = get_benchmark_problems("example")
    # Restrict to the first two problems to keep the test fast.
    names = list(all_problems)[:2]
    subset = {key: all_problems[key] for key in names}

    log_dir = tmpdir / "benchmark_logs"

    res = run_benchmark(
        problems=subset,
        optimize_options=["scipy_lbfgsb", "scipy_neldermead"],
        logging_directory=log_dir,
    )

    # One result entry per (problem, algorithm) combination.
    expected_keys = {
        (problem, algo)
        for problem in ("linear_full_rank_good_start", "rosenbrock_good_start")
        for algo in ("scipy_lbfgsb", "scipy_neldermead")
    }
    assert set(res) == expected_keys
# --- Exemplo n.º 7 ---
def test_profile_plot_options(options):
    """Smoke test: profile_plot runs with the parametrized plot options."""
    problems = get_benchmark_problems("example")
    # Cap both stopping criteria so each optimizer quits quickly.
    early_stop = {
        "stopping_max_criterion_evaluations": 10,
        "stopping_max_iterations": 10,
    }
    optimizers = {
        name: {"algorithm": algo, "algo_options": early_stop}
        for name, algo in [
            ("lbfgsb", "scipy_lbfgsb"),
            ("neldermead", "scipy_neldermead"),
        ]
    }
    results = run_benchmark(
        problems,
        optimizers,
        n_cores=1,  # must be 1 for the test to work
    )

    profile_plot(problems=problems, results=results, **options)
    plt.close()