Code Example #1
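Checks that convergence_plot runs without error on benchmark results for different combinations of plotting options.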
import matplotlib.pyplot as plt
import pytest

# Assumed imports: these helpers match estimagic's benchmarking API; the
# exact module paths may differ across estimagic versions.
from estimagic import convergence_plot, get_benchmark_problems, run_benchmark


# Minimal stand-in parametrization; the original "options" cases are not shown.
@pytest.mark.parametrize("options", [{}])
def test_convergence_plot_options(options):
    problems = get_benchmark_problems("example")
    stop_after_10 = {
        "stopping_max_criterion_evaluations": 10,
        "stopping_max_iterations": 10,
    }
    optimizers = {
        "lbfgsb": {
            "algorithm": "scipy_lbfgsb",
            "algo_options": stop_after_10
        },
        "nm": {
            "algorithm": "scipy_neldermead",
            "algo_options": stop_after_10
        },
    }
    results = run_benchmark(
        problems,
        optimizers,
        n_cores=1,  # must be 1 for the test to work
        logging_directory="logging",
    )

    convergence_plot(problems=problems,
                     results=results,
                     problem_subset=["bard_good_start"],
                     **options)
    plt.close()
Code Example #2
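Checks that run_benchmark accepts optimize options as a dict whose values are either a bare algorithm name or a full configuration dict, and returns one entry per (problem, optimizer) pair.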
from estimagic import get_benchmark_problems, run_benchmark  # assumed imports


def test_run_benchmark_dict_options():
    all_problems = get_benchmark_problems("more_wild")
    first_two_names = list(all_problems)[:2]
    first_two = {name: all_problems[name] for name in first_two_names}

    optimize_options = {
        "default_lbfgsb": "scipy_lbfgsb",
        "tuned_lbfgsb": {
            "algorithm": "scipy_lbfgsb",
            "algo_options": {
                "convergence.relative_criterion_tolerance": 1e-10
            },
        },
    }

    result = run_benchmark(
        problems=first_two,
        optimize_options=optimize_options,
    )

    expected_keys = {
        ("linear_full_rank_good_start", "default_lbfgsb"),
        ("linear_full_rank_bad_start", "default_lbfgsb"),
        ("linear_full_rank_good_start", "tuned_lbfgsb"),
        ("linear_full_rank_bad_start", "tuned_lbfgsb"),
    }
    assert set(result) == expected_keys
Code Example #3
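Checks that a benchmark run on a problem where the optimizer fails emits a warning and records the failure in the results instead of raising.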
import pytest

from estimagic import get_benchmark_problems, run_benchmark  # assumed imports


def test_run_benchmark_failing():
    all_problems = get_benchmark_problems("more_wild")
    failing_name = "jennrich_sampson"
    failing = {failing_name: all_problems[failing_name]}

    optimize_options = ["scipy_lbfgsb"]

    # A bare pytest.warns() matches any warning (supported in pytest >= 7.0).
    with pytest.warns():
        result = run_benchmark(problems=failing,
                               optimize_options=optimize_options)

    # The failed run is stored as a string (e.g. an error message) rather
    # than a solution object.
    key = (failing_name, "scipy_lbfgsb")
    assert isinstance(result[key]["solution"], str)
Code Example #4
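Checks that run_benchmark accepts optimize options as a plain list of algorithm names and keys its results by (problem, algorithm) tuples.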
from estimagic import get_benchmark_problems, run_benchmark  # assumed imports


def test_run_benchmark_list_options():
    all_problems = get_benchmark_problems("example")
    first_two_names = list(all_problems)[:2]
    first_two = {name: all_problems[name] for name in first_two_names}
    optimize_options = ["scipy_lbfgsb", "scipy_neldermead"]

    result = run_benchmark(
        problems=first_two,
        optimize_options=optimize_options,
    )

    expected_keys = {
        ("helical_valley_good_start", "scipy_lbfgsb"),
        ("rosenbrock_good_start", "scipy_lbfgsb"),
        ("helical_valley_good_start", "scipy_neldermead"),
        ("rosenbrock_good_start", "scipy_neldermead"),
    }
    assert set(result) == expected_keys
Code Example #5
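Checks that noisy benchmark problems return different criterion values on repeated evaluation with the same parameters, while noise-free problems evaluate deterministically.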
import numpy as np
import pytest

from estimagic import get_benchmark_problems  # assumed import


# Minimal stand-in parametrization; the original test covers more combinations.
@pytest.mark.parametrize(
    "name, additive_noise, multiplicative_noise",
    [("example", False, False), ("example", True, False)],
)
def test_get_problems(name, additive_noise, multiplicative_noise):
    is_noisy = any((additive_noise, multiplicative_noise))
    problems = get_benchmark_problems(
        name=name,
        additive_noise=additive_noise,
        multiplicative_noise=multiplicative_noise,
    )
    first_name = list(problems)[0]
    first = problems[first_name]
    func = first["inputs"]["criterion"]
    params = first["inputs"]["params"]

    # Re-seed the global RNG from fresh entropy before the two evaluations.
    np.random.seed()
    first_eval = func(params)["value"]
    second_eval = func(params)["value"]

    if is_noisy:
        assert first_eval != second_eval
    else:
        assert first_eval == second_eval
Code Example #6
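A variant of Example #4 that additionally writes logs to a temporary directory provided by pytest's tmpdir fixture.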
from estimagic import get_benchmark_problems, run_benchmark  # assumed imports


def test_run_benchmark_list_options(tmpdir):
    all_problems = get_benchmark_problems("example")
    first_two_names = list(all_problems)[:2]
    first_two = {name: all_problems[name] for name in first_two_names}

    optimize_options = ["scipy_lbfgsb", "scipy_neldermead"]

    logging_directory = tmpdir / "benchmark_logs"

    res = run_benchmark(
        problems=first_two,
        optimize_options=optimize_options,
        logging_directory=logging_directory,
    )

    expected_keys = {
        ("linear_full_rank_good_start", "scipy_lbfgsb"),
        ("rosenbrock_good_start", "scipy_lbfgsb"),
        ("linear_full_rank_good_start", "scipy_neldermead"),
        ("rosenbrock_good_start", "scipy_neldermead"),
    }

    assert set(res) == expected_keys
Code Example #7
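Checks that profile_plot runs without error on benchmark results for different combinations of plotting options.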
import matplotlib.pyplot as plt
import pytest

from estimagic import get_benchmark_problems, profile_plot, run_benchmark  # assumed imports


# Minimal stand-in parametrization; the original "options" cases are not shown.
@pytest.mark.parametrize("options", [{}])
def test_profile_plot_options(options):
    problems = get_benchmark_problems("example")
    stop_after_10 = {
        "stopping_max_criterion_evaluations": 10,
        "stopping_max_iterations": 10,
    }
    optimizers = {
        "lbfgsb": {
            "algorithm": "scipy_lbfgsb",
            "algo_options": stop_after_10
        },
        "neldermead": {
            "algorithm": "scipy_neldermead",
            "algo_options": stop_after_10,
        },
    }
    results = run_benchmark(
        problems,
        optimizers,
        n_cores=1,  # must be 1 for the test to work
    )

    profile_plot(problems=problems, results=results, **options)
    plt.close()
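Taken together, the examples trace one workflow: select benchmark problems, run them against several optimizers, and visualize the outcome. A minimal end-to-end sketch, assuming the same estimagic API used in the tests above (the top-level import paths are an assumption and may differ between versions):

import matplotlib.pyplot as plt

# Assumed top-level estimagic exports, as in the examples above.
from estimagic import (
    convergence_plot,
    get_benchmark_problems,
    profile_plot,
    run_benchmark,
)

# Select a small problem set and two optimizers to compare.
problems = get_benchmark_problems("example")
optimize_options = ["scipy_lbfgsb", "scipy_neldermead"]

# Produces one result per (problem_name, optimizer_name) pair.
results = run_benchmark(problems=problems, optimize_options=optimize_options)

# Compare optimizer performance across problems, then inspect convergence.
profile_plot(problems=problems, results=results)
convergence_plot(problems=problems, results=results)
plt.show()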