Example 1
import pytest
import emukit.test_functions
from emukit.benchmarking.loop_benchmarking.benchmarker import Benchmarker
from emukit.benchmarking.loop_benchmarking.metrics import MinimumObservedValueMetric


def test_non_unique_metric_names_fail(loops):
    # Two metrics sharing the same name ("x") should be rejected by the Benchmarker.
    test_fcn, parameter_space = emukit.test_functions.forrester_function()
    with pytest.raises(ValueError):
        Benchmarker(
            loops, test_fcn, parameter_space,
            [MinimumObservedValueMetric("x"),
             MinimumObservedValueMetric("x")])
Example 2
import numpy as np
from unittest import mock

from emukit.benchmarking.loop_benchmarking.metrics import MinimumObservedValueMetric
from emukit.core.interfaces import IModel
from emukit.core.loop import ModelUpdater, OuterLoop
from emukit.core.loop.loop_state import create_loop_state


def test_minimum_observed_value_metric():
    # 50 observations of a 2-dimensional input and a 2-output objective.
    x_observations = np.random.rand(50, 2)
    y_observations = np.random.rand(50, 2)

    # Mock out the model, model updater and outer loop so only the metric is exercised.
    mock_model = mock.create_autospec(IModel)

    model_updater_mock = mock.create_autospec(ModelUpdater)
    model_updater_mock.model = mock_model
    mock_loop = mock.create_autospec(OuterLoop)
    mock_loop.model_updaters = [model_updater_mock]

    loop_state = create_loop_state(x_observations, y_observations)
    loop_state.metrics = dict()

    # The metric should return one minimum per output column of Y.
    metric = MinimumObservedValueMetric()
    metric_value = metric.evaluate(mock_loop, loop_state)

    assert metric_value.shape == (2,)
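
The expected shape follows from the metric reducing the (50, 2) array of observed objective values to one minimum per output column. A rough stand-in for that computation (a sketch, not emukit's exact implementation):

import numpy as np

y_observations = np.random.rand(50, 2)
# Column-wise minimum over everything observed so far -> one value per output.
assert np.min(y_observations, axis=0).shape == (2,)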
Example 3
import numpy as np
import emukit.test_functions
from emukit.benchmarking.loop_benchmarking.benchmarker import Benchmarker
from emukit.benchmarking.loop_benchmarking.metrics import (
    MeanSquaredErrorMetric, MinimumObservedValueMetric, TimeMetric)


def test_benchmarker_runs(loops):
    test_fcn, parameter_space = emukit.test_functions.forrester_function()

    # Held-out test inputs for the mean-squared-error metric.
    x_test = np.random.rand(50, 1)
    benchmark = Benchmarker(loops, test_fcn, parameter_space, [
        MinimumObservedValueMetric(),
        TimeMetric(),
        MeanSquaredErrorMetric(x_test, test_fcn(x_test))
    ])
    results = benchmark.run_benchmark()
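
The snippet stops at `run_benchmark()`, whose return value collects the recorded metric values per loop, per metric and per iteration. A hedged follow-up, assuming the results object exposes `extract_metric_as_array(loop_name, metric_name)`, that the default name of MinimumObservedValueMetric is "minimum_observed_value", and reusing the "gp_bo" loop name from the hypothetical fixture above (all worth checking against the installed emukit version):

# Hypothetical inspection of the benchmark results; the accessor name and the
# default metric name are assumptions to verify against your emukit version.
min_observed = results.extract_metric_as_array("gp_bo", "minimum_observed_value")
print(min_observed.shape)  # roughly (n_repeats, n_iterations)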