Example #1
def test_numerical_gradient():
    debug_output = False
    random = np.random.RandomState(42)
    eps = 1e-6

    for model in default_models():
        ei = EIAcquisitionFunction(model)

        for it in range(10):
            high = 1.0 if it < 5 else 0.02
            x = random.uniform(low=0.0, high=high, size=(2, ))
            f0, analytical_gradient = ei.compute_acq_with_gradient(x)
            analytical_gradient = analytical_gradient.flatten()
            if debug_output:
                print('x0 = {}, f(x_0) = {}, grad(x_0) = {}'.format(
                    x, f0, analytical_gradient))

            for i in range(2):
                h = np.zeros_like(x)
                h[i] = eps
                fpeps = ei.compute_acq(x + h)[0]
                fmeps = ei.compute_acq(x - h)[0]
                numerical_derivative = (fpeps - fmeps) / (2 * eps)
                if debug_output:
                    print(
                        'f(x0+eps) = {}, f(x0-eps) = {}, findiff = {}, deriv = {}'
                        .format(fpeps[0], fmeps[0], numerical_derivative[0],
                                analytical_gradient[i]))
                np.testing.assert_almost_equal(numerical_derivative.item(),
                                               analytical_gradient[i],
                                               decimal=4)
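
The check above is a standard central finite-difference gradient test: for small eps, (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps) approximates the i-th partial derivative with O(eps^2) error. A minimal, self-contained sketch of the same idea against a function with a known gradient (the names here are illustrative, not from the library):

import numpy as np

def check_gradient(f, grad_f, x, eps=1e-6, decimal=4):
    # compare each analytical partial derivative against a central difference
    g = grad_f(x)
    for i in range(x.size):
        h = np.zeros_like(x)
        h[i] = eps
        numerical = (f(x + h) - f(x - h)) / (2 * eps)
        np.testing.assert_almost_equal(numerical, g[i], decimal=decimal)

# e.g. f(x) = sum(x^2) has gradient 2x
check_gradient(lambda x: np.sum(x ** 2), lambda x: 2 * x, np.array([0.3, -0.7]))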
Example #2
def test_best_value():
    # test that the best value affects expected improvement
    for model in default_models():
        ei = EIAcquisitionFunction(model)

        random = np.random.RandomState(42)
        test_X = random.uniform(low=0.0, high=1.0, size=(10, 2))

        acq_best0 = list(ei.compute_acq(test_X).flatten())
        zero_row = np.zeros((1, 2))
        acq0_best0 = ei.compute_acq(zero_row)

        # override current best
        def new_current_best():
            return np.array([10])

        model.current_best = new_current_best

        acq_best2 = list(ei.compute_acq(test_X).flatten())
        acq0_best2 = ei.compute_acq(zero_row)

        # with a (much worse) current best of 10, improvement is easier, so the
        # acquisition function should be better (lower value) everywhere
        assert all(a2 < a0 for a2, a0 in zip(acq_best2, acq_best0))

        # there should be a considerable gap at the point of the best evaluation
        assert acq0_best2 < acq0_best0 - 1.0
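
Why the assertions hold: for minimization, expected improvement under a Gaussian posterior N(mu, sigma^2) has the standard closed form EI = sigma * (z * Phi(z) + phi(z)) with z = (best - mu) / sigma, and the acquisition returned is minus EI. Raising the incumbent to 10 makes improvement easier everywhere, so -EI drops. A minimal sketch of that formula (not the library's exact implementation, which also handles things like MCMC averaging):

import numpy as np
from scipy.stats import norm

def neg_ei(mu, sigma, best):
    # closed-form expected improvement for minimization, negated
    z = (best - mu) / sigma
    return -sigma * (z * norm.cdf(z) + norm.pdf(z))

# a much worse incumbent (10 instead of 0) gives a lower (better) value
assert neg_ei(mu=0.0, sigma=1.0, best=10.0) < neg_ei(mu=0.0, sigma=1.0, best=0.0)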
Example #3
def test_sanity_check():
    # - test that values are negative, as we should be returning *minus* expected improvement
    # - test that points which are further from evaluated candidates have higher expected
    #   improvement, given similar mean
    # - test that points closer to better points have higher expected improvement
    for model in default_models(do_mcmc=False):
        ei = EIAcquisitionFunction(model)
        X = np.array([
            (0.0, 0.0),  # 0
            (1.0, 0.0),  # 1
            (0.0, 1.0),  # 2
            (1.0, 1.0),  # 3
            (0.2, 0.0),  # 4
            (0.0, 0.2),  # 5
            (0.1, 0.0),  # 6
            (0.0, 0.1),  # 7
            (0.1, 0.1),  # 8
            (0.9, 0.9),  # 9
        ])
        _acq = ei.compute_acq(X).flatten()
        #print('Negative EI values:')
        #print(_acq)
        acq = list(_acq)

        assert all(a <= 0 for a in acq), acq

        # lower evaluations should correspond to better acquisition;
        # the second inequality is non-strict because the values at points 1
        # and 3 are likely both zero
        assert acq[0] < acq[1] <= acq[3], acq
        # Note: The improvement here is tiny, just 0.01%:
        assert acq[8] < acq[9], acq

        # further from an evaluated point should correspond to better acquisition
        assert acq[6] < acq[4] < acq[1], acq
        assert acq[7] < acq[5] < acq[2], acq
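
The distance-based assertions follow from the same closed-form EI: at a fixed posterior mean, EI grows with the predictive standard deviation, and points further from the evaluated candidates have larger predictive std. A small self-contained sketch (illustrative only, same hedge as above):

import numpy as np
from scipy.stats import norm

def neg_ei(mu, sigma, best):
    z = (best - mu) / sigma
    return -sigma * (z * norm.cdf(z) + norm.pdf(z))

# larger predictive std at the same mean gives a better (lower) -EI value
assert neg_ei(0.0, 0.5, best=0.0) < neg_ei(0.0, 0.1, best=0.0)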
Example #4
def test_value_same_as_with_gradient():
    # test that compute_acq and compute_acq_with_gradient return the same acquisition values
    for model in default_models():
        ei = EIAcquisitionFunction(model)

        random = np.random.RandomState(42)
        X = random.uniform(low=0.0, high=1.0, size=(10, 2))

        # assert same as computation with gradients
        vec1 = ei.compute_acq(X).flatten()
        vec2 = np.array([ei.compute_acq_with_gradient(x)[0] for x in X])
        np.testing.assert_almost_equal(vec1, vec2)
Example #5
def test_optimization_improves():
    debug_output = False
    # Pick a random point and optimize; the expected improvement should get
    # better, but only if the starting point is not too far from the origin.
    random = np.random.RandomState(42)
    for model in default_models():
        ei = EIAcquisitionFunction(model)
        opt = LBFGSOptimizeAcquisition(model.state, model,
                                       EIAcquisitionFunction)
        if debug_output:
            print('\n\nGP MCMC' if model.does_mcmc() else 'GP Opt')
            fzero = ei.compute_acq(np.zeros((1, 2)))[0]
            print('f(0) = {}'.format(fzero))
        if debug_output and not model.does_mcmc():
            print('Hyperpars: {}'.format(model.get_params()))
            # Plot the thing!
            plot_ei_mean_std(model, ei, max_grid=0.001)
            plot_ei_mean_std(model, ei, max_grid=0.01)
            plot_ei_mean_std(model, ei, max_grid=0.1)
            plot_ei_mean_std(model, ei, max_grid=1.0)

        non_zero_acq_at_least_once = False
        for _ in range(10):
            #initial_point = random.uniform(low=0.0, high=1.0, size=(2,))
            initial_point = random.uniform(low=0.0, high=0.1, size=(2, ))
            acq0, df0 = ei.compute_acq_with_gradient(initial_point)
            if debug_output:
                print('\nInitial point: f(x0) = {}, x0 = {}'.format(
                    acq0, initial_point))
                print('grad0 = {}'.format(df0))
            if acq0 != 0:
                non_zero_acq_at_least_once = True
                optimized = np.array(opt.optimize(tuple(initial_point)))
                acq_opt = ei.compute_acq(optimized)[0]
                if debug_output:
                    print('Final point: f(x1) = {}, x1 = {}'.format(
                        acq_opt, optimized))
                assert acq_opt < 0
                assert acq_opt < acq0

        assert non_zero_acq_at_least_once
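
LBFGSOptimizeAcquisition itself is not shown in this snippet; the pattern it presumably wraps is SciPy's L-BFGS-B, fed both the value and the gradient from compute_acq_with_gradient. A hedged sketch (optimize_acq and its signature are assumptions here, not the library's API):

import numpy as np
from scipy.optimize import minimize

def optimize_acq(acq, x0, bounds):
    def fun(x):
        # value and flattened gradient in one call, as L-BFGS-B expects with jac=True
        f, g = acq.compute_acq_with_gradient(x)
        return float(f), np.asarray(g).flatten()

    result = minimize(fun, x0, jac=True, method='L-BFGS-B', bounds=bounds)
    return result.x

# usage (hypothetical): optimized = optimize_acq(ei, initial_point, [(0.0, 1.0)] * 2)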
Example #6
def test_gp_fantasizing():
    """
    Compare whether acquisition function evaluations (values, gradients) with
    fantasizing are the same as averaging them by hand.
    """
    random_seed = 4567
    _set_seeds(random_seed)
    num_fantasy_samples = 10
    num_pending = 5

    hp_ranges = HyperparameterRanges_Impl(
        HyperparameterRangeContinuous('x', 0.0, 1.0, LinearScaling()),
        HyperparameterRangeContinuous('y', 0.0, 1.0, LinearScaling()))
    X = [
        (0.0, 0.0),
        (1.0, 0.0),
        (0.0, 1.0),
        (1.0, 1.0),
    ]
    num_data = len(X)
    Y = [
        dictionarize_objective(np.random.randn(1, 1)) for _ in range(num_data)
    ]
    # Draw fantasies. This is done for a number of fixed pending candidates.
    # The model parameters are fit in the first iteration, when there are
    # no pending candidates.

    # Note: It is important not to normalize targets, because this would be
    # done on the observed targets only, not the fantasized ones, which would
    # make the comparison below hard.
    pending_evaluations = []
    for _ in range(num_pending):
        pending_cand = tuple(np.random.rand(2, ))
        pending_evaluations.append(PendingEvaluation(pending_cand))
    state = TuningJobState(hp_ranges,
                           [CandidateEvaluation(x, y) for x, y in zip(X, Y)],
                           failed_candidates=[],
                           pending_evaluations=pending_evaluations)
    gpmodel = default_gpmodel(state,
                              random_seed,
                              optimization_config=DEFAULT_OPTIMIZATION_CONFIG)
    model = GaussProcSurrogateModel(state,
                                    DEFAULT_METRIC,
                                    random_seed,
                                    gpmodel,
                                    fit_parameters=True,
                                    num_fantasy_samples=num_fantasy_samples,
                                    normalize_targets=False)
    fantasy_samples = model.fantasy_samples
    # Evaluate acquisition function and gradients with fantasizing
    num_test = 50
    X_test = [
        hp_ranges.to_ndarray(tuple(np.random.rand(2, )))
        for _ in range(num_test)
    ]
    acq_func = EIAcquisitionFunction(model)
    fvals, grads = _compute_acq_with_gradient_many(acq_func, X_test)
    # Do the same computation by averaging by hand
    fvals_cmp = np.empty((num_fantasy_samples, ) + fvals.shape)
    grads_cmp = np.empty((num_fantasy_samples, ) + grads.shape)
    X_full = X + state.pending_candidates
    for it in range(num_fantasy_samples):
        Y_full = Y + [
            dictionarize_objective(ev.fantasies[DEFAULT_METRIC][:, it])
            for ev in fantasy_samples
        ]
        state2 = TuningJobState(
            hp_ranges,
            [CandidateEvaluation(x, y) for x, y in zip(X_full, Y_full)],
            failed_candidates=[],
            pending_evaluations=[])
        # We have to skip parameter optimization here
        model2 = GaussProcSurrogateModel(
            state2,
            DEFAULT_METRIC,
            random_seed,
            gpmodel,
            fit_parameters=False,
            num_fantasy_samples=num_fantasy_samples,
            normalize_targets=False)
        acq_func2 = EIAcquisitionFunction(model2)
        fvals_, grads_ = _compute_acq_with_gradient_many(acq_func2, X_test)
        fvals_cmp[it, :] = fvals_
        grads_cmp[it, :] = grads_
    # Comparison
    fvals2 = np.mean(fvals_cmp, axis=0)
    grads2 = np.mean(grads_cmp, axis=0)
    assert np.allclose(fvals, fvals2)
    assert np.allclose(grads, grads2)
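
Two helpers used above, _set_seeds and _compute_acq_with_gradient_many, are not shown in the snippet. Minimal sketches consistent with how they are called (the real implementations may differ; for example, _set_seeds may also seed the underlying ML framework):

import numpy as np

def _set_seeds(seed):
    # assumption: at minimum, seed NumPy's global RNG
    np.random.seed(seed)

def _compute_acq_with_gradient_many(acq_func, X_test):
    # stack acquisition values and flattened gradients over all test points
    pairs = [acq_func.compute_acq_with_gradient(x) for x in X_test]
    fvals = np.array([f for f, _ in pairs])
    grads = np.array([np.asarray(g).flatten() for _, g in pairs])
    return fvals, grads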