Example 1
import numpy as np
from scipy import optimize

# Note: compute_incumbent is defined elsewhere in this codebase; it returns
# the best observed point and its function value.


def optimize_posterior_mean_and_std(model, X_lower, X_upper, startpoints=None, with_gradients=True):
    def f(x):
        # Objective: posterior mean plus one standard deviation.
        mu, var = model.predict(x[np.newaxis, :])
        return (mu + np.sqrt(var))[0, 0]

    def df(x):
        dmu, dvar = model.predictive_gradients(x[np.newaxis, :])
        _, var = model.predict(x[np.newaxis, :])
        std = np.sqrt(var)
        # Gradient of the standard deviation via the chain rule:
        # s(x) = sqrt(v(x))  =>  s'(x) = v'(x) / (2 * sqrt(v(x)))
        dstd = 0.5 * dvar / std
        return (dmu[:, :, 0] + dstd)[0, :]

    if startpoints is None:
        # Default to a single startpoint: the incumbent (best observed point).
        startpoints = [compute_incumbent(model)[0]]

    # Run a local optimization from every startpoint and record each result.
    x_opt = np.zeros([len(startpoints), X_lower.shape[0]])
    fval = np.zeros([len(startpoints)])
    for i, startpoint in enumerate(startpoints):
        if with_gradients:
            res = optimize.fmin_l_bfgs_b(f, startpoint, df, bounds=list(zip(X_lower, X_upper)))
            x_opt[i] = res[0]
            fval[i] = res[1]
        else:
            res = optimize.minimize(f, startpoint, bounds=list(zip(X_lower, X_upper)), method="L-BFGS-B")
            x_opt[i] = res["x"]
            fval[i] = res["fun"]

    # Return the point with the lowest function value
    best = np.argmin(fval)
    return x_opt[best], fval[best]
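A quick way to sanity-check this helper is to drive it with a stand-in model whose posterior mean and variance are known analytically. The ToyModel below is purely hypothetical (it is not part of the original codebase): its mean is (x - 1)^2 and its variance is constant 1, so minimizing mean + std should land near x = 1 with a value near 1.

import numpy as np

class ToyModel:
    def predict(self, X):
        mu = ((X - 1.0) ** 2).sum(axis=1, keepdims=True)   # shape (N, 1)
        var = np.ones((X.shape[0], 1))                     # constant variance
        return mu, var

    def predictive_gradients(self, X):
        dmu = (2.0 * (X - 1.0))[:, :, np.newaxis]          # shape (N, D, 1)
        dvar = np.zeros_like(X)                            # shape (N, D)
        return dmu, dvar

X_lower = np.array([-2.0])
X_upper = np.array([4.0])
x_best, f_best = optimize_posterior_mean_and_std(
    ToyModel(), X_lower, X_upper, startpoints=[np.array([0.0])])
print(x_best, f_best)  # expected: x_best close to [1.0], f_best close to 1.0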
Example 2
    def test_optimize_posterior_mean_and_std(self):
        inc, inc_val = optimize_posterior_mean_and_std(self.m, self.X_lower, self.X_upper, with_gradients=True)

        assert len(inc.shape) == 1           # a single flat point vector
        assert np.all(inc >= self.X_lower)   # within the lower bounds
        assert np.all(inc <= self.X_upper)   # within the upper bounds
        # The optimized value should be at least as good as the best observed one.
        assert np.all(inc_val <= compute_incumbent(self.m)[1])
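The test compares against compute_incumbent, whose implementation is not shown in these snippets. Here is a minimal sketch of what such a helper plausibly does, assuming the model exposes its training data as model.X and model.Y (that attribute layout is an assumption, not taken from the source):

import numpy as np

def compute_incumbent(model):
    # Hypothetical sketch: pick the training point with the lowest
    # observed objective value and return it together with that value.
    best = np.argmin(model.Y)
    return model.X[best], model.Y[best]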
Example 3
import numpy as np
from scipy import optimize


def optimize_posterior_mean(model, X_lower, X_upper, inc=None, with_gradients=False):
    def f(x):
        # Objective: the posterior mean at x, returned as a scalar.
        mu, _ = model.predict(x[np.newaxis, :])
        return mu[0, 0]

    def df(x):
        # predictive_gradients returns dmu with shape (N, D, 1);
        # flatten it to the (D,) gradient vector that scipy expects.
        dmu = model.predictive_gradients(x[np.newaxis, :])[0]
        return dmu[0, :, 0]

    if inc is None:
        # Start from the incumbent (best observed point) by default.
        inc, _ = compute_incumbent(model)

    if with_gradients:
        res = optimize.minimize(f, inc, bounds=list(zip(X_lower, X_upper)), jac=df)
    else:
        res = optimize.minimize(f, inc, bounds=list(zip(X_lower, X_upper)))
    return res["x"], res["fun"]
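The same hypothetical ToyModel from the sketch after Example 1 can exercise this function as well: its posterior mean (x - 1)^2 is minimized at x = 1, so the optimizer should recover that point.

X_lower = np.array([-2.0])
X_upper = np.array([4.0])
x_opt, f_opt = optimize_posterior_mean(
    ToyModel(), X_lower, X_upper, inc=np.array([0.0]), with_gradients=True)
print(x_opt, f_opt)  # expected: x_opt close to [1.0], f_opt close to 0.0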