def lcb_f(x):
    # Lower confidence bound: posterior mean minus `scale` standard deviations.
    x = format_x(x, context)
    mu, var = self.predict(x)
    lcb = mu - scale * np.sqrt(var)
    if negate:
        lcb = -lcb  # minimize
    return lcb  # maximize
def ac_f(x):  # TODO: acquisition predicate or unify with lcb_f
    x = format_x(x, context)
    mu, var = self.predict(x)
    ret = mu / np.sqrt(var)
    # self.beta < mu / np.sqrt(var)
    # self.beta * np.sqrt(var) < mu
    # 0 < self.beta * np.sqrt(var) - mu
    return ret.T[0]
def lcb_fg(x):
    x = format_x(x, context)
    mu, var = self.predict(x)
    f = mu - scale * np.sqrt(var)
    dmdx, dvdx = self.predictive_gradients(x)
    # Chain rule: d(sqrt(var))/dx = dvdx / (2 * sqrt(var)), so the scale
    # factor in f must also multiply the variance term of the gradient.
    g = dmdx - 0.5 * scale * dvdx / np.sqrt(var)
    if negate:
        f, g = -f, -g  # minimize
    return f[0, 0], g[0, self.func.param_idx]  # maximize
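# A minimal, self-contained check (not part of the original code) of the LCB
# gradient used in lcb_fg: for f = mu - scale * sqrt(var), the chain rule
# gives df/dx = dmu/dx - scale * dvar/dx / (2 * sqrt(var)). The toy mean and
# variance functions below are hypothetical stand-ins for self.predict and
# self.predictive_gradients.
import numpy as np

def _toy_predict(x):
    return np.sin(x), 0.5 + 0.1 * x ** 2  # toy posterior mean, variance

def _toy_gradients(x):
    return np.cos(x), 0.2 * x             # dmu/dx, dvar/dx

def _toy_lcb_fg(x, scale=2.0):
    mu, var = _toy_predict(x)
    dmdx, dvdx = _toy_gradients(x)
    f = mu - scale * np.sqrt(var)
    g = dmdx - 0.5 * scale * dvdx / np.sqrt(var)
    return f, g

_x, _eps = 0.7, 1e-6
_, g_analytic = _toy_lcb_fg(_x)
g_numeric = (_toy_lcb_fg(_x + _eps)[0] - _toy_lcb_fg(_x - _eps)[0]) / (2 * _eps)
assert np.isclose(g_analytic, g_numeric, atol=1e-5)  # analytic gradient matches finite differences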
def ac_f(x):
    # x = helper.grid_around_point(x, self.robust_delta, n=3, x_range=self.func.x_range)
    x = format_x(x, context)
    mu, var = self.predict(x)
    if self.use_var:
        f = mu / np.sqrt(var)
    else:
        f = mu
    if negate:
        f = -f  # minimize
    return f  # maximize
def ac_f(x):
    x = format_x(x, context)
    mu, var = self.predict(
        x, noise=noise, **kwargs
    )  # self.model.predict_quantiles(x, quantiles=(2.5, 97.5))
    # TODO: obtain gradient wrt query point for optimization
    # if x.shape[0] == 1:
    #     # print(self.model.predict_jacobian(Xnew=x))
    #     dmu_dX, dv_dX = self.model.predictive_gradients(Xnew=x)
    #     dmu_dX = dmu_dX[:, :, 0]
    #     ds_dX = dv_dX / (2 * np.sqrt(var))
    #     # print(dmu_dX, ds_dX)
    #     # print(dmu_dX.shape, dv_dX.shape)
    # Highest where the posterior mean is near zero and the variance is large.
    lse = -np.abs(mu) + scale * np.sqrt(var)
    if negate:
        lse = -lse  # minimize
    return lse  # maximize
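# Illustration (assumed, not from the original code) of how the score above
# ranks candidates: -|mu| + scale * sqrt(var) is largest where the posterior
# mean is close to zero and the variance is large, i.e. where a zero crossing
# is both plausible and uncertain; the numbers below are made up.
import numpy as np

mu = np.array([0.05, 0.05, 1.50])    # posterior means at three candidates
var = np.array([0.40, 0.01, 0.40])   # posterior variances
scale = 1.96
score = -np.abs(mu) + scale * np.sqrt(var)
# score ≈ [1.19, 0.15, -0.26]: near-zero mean with high variance wins.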
def ac_fg(x):
    # TODO: could instead sample a fixed number of points
    # x = helper.grid_around_point(x, self.robust_delta, n=3, x_range=self.func.x_range)
    x = format_x(x, context)
    mu, var = self.predict(x)
    dmdx, dvdx = self.predictive_gradients(x)
    if self.use_var:
        f = mu / np.sqrt(var)
        # Quotient rule for mu / sqrt(var):
        # d/dx = (sqrt(var) * dmu/dx - mu * dvar/dx / (2 * sqrt(var))) / var
        g = (np.sqrt(var) * dmdx - 0.5 * mu * dvdx / np.sqrt(var)) / var
        # return np.average(f, axis=0)[0], np.average(g, axis=0)[self.func.param_idx]
        g = g[0, :]
        # return f[0, 0], g[0, self.func.param_idx]
    else:
        f, g = mu, dmdx[0, :]  # drop the sample axis to match the branch above
    if negate:
        f, g = -f, -g  # minimize
    return f[0, 0], g[self.func.param_idx]  # maximize
def ac_f(x):
    x = format_x(x, context)
    _, var = self.predict(x, **kwargs)
    if negate:
        var = -var  # minimize
    return var  # maximize
def ac_f(x):
    x = format_x(x, context)
    mu, _ = self.predict(x)
    if negate:
        mu = -mu
    return mu
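# Usage sketch (assumed, not part of the original code): every closure above
# is written so that larger values are better, and negate=True turns it into
# a loss a generic minimizer can drive down. With a (value, gradient) closure
# such as ac_fg, scipy's L-BFGS-B accepts both via jac=True; the quadratic
# stand-in below only mimics that (f, g) interface.
import numpy as np
from scipy.optimize import minimize

def _negated_acquisition(x):
    # stand-in for ac_fg with negate=True: value and gradient of -f
    f = (x[0] - 0.3) ** 2 - 1.0              # -f, where f peaks at x = 0.3
    g = np.array([2.0 * (x[0] - 0.3)])
    return f, g

res = minimize(_negated_acquisition, x0=np.array([0.9]),
               jac=True, method="L-BFGS-B", bounds=[(0.0, 1.0)])
# res.x ≈ [0.3]: minimizing the negated closure maximizes the acquisition.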