import numpy as np
import tensorflow as tf
from scipy.optimize import bisect
from scipy.stats import norm

from gpflow import settings
from gpflow.model import Model
from gpflow.param import AutoFlow, DataHolder

# The imports below assume GPflowOpt's module layout; Acquisition,
# DataTransform and RandomDesign are defined elsewhere in the package.
from gpflowopt.acquisition import Acquisition
from gpflowopt.design import RandomDesign
from gpflowopt.transforms import DataTransform

float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
stability = settings.numerics.jitter_level
class ExpectedImprovement(Acquisition):
    """
    Expected Improvement acquisition function for single-objective global optimization.
    Introduced by (Mockus et al., 1975).

    Key reference:

    ::

        @article{Jones:1998,
            title={Efficient global optimization of expensive black-box functions},
            author={Jones, Donald R and Schonlau, Matthias and Welch, William J},
            journal={Journal of Global Optimization},
            volume={13},
            number={4},
            pages={455--492},
            year={1998},
            publisher={Springer}
        }

    This acquisition function is the expectation of the improvement over the current best observation
    w.r.t. the predictive distribution. The definition is closely related to the
    :class:`.ProbabilityOfImprovement`, but adds a multiplication with the improvement w.r.t. the
    current best observation to the integral.

    .. math::
       \\alpha(\\mathbf x_{\\star}) = \\int \\max(f_{\\min} - f_{\\star}, 0) \\, p( f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
    """

    def __init__(self, model):
        """
        :param model: GPflow model (single output) representing our belief of the objective
        """
        super(ExpectedImprovement, self).__init__(model)
        self.fmin = DataHolder(np.zeros(1))
        self._setup()

    def _setup(self):
        super(ExpectedImprovement, self)._setup()
        # Obtain the lowest posterior mean for the previous - feasible - evaluations
        feasible_samples = self.data[0][self.highest_parent.feasible_data_index(), :]
        samples_mean, _ = self.models[0].predict_f(feasible_samples)
        self.fmin.set_data(np.min(samples_mean, axis=0))

    def build_acquisition(self, Xcand, **kwargs):
        # Obtain predictive distributions for candidates
        candidate_mean, candidate_var = self._build_acquisition(Xcand, **kwargs)
        candidate_var = tf.maximum(candidate_var, stability)

        # Compute EI
        normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
        t1 = (self.fmin - candidate_mean) * normal.cdf(self.fmin)
        t2 = candidate_var * normal.prob(self.fmin)
        return tf.add(t1, t2, name=self.__class__.__name__)
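# Illustrative usage sketch, not part of the module. It assumes a trained
# single-output GPflow model and GPflowOpt's BayesianOptimizer; `domain` and
# `objective_fun` are hypothetical names standing in for a gpflowopt domain
# and a black-box objective:
#
#   import gpflow
#   import gpflowopt
#
#   model = gpflow.gpr.GPR(X, Y, gpflow.kernels.Matern52(X.shape[1], ARD=True))
#   alpha = ExpectedImprovement(model)
#   optimizer = gpflowopt.BayesianOptimizer(domain, alpha)
#   result = optimizer.optimize(objective_fun, n_iter=15)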
class ProbabilityOfImprovement(Acquisition):
    """
    Probability of Improvement acquisition function for single-objective global optimization.

    Key reference:

    ::

        @article{Kushner:1964,
            author = "Kushner, Harold J",
            journal = "Journal of Basic Engineering",
            number = "1",
            pages = "97--106",
            publisher = "American Society of Mechanical Engineers",
            title = "{A new method of locating the maximum point of an arbitrary multipeak curve in the presence of noise}",
            volume = "86",
            year = "1964"
        }

    .. math::
       \\alpha(\\mathbf x_{\\star}) = \\int_{-\\infty}^{f_{\\min}} \\, p( f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
    """

    def __init__(self, model):
        """
        :param model: GPflow model (single output) representing our belief of the objective
        """
        super(ProbabilityOfImprovement, self).__init__(model)
        self.fmin = DataHolder(np.zeros(1))
        self._setup()

    def _setup(self):
        super(ProbabilityOfImprovement, self)._setup()
        feasible_samples = self.data[0][self.highest_parent.feasible_data_index(), :]
        samples_mean, _ = self.models[0].predict_f(feasible_samples)
        self.fmin.set_data(np.min(samples_mean, axis=0))

    def build_acquisition(self, Xcand, **kwargs):
        candidate_mean, candidate_var = self._build_acquisition(Xcand, **kwargs)
        candidate_var = tf.maximum(candidate_var, stability)
        normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
        return normal.cdf(self.fmin, name=self.__class__.__name__)
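# For reference (a sketch, not part of the module): because `normal` above is
# parameterized by the candidate mean and standard deviation, normal.cdf(fmin)
# equals the standard-normal CDF of the normalized improvement, i.e. the
# integral in the docstring has the closed form
#
#   from scipy.stats import norm as standard_normal
#   poi = standard_normal.cdf((fmin - candidate_mean) / np.sqrt(candidate_var))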
class MinValueEntropySearch(Acquisition):
    """
    Max-value entropy search acquisition function for single-objective global optimization.
    Introduced by (Wang et al., 2017). This implementation follows the minimization convention,
    hence it samples the distribution of the objective minimum.

    Key reference:

    ::

        @InProceedings{Wang:2017,
            title = {Max-value Entropy Search for Efficient {B}ayesian Optimization},
            author = {Zi Wang and Stefanie Jegelka},
            booktitle = {Proceedings of the 34th International Conference on Machine Learning},
            pages = {3627--3635},
            year = {2017},
            editor = {Doina Precup and Yee Whye Teh},
            volume = {70},
            series = {Proceedings of Machine Learning Research},
            address = {International Convention Centre, Sydney, Australia},
            month = {06--11 Aug},
            publisher = {PMLR},
        }
    """

    def __init__(self, model, domain, gridsize=10000, num_samples=10):
        assert isinstance(model, Model)
        super(MinValueEntropySearch, self).__init__(model)
        assert self.data[1].shape[1] == 1
        self.gridsize = gridsize
        self.num_samples = num_samples
        self.samples = DataHolder(np.zeros(num_samples, dtype=np_float_type))
        self._domain = domain

    def _setup(self):
        super(MinValueEntropySearch, self)._setup()

        # Apply Gumbel sampling
        m = self.models[0]
        valid = self.feasible_data_index()

        # Work with feasible data
        X = self.data[0][valid, :]
        N = np.shape(X)[0]
        Xrand = RandomDesign(self.gridsize, self._domain).generate()
        fmean, fvar = m.predict_f(np.vstack((X, Xrand)))
        idx = np.argmin(fmean[:N])
        right = fmean[idx].flatten()  # + 2*np.sqrt(fvar[idx]).flatten()
        left = right
        probf = lambda x: np.exp(np.sum(norm.logcdf(-(x - fmean) / np.sqrt(fvar)), axis=0))

        i = 0
        while probf(left) < 0.75:
            left = 2. ** i * np.min(fmean - 5. * np.sqrt(fvar)) + (1. - 2. ** i) * right
            i += 1

        # Binary search for 3 percentiles
        q1, med, q2 = map(lambda val: bisect(lambda x: probf(x) - val, left, right, maxiter=10000, xtol=0.01),
                          [0.25, 0.5, 0.75])
        beta = (q1 - q2) / (np.log(np.log(4. / 3.)) - np.log(np.log(4.)))
        alpha = med + beta * np.log(np.log(2.))

        # Obtain samples from y*
        mins = -np.log(-np.log(np.random.rand(self.num_samples).astype(np_float_type))) * beta + alpha
        self.samples.set_data(mins)

    def build_acquisition(self, Xcand):
        fmean, fvar = self.models[0].build_predict(Xcand)
        normal = tf.contrib.distributions.Normal(tf.constant(0.0, dtype=float_type),
                                                 tf.constant(1.0, dtype=float_type))
        gamma = (fmean - tf.expand_dims(self.samples, axis=0)) / tf.sqrt(fvar)
        return tf.reduce_sum(gamma * normal.prob(gamma) / (2. * normal.cdf(gamma)) - normal.log_cdf(gamma),
                             axis=1, keep_dims=True) / self.num_samples
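# The Gumbel step in _setup, in isolation (a sketch, not part of the module):
# probf(x) approximates Pr(min f > x) over the grid, q1/med/q2 are its
# 25th/50th/75th percentiles, and beta/alpha are the Gumbel scale/location
# matched to those percentiles. Min-value samples then follow from the
# Gumbel inverse CDF applied to uniform draws:
#
#   u = np.random.rand(num_samples)
#   mins = alpha - beta * np.log(-np.log(u))   # same expression as above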
class LinearTransform(DataTransform):
    """
    A simple linear transform of the form

    .. math::
       \\mathbf Y = (\\mathbf A \\mathbf X^{T})^{T} + \\mathbf b \\otimes \\mathbf 1_{N}^{T}
    """

    def __init__(self, A, b):
        """
        :param A: scaling matrix. Either a P-dimensional vector, or a P x P transformation matrix.
            For the latter, the backward and inverse methods are only guaranteed to work if A is
            invertible. It is also possible to specify a P x Q matrix with Q != P to obtain a
            lower-dimensional representation of X; in that case A is not invertible, hence the
            backward and inverse transforms are not supported.
        :param b: A P-dimensional offset vector.
        """
        super(LinearTransform, self).__init__()
        assert A is not None
        assert b is not None

        b = np.atleast_1d(b)
        A = np.atleast_1d(A)
        if len(A.shape) == 1:
            A = np.diag(A)

        assert len(b.shape) == 1
        assert len(A.shape) == 2

        self.A = DataHolder(A)
        self.b = DataHolder(b)

    def build_forward(self, X):
        return tf.matmul(X, tf.transpose(self.A)) + self.b

    @AutoFlow((float_type, [None, None]))
    def backward(self, Y):
        """
        Overrides the default backward approach to avoid an explicit matrix inversion.
        """
        return self.build_backward(Y)

    def build_backward(self, Y):
        """
        TensorFlow implementation of the inverse mapping
        """
        L = tf.cholesky(tf.transpose(self.A))
        XT = tf.cholesky_solve(L, tf.transpose(Y - self.b))
        return tf.transpose(XT)

    def build_backward_variance(self, Yvar):
        """
        Additional method for scaling variance backward (used in :class:`.Normalizer`).
        Can process both the diagonal variances returned by predict_f, as well as full
        covariance matrices.

        :param Yvar: size N x N x P or size N x P
        :return: Yvar scaled, same rank and size as input
        """
        rank = tf.rank(Yvar)
        # Because TensorFlow evaluates both fn1 and fn2, the transpose can't be in the same line.
        # If a full cov matrix is provided, fn1 turns it into a rank 4, then tries to transpose
        # it as a rank 3. Splitting it into two steps however works fine.
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.matrix_diag(tf.transpose(Yvar)), lambda: Yvar)
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.transpose(Yvar, perm=[1, 2, 0]), lambda: Yvar)

        N = tf.shape(Yvar)[0]
        D = tf.shape(Yvar)[2]
        L = tf.cholesky(tf.square(tf.transpose(self.A)))
        Yvar = tf.reshape(Yvar, [N * N, D])
        scaled_var = tf.reshape(tf.transpose(tf.cholesky_solve(L, tf.transpose(Yvar))), [N, N, D])
        return tf.cond(tf.equal(rank, 2), lambda: tf.reduce_sum(scaled_var, axis=1), lambda: scaled_var)

    def assign(self, other):
        """
        Assign the parameters of another :class:`LinearTransform`.
        Useful to avoid graph re-compilation.

        :param other: :class:`.LinearTransform` object
        """
        assert other is not None
        assert isinstance(other, LinearTransform)
        self.A.set_data(other.A.value)
        self.b.set_data(other.b.value)

    def __invert__(self):
        A_inv = np.linalg.inv(self.A.value.T)
        return LinearTransform(A_inv, -np.dot(self.b.value, A_inv))
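# Round-trip sketch with hypothetical values (not part of the module; `forward`
# is the AutoFlow wrapper assumed to be provided by the DataTransform base
# class, analogous to `backward` above):
#
#   t = LinearTransform(np.array([2.0, 0.5]), np.array([1.0, -1.0]))
#   Y = t.forward(X)    # per the docstring: Y = (A X^T)^T + b
#   X2 = t.backward(Y)  # recovers X, since the diagonal A here is invertible
#   t_inv = ~t          # __invert__: a LinearTransform realizing the inverse map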