class ProbabilityOfImprovement(Acquisition):
    """
    Probability of Improvement acquisition function for single-objective global optimization.

    .. math::
       \\alpha(\\mathbf x_{\\star}) = \\int_{-\\infty}^{f_{\\min}} \\, p( f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
    """

    def __init__(self, model):
        """
        :param model: GPflow model (single output) representing our belief of the objective
        """
        super(ProbabilityOfImprovement, self).__init__(model)
        self.fmin = DataHolder(np.zeros(1))
        self.setup()

    def setup(self):
        super(ProbabilityOfImprovement, self).setup()
        samples_mean, _ = self.models[0].predict_f(self.data[0])
        self.fmin.set_data(np.min(samples_mean, axis=0))

    def build_acquisition(self, Xcand):
        candidate_mean, candidate_var = self.models[0].build_predict(Xcand)
        candidate_var = tf.maximum(candidate_var, stability)
        normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
        return normal.cdf(self.fmin, name=self.__class__.__name__)
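# Illustrative sketch (not part of the library): the closed form evaluated by
# build_acquisition above, written with plain NumPy/SciPy for scalar inputs.
# The helper name _poi_closed_form and its arguments are hypothetical.
def _poi_closed_form(mu, var, fmin, jitter=1e-12):
    """Probability of improvement Phi((fmin - mu) / sigma)."""
    import numpy as np
    from scipy.stats import norm
    sigma = np.sqrt(np.maximum(var, jitter))  # mirrors the stability clamp above
    return norm.cdf(fmin, loc=mu, scale=sigma)

# Example: _poi_closed_form(mu=0.3, var=0.04, fmin=0.0) = Phi(-1.5) ~ 0.067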
class ExpectedImprovement(Acquisition):
    """
    Expected Improvement acquisition function for single-objective global optimization.
    Introduced by (Mockus et al, 1975).

    Key reference:

    ::

       @article{Jones:1998,
            title={Efficient global optimization of expensive black-box functions},
            author={Jones, Donald R and Schonlau, Matthias and Welch, William J},
            journal={Journal of Global Optimization},
            volume={13},
            number={4},
            pages={455--492},
            year={1998},
            publisher={Springer}
       }

    This acquisition function is the expectation of the improvement over the current best observation
    w.r.t. the predictive distribution. The definition is closely related to the
    :class:`.ProbabilityOfImprovement`, but adds a multiplication with the improvement w.r.t. the
    current best observation to the integral.

    .. math::
       \\alpha(\\mathbf x_{\\star}) = \\int \\max(f_{\\min} - f_{\\star}, 0) \\, p( f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
    """

    def __init__(self, model):
        """
        :param model: GPflow model (single output) representing our belief of the objective
        """
        super(ExpectedImprovement, self).__init__(model)
        assert isinstance(model, Model)
        self.fmin = DataHolder(np.zeros(1))
        self.setup()

    def setup(self):
        super(ExpectedImprovement, self).setup()
        # Obtain the lowest posterior mean for the previous - feasible - evaluations
        feasible_samples = self.data[0][self.highest_parent.feasible_data_index(), :]
        samples_mean, _ = self.models[0].predict_f(feasible_samples)
        self.fmin.set_data(np.min(samples_mean, axis=0))

    def build_acquisition(self, Xcand):
        # Obtain predictive distributions for candidates
        candidate_mean, candidate_var = self.models[0].build_predict(Xcand)
        candidate_var = tf.maximum(candidate_var, stability)

        # Compute EI
        normal = tf.contrib.distributions.Normal(candidate_mean, tf.sqrt(candidate_var))
        t1 = (self.fmin - candidate_mean) * normal.cdf(self.fmin)
        t2 = candidate_var * normal.prob(self.fmin)
        return tf.add(t1, t2, name=self.__class__.__name__)
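# Illustrative sketch (not part of the library): the same EI computation with
# NumPy/SciPy. Note that candidate_var * N(fmin | mu, sigma) equals
# sigma * phi((fmin - mu) / sigma), so t1 + t2 above matches the textbook form
# EI = (fmin - mu) * Phi(z) + sigma * phi(z) with z = (fmin - mu) / sigma.
# The helper name _ei_closed_form and its arguments are hypothetical.
def _ei_closed_form(mu, var, fmin, jitter=1e-12):
    """Expected improvement of a Gaussian prediction N(mu, var) over fmin."""
    import numpy as np
    from scipy.stats import norm
    sigma = np.sqrt(np.maximum(var, jitter))  # mirrors the stability clamp above
    z = (fmin - mu) / sigma
    return (fmin - mu) * norm.cdf(z) + sigma * norm.pdf(z)

# Example: _ei_closed_form(mu=0.3, var=0.04, fmin=0.0) ~ -0.3 * 0.067 + 0.2 * 0.130 ~ 0.006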
class LinearTransform(DataTransform):
    """
    A simple linear transform of the form

    .. math::
       \\mathbf Y = (\\mathbf A \\mathbf X^{T})^{T} + \\mathbf b \\otimes \\mathbf 1_{N}^{T}
    """

    def __init__(self, A, b):
        """
        :param A: scaling matrix. Either a P-dimensional vector, or a P x P transformation matrix. For the
            latter, the inverse and backward methods are only guaranteed to work if A is invertible.
            It is also possible to specify a P x Q matrix with Q != P to obtain a lower-dimensional
            representation of X. In this case, A is not invertible, hence the inverse and backward
            transforms are not supported.
        :param b: A P-dimensional offset vector.
        """
        super(LinearTransform, self).__init__()
        assert A is not None
        assert b is not None

        b = np.atleast_1d(b)
        A = np.atleast_1d(A)
        if len(A.shape) == 1:
            A = np.diag(A)

        assert len(b.shape) == 1
        assert len(A.shape) == 2

        self.A = DataHolder(A)
        self.b = DataHolder(b)

    def build_forward(self, X):
        return tf.matmul(X, tf.transpose(self.A)) + self.b

    @AutoFlow((float_type, [None, None]))
    def backward(self, Y):
        """
        Overwrites the default backward approach to avoid an explicit matrix inversion.
        """
        return self.build_backward(Y)

    def build_backward(self, Y):
        """
        TensorFlow implementation of the inverse mapping
        """
        L = tf.cholesky(tf.transpose(self.A))
        XT = tf.cholesky_solve(L, tf.transpose(Y - self.b))
        return tf.transpose(XT)

    def build_backward_variance(self, Yvar):
        """
        Additional method for scaling variance backward (used in :class:`.Normalizer`). Can process both the
        diagonal variances returned by predict_f, as well as full covariance matrices.

        :param Yvar: size N x N x P or size N x P
        :return: Yvar scaled, same rank and size as input
        """
        rank = tf.rank(Yvar)
        # Because TensorFlow evaluates both fn1 and fn2, the transpose can't be in the same line. If a full
        # covariance matrix is provided, fn1 turns it into a rank 4 tensor and then tries to transpose it as
        # a rank 3 tensor. Splitting it into two steps, however, works fine.
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.matrix_diag(tf.transpose(Yvar)), lambda: Yvar)
        Yvar = tf.cond(tf.equal(rank, 2), lambda: tf.transpose(Yvar, perm=[1, 2, 0]), lambda: Yvar)

        N = tf.shape(Yvar)[0]
        D = tf.shape(Yvar)[2]
        L = tf.cholesky(tf.square(tf.transpose(self.A)))
        Yvar = tf.reshape(Yvar, [N * N, D])
        scaled_var = tf.reshape(tf.transpose(tf.cholesky_solve(L, tf.transpose(Yvar))), [N, N, D])
        return tf.cond(tf.equal(rank, 2), lambda: tf.reduce_sum(scaled_var, axis=1), lambda: scaled_var)

    def assign(self, other):
        """
        Assign the parameters of another :class:`LinearTransform`. Can be useful to avoid graph re-compilation.

        :param other: :class:`.LinearTransform` object
        """
        assert other is not None
        assert isinstance(other, LinearTransform)
        self.A.set_data(other.A.value)
        self.b.set_data(other.b.value)

    def __invert__(self):
        A_inv = np.linalg.inv(self.A.value.T)
        return LinearTransform(A_inv, -np.dot(self.b.value, A_inv))

    def __str__(self):
        return 'XA + b'
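# Illustrative sketch (not part of the library): the forward map Y = X A^T + b and its
# inverse with plain NumPy, for the diagonal-A case used by the Normalizer. The library
# methods above solve a Cholesky system instead of forming an explicit inverse.
# The helper name _linear_transform_demo is hypothetical.
def _linear_transform_demo():
    import numpy as np
    A = np.diag([2.0, 0.5])                       # per-output scaling (vector A becomes diag(A))
    b = np.array([1.0, -1.0])
    X = np.random.rand(5, 2)
    Y = X.dot(A.T) + b                            # forward: Y = X A^T + b
    X_rec = np.linalg.solve(A.T, (Y - b).T).T     # backward: X = (Y - b) (A^T)^{-1}
    assert np.allclose(X, X_rec)
    return Y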