Example #1
File: EI.py  Project: voegtlel/RoBO
    def __call__(self, X, derivative=False, **kwargs):
        """
        A call to the object returns the EI value and, optionally, its derivative.

        :param X: The point at which the function is to be evaluated.
        :type X: np.ndarray (1,D)
        :param derivative: This controls whether the derivative is to be returned.
        :type derivative: Boolean
        :return: The value of EI and optionally its derivative at X.
        :rtype: np.ndarray(1, 1) or (np.ndarray(1, 1), np.ndarray(1, D))
        """

        if X.shape[0] > 1:
            raise BayesianOptimizationError(
                BayesianOptimizationError.SINGLE_INPUT_ONLY,
                "EI is only for single X inputs")

        if len(X.shape) == 1:
            X = X[:, np.newaxis]

        if np.any(X < self.X_lower) or np.any(X > self.X_upper):
            if derivative:
                f = 0
                df = np.zeros((1, X.shape[1]))
                return np.array([[f]]), np.array([df])
            else:
                return np.array([[0]])

        m, v = self.model.predict(X, full_cov=True)
        incumbent = self.compute_incumbent(self.model)
        eta, _ = self.model.predict(np.array([incumbent]))

        s = np.sqrt(v)
        z = (eta - m - self.par) / s
        f = (eta - m - self.par) * norm.cdf(z) + s * norm.pdf(z)
        if derivative:
            dmdx, ds2dx = self.model.predictive_gradients(X)
            dmdx = dmdx[0]
            ds2dx = ds2dx[0][:, None]
            dsdx = ds2dx / (2 * s)
            df = (-dmdx * norm.cdf(z) + (dsdx * norm.pdf(z))).T
        # EI is non-negative by construction; negative values can only arise
        # from numerical noise. Record the offending indices before clamping
        # so the corresponding gradients are zeroed as well.
        neg = np.where(f < 0)[0]
        if neg.size > 0:
            f[neg] = 0.0
            if derivative:
                df[neg, :] = 0.0
        if len(f.shape) == 1:
            f = np.array([f])
        if derivative:
            if len(df.shape) == 3:
                return_df = df
            else:
                return_df = np.array([df])
            return f, return_df
        else:
            return f
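
For a standalone sanity check of the closed-form EI used above, here is a minimal sketch with made-up posterior values; m, v, eta, and par mirror the names in the snippet, and no model object is involved:

import numpy as np
from scipy.stats import norm

# Made-up posterior at a single test point; in the snippet these come from
# self.model.predict(X) and the prediction at the incumbent.
m, v = 0.2, 0.04   # predictive mean and variance
eta = 0.5          # predicted value at the incumbent
par = 0.0          # exploration offset

s = np.sqrt(v)
z = (eta - m - par) / s
ei = (eta - m - par) * norm.cdf(z) + s * norm.pdf(z)
print(ei)  # ~0.31; positive whenever improvement over eta is plausible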
Example #2
    def dh_fun(self, x):
        if x.shape[0] > 1:
            raise BayesianOptimizationError(
                BayesianOptimizationError.SINGLE_INPUT_ONLY,
                "dHdx_local is only for single x inputs")
        new_pmin = self.change_pmin_by_innovation(x, self.f)
        # Compare the relative entropy (w.r.t. the base measure lmb) of the
        # old and new pmin approximations
        H_old = np.sum(np.multiply(self.pmin, (self.logP + self.lmb)))
        H_new = np.sum(np.multiply(new_pmin, (np.log(new_pmin) + self.lmb)))

        return np.array([[-H_new + H_old]])
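
The returned quantity is the predicted change in relative entropy of the p_min approximation. A minimal sketch with toy distributions, assuming self.logP caches np.log(pmin) and self.lmb holds a log base measure (all values made up); with a uniform base measure the result reduces to entropy(new) - entropy(old):

import numpy as np

pmin = np.array([0.4, 0.3, 0.2, 0.1])        # current p_min over 4 representer points
new_pmin = np.array([0.7, 0.15, 0.1, 0.05])  # p_min after a fantasized observation at x
lmb = np.log(np.full(4, 0.25))               # hypothetical log base measure

H_old = np.sum(pmin * (np.log(pmin) + lmb))
H_new = np.sum(new_pmin * (np.log(new_pmin) + lmb))
print(-H_new + H_old)  # ~ -0.37: negative because the fantasized observation sharpens p_min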
Example #3
    def _gp_innovation_local(self, x):

        if x.shape[0] > 1:
            raise BayesianOptimizationError(
                BayesianOptimizationError.SINGLE_INPUT_ONLY,
                "single inputs please")

        m, v = self.model.predict(x)
        s = np.sqrt(v)
        v_projected = self.model.predict_variance(x, self.zb)
        Lx = v_projected / s
        dLxdx = None  # derivative of the innovation is not implemented here
        return Lx, dLxdx
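
Lx is the predicted cross-covariance between x and the representer points zb, scaled by the predictive standard deviation at x. A minimal sketch under a prior GP with no observations, where model.predict and model.predict_variance reduce to plain kernel evaluations; the rbf helper and the toy points are hypothetical:

import numpy as np

def rbf(a, b, ell=1.0):
    # Squared-exponential kernel matrix between the rows of a and b.
    d = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
    return np.exp(-0.5 * d / ell ** 2)

x = np.array([[0.3]])                  # candidate point, shape (1, D)
zb = np.array([[0.0], [0.5], [1.0]])   # representer points, shape (m, D)

v = rbf(x, x)                # prior variance at x
s = np.sqrt(v)
v_projected = rbf(zb, x)     # prior cross-covariance k(zb, x)
Lx = v_projected / s         # innovation vector, shape (m, 1)
print(Lx.ravel())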
Example #4
File: UCB.py  Project: voegtlel/RoBO
    def __call__(self, X, derivative=False, **kwargs):

        if derivative:
            raise BayesianOptimizationError(
                BayesianOptimizationError.NO_DERIVATIVE,
                "UCB does not support derivative calculation yet")
        if np.any(X < self.X_lower) or np.any(X > self.X_upper):
            return np.array([[-np.finfo(np.float64).max]])
        mean, var = self.model.predict(X)
        # We minimize f, so we maximize the negative lower confidence bound.
        return -(mean - self.par * np.sqrt(var))
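
The acquisition is the negated lower confidence bound: since the objective f is minimized, maximizing -(mean - par * std) favors points whose optimistic lower bound is small. A self-contained sketch with made-up numbers:

import numpy as np

mean, var = np.array([[0.2]]), np.array([[0.09]])  # toy posterior at one point
par = 2.0                                          # width of the confidence band

acq = -(mean - par * np.sqrt(var))
print(acq)  # [[0.4]]: the lower bound 0.2 - 2*0.3 = -0.4, negated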
Example #5
File: PI.py  Project: voegtlel/RoBO
    def __call__(self, X, incumbent, derivative=False, **kwargs):
        """
        A call to the object returns the PI value and, optionally, its derivative.

        :param X: The point at which the function is to be evaluated.
        :type X: np.ndarray (1, D)
        :param incumbent: The current incumbent
        :type incumbent: np.ndarray (1,D)
        :param derivative: This controls whether the derivative is to be returned.
        :type derivative: Boolean
        :return: The value of PI and optionally its derivative at X.
        :rtype: np.ndarray(1, 1) or (np.ndarray(1, 1), np.ndarray(1, D))
        """
        if X.shape[0] > 1:
            raise BayesianOptimizationError(
                BayesianOptimizationError.SINGLE_INPUT_ONLY,
                "PI is only for single x inputs")
        if np.any(X < self.X_lower) or np.any(X > self.X_upper):
            if derivative:
                f = 0
                df = np.zeros((1, X.shape[1]))
                return np.array([[f]]), np.array([df])
            else:
                return np.array([[0]])

        m, v = self.model.predict(X)
        eta, _ = self.model.predict(np.array([incumbent]))
        s = np.sqrt(v)
        z = (eta - m - self.par) / s
        f = norm.cdf(z)
        if derivative:
            dmdx, ds2dx = self.model.predictive_gradients(X)
            dmdx = dmdx[0]
            ds2dx = ds2dx[0][:, None]
            dsdx = ds2dx / (2 * s)
            df = (-(-norm.pdf(z) / s) * (dmdx + dsdx * z)).T

        if len(f.shape) == 1:
            return_f = np.array([f])
        else:
            return_f = f
        if derivative:
            if len(df.shape) == 3:
                return_df = df
            else:
                return_df = np.array([df])

            return return_f, return_df
        else:
            return return_f
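
PI is the posterior probability mass below the (shifted) incumbent value, which can be cross-checked by Monte Carlo. A minimal sketch with made-up numbers:

import numpy as np
from scipy.stats import norm

m, s = 0.2, 0.2     # toy predictive mean and standard deviation at X
eta, par = 0.5, 0.0

z = (eta - m - par) / s
pi_closed = norm.cdf(z)

# Monte Carlo check: fraction of posterior samples that beat eta - par.
rng = np.random.default_rng(0)
samples = rng.normal(m, s, size=100_000)
pi_mc = np.mean(samples < eta - par)
print(pi_closed, pi_mc)  # both ~ 0.93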
Example #6
    def __call__(self, X, derivative=False, **kwargs):
        """
        :param X: The point at which the function is to be evaluated. Its shape is (1, D), where D is the dimension of the search space.
        :type X: np.ndarray (1, D)
        :param derivative: Controls whether the derivative is calculated and returned.
        :type derivative: Boolean
        :return: The expected difference of the loss function at X and optionally its derivative.
        :rtype: np.ndarray(1, 1) or (np.ndarray(1, 1), np.ndarray(1, D))
        :raises BayesianOptimizationError: if X.shape[0] > 1. Only a single X can be evaluated.
        """
        if derivative:
            raise BayesianOptimizationError(
                BayesianOptimizationError.NO_DERIVATIVE,
                "EntropyMC does not support derivative calculation yet")
        return self.dh_fun(X)
Example #7
    def __call__(self, X, derivative=False, **kwargs):
        """
        :param X: The point at which the function is to be evaluated. Its shape is (1, D), where D is the dimension of the search space.
        :type X: np.ndarray (1, D)
        :param derivative: Controls whether the derivative is calculated and returned.
        :type derivative: Boolean
        :return: The expected difference of the loss function at X and optionally its derivative.
        :rtype: np.ndarray(1, 1) or (np.ndarray(1, 1), np.ndarray(1, D))
        :raises BayesianOptimizationError: if X.shape[0] > 1. Only a single X can be evaluated.
        """
        if X.shape[0] > 1:
            raise BayesianOptimizationError(
                BayesianOptimizationError.SINGLE_INPUT_ONLY,
                "Entropy is only for single X inputs")
        if np.any(X < self.X_lower) or np.any(X > self.X_upper):
            if derivative:
                f = 0
                df = np.zeros((1, X.shape[1]))
                return np.array([[f]]), np.array([df])
            else:
                return np.array([[0]])
        return self.dh_fun(X, invertsign=True, derivative=derivative)
Example #8
File: LogEI.py  Project: voegtlel/RoBO
    def __call__(self, X, derivative=False, **kwargs):
        """
        A call to the object returns the log(EI) value.

        :param X: The point at which the function is to be evaluated.
        :type X: np.ndarray (1,D)
        :param derivative: This controls whether the derivative is to be returned.
        :type derivative: Boolean
        :return: The value of log(EI)
        :rtype: np.ndarray(1, 1)
        :raises BayesianOptimizationError: if X.shape[0] > 1. Only single X can be evaluated.
        """

        if derivative:
            raise BayesianOptimizationError(
                BayesianOptimizationError.NO_DERIVATIVE,
                "LogEI does not support derivative calculation until now")

        if len(X.shape) == 1:
            X = X[:, np.newaxis]

        if np.any(X < self.X_lower) or np.any(X > self.X_upper):
            return np.array([[-np.finfo(np.float64).max]])
        m, v = self.model.predict(X)

        eta, _ = self.model.predict(
            np.array([self.compute_incumbent(self.model)]))

        f_min = eta - self.par

        s = np.sqrt(v)

        z = (f_min - m) / s

        log_ei = np.zeros((m.size, 1))
        for i in range(0, m.size):
            mu, sigma = m[i], s[i]

            # Degenerate case 1: first term vanishes
            if np.abs(f_min - mu) == 0:
                if sigma > 0:
                    log_ei[i] = np.log(sigma) + norm.logpdf(z[i])
                else:
                    log_ei[i] = -np.inf
            # Degenerate case 2: second term vanishes and first term has a special form.
            elif sigma == 0:
                if mu < f_min:
                    log_ei[i] = np.log(f_min - mu)
                else:
                    log_ei[i] = -np.inf
            # Normal case
            else:
                b = np.log(sigma) + norm.logpdf(z[i])
                # log(y+z) is tricky, we distinguish two cases:
                if f_min > mu:
                    # When y>0, z>0, we define a=ln(y), b=ln(z).
                    # Then y+z = exp[ max(a,b) + ln(1 + exp(-|b-a|)) ],
                    # and thus log(y+z) = max(a,b) + ln(1 + exp(-|b-a|))
                    a = np.log(f_min - mu) + norm.logcdf(z[i])

                    log_ei[i] = max(a, b) + np.log(1 + np.exp(-abs(b - a)))
                else:
                    # When y<0, z>0, we define a=ln(-y), b=ln(z), and it has to be true that b >= a in order to satisfy y+z>=0.
                    # Then y+z = exp[ b + ln(exp(b-a) -1) ],
                    # and thus log(y+z) = a + ln(exp(b-a) -1)
                    a = np.log(mu - f_min) + norm.logcdf(z[i])
                    if a >= b:
                        # a >= b can only happen due to numerical inaccuracies or approximation errors
                        log_ei[i] = -np.inf
                    else:
                        log_ei[i] = b + np.log(1 - np.exp(a - b))

        return log_ei
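
The two branches of the normal case rely on standard identities for computing log(y + z) stably from a = log(|y|) and b = log(z). A quick numerical check of both identities (the values are arbitrary):

import numpy as np

y, w = 0.3, 0.7                 # case y > 0, z > 0 (w plays the role of z)
a, b = np.log(y), np.log(w)
lhs = np.log(y + w)
rhs = max(a, b) + np.log(1 + np.exp(-abs(b - a)))
print(np.isclose(lhs, rhs))     # True

y = -0.3                        # case y < 0 with y + w > 0, so b > a
a = np.log(-y)
lhs = np.log(y + w)
rhs = b + np.log(1 - np.exp(a - b))
print(np.isclose(lhs, rhs))     # True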