Example #1
    def __init__(self):
        # Likelihood and its log, both built on the logistic sigmoid
        self.likelihood = lambda x, y: utils.logistic_sigmoid(x, y)
        self.loglikelihood = lambda x, y: np.log(self.likelihood(x, y))
        self.max_iter = 100  # cap on fitting iterations
        self.posterior_mode = None  # set once the model is fitted
        # Squared-exponential (RBF) covariance and its hyperparameters
        self.covariance_function = utils.squared_exponential
        self.covariance_explicit = utils.squared_exponential_explicit
        self.covariance_params = [0., 0., 0.]  # kernel hyperparameters, zero-initialized
        self.is_fitted = False
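
Example #1 assumes a `utils` module that supplies `logistic_sigmoid`, `squared_exponential`, and `squared_exponential_explicit`, and Examples #2 and #3 call `logistic_sigmoid` directly; none of those helpers are shown. A minimal sketch of the first two (the names come from the snippets; the bodies, including the log-scale reading of the three-element hyperparameter vector, are assumptions):

import numpy as np

def logistic_sigmoid(x, y=None):
    # sigmoid(x) with one argument, sigmoid(x * y) with two; the two-argument
    # form matches the likelihood p(y | f) = sigmoid(y * f) for labels in {-1, +1}.
    z = np.asarray(x, dtype=float) if y is None else np.asarray(x, dtype=float) * y
    # Stable evaluation: only exp(-|z|) is ever computed, so no overflow.
    e = np.exp(-np.abs(z))
    return np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

def squared_exponential(x1, x2, params):
    # Squared-exponential (RBF) kernel; params interpreted as
    # (log amplitude, log length-scale, log noise), matching the
    # zero-initialized covariance_params above (an assumption).
    log_amp, log_len, _ = params
    sq_dist = np.sum((np.asarray(x1) - np.asarray(x2)) ** 2)
    return np.exp(2.0 * log_amp) * np.exp(-0.5 * sq_dist / np.exp(2.0 * log_len))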
Example #2
    def hessian(A, x, y):
        """ Computes the Hessian of the logistic loss function at the current
        estimate x.

        H(x) = (1/n) * A^T D A
        D is the diagonal matrix with D_ii = sigmoid(z_i) * (1 - sigmoid(z_i)) : R^(n,n)
        z_i = y_i * <A_i, x> : R^1

        :param A: Design/Sensing matrix : R^(n,p)
        :param x: Regression weight vector : R^p
        :param y: Observation label vector : {-1,1}^n
        :return: Hessian H(x) : R^(p,p)
        """
        z = y * A.dot(x)  # decision value for each observation
        s = logistic_sigmoid(z)
        # D_ii = sigmoid(z_i) * (1 - sigmoid(z_i)), matching the docstring;
        # diag(sigmoid(z)) alone would drop the (1 - sigmoid) factor
        D = np.diag(s * (1 - s))
        return A.T.dot(D).dot(A) / y.size  # Hessian normalized by the num obs
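
For context, this Hessian (and the gradient in Example #3) belongs to the averaged logistic loss L(x) = (1/n) * sum_i log(1 + exp(-z_i)). A sketch of that loss in the same style, using the numerically stable logaddexp form (a choice of mine, not from the source):

def logistic_loss(A, x, y):
    # Averaged logistic loss for labels in {-1, +1}.
    z = y * A.dot(x)
    # log(1 + exp(-z)) evaluated stably as logaddexp(0, -z).
    return np.mean(np.logaddexp(0.0, -z))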
Example #3
    def gradient(A, x, y):
        """ Computes the gradient of the logistic loss function at the current
        estimate x.

        grad_x = (1/n) * sum_i{(sigmoid(z_i) - 1) * y_i * A_i} : R^p
        z_i = y_i * <A_i, x> : R^1

        :param A: Design/Sensing matrix : R^(n,p)
        :param x: Regression weight vector : R^p
        :param y: Observation label vector : {-1,1}^n
        :return: Normalized gradient grad_x : R^p
        """
        z = y * A.dot(x)  # decision value for each observation
        phi = (logistic_sigmoid(z) - 1) * y  # per-observation residual weight
        grad_x = A.T.dot(phi)
        # Gradient normalized by the num obs
        return grad_x / y.size
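
Together, `gradient` and `hessian` are enough to drive a Newton solver for logistic regression. A minimal sketch on synthetic data, assuming both routines are callable as plain functions (in the source they appear to live inside a class); the toy data, label flipping, ridge term, and stopping rule are all assumptions:

rng = np.random.default_rng(0)
n, p = 200, 5
A = rng.standard_normal((n, p))
x_true = rng.standard_normal(p)
y = np.where(A.dot(x_true) >= 0, 1.0, -1.0)
y[rng.random(n) < 0.1] *= -1.0  # flip 10% of labels so the data is not separable

x = np.zeros(p)
for it in range(25):
    g = gradient(A, x, y)
    if np.linalg.norm(g) < 1e-8:
        break  # stationary point reached
    H = hessian(A, x, y)
    # Tiny ridge keeps the Newton system solvable if H is near-singular.
    x -= np.linalg.solve(H + 1e-8 * np.eye(p), g)

print(it, np.linalg.norm(gradient(A, x, y)))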