Example #1
    def average_label_entropy(self,
                              X,
                              y,
                              theta=None,
                              eval_gradient=False,
                              verbose=False):
        '''Evaluate the average label entropy of the Gaussian field model on a
        dataset.

        Parameters
        ----------
        X: 2D array or list of objects
            Feature vectors or other generic representations of input data.
        y: 1D array
            Label of each data point. Values of None or NaN indicate
            missing labels that will be filled in by the model.
        theta: 1D array
            Hyperparameters for the weight class.
        eval_gradient: bool
            Whether or not to evaluate the gradient of the average label
            entropy with respect to the weight hyperparameters.
        verbose: bool
            If True, print out some additional information as a markdown table.

        Returns
        -------
        average_label_entropy: float
            The average label entropy of the Gaussian field prediction on the
            unlabeled nodes.
        grad: 1D array
            Gradient with respect to the hyperparameters. Only returned when
            eval_gradient is True.
        '''
        if theta is not None:
            self.weight.theta = theta

        if eval_gradient is True:
            z, dz, t_metric, t_solve, t_chain = self._predict_gradient(X, y)
        else:
            z = self._predict(X, y)
        # average binary entropy H(z) = -(z*log(z) + (1-z)*log(1-z)) of the
        # field predictions; assumes z lies strictly inside (0, 1)
        loss = -np.mean(z * np.log(z) + (1 - z) * np.log(1 - z))
        if eval_gradient is True:
            # dloss is -dH/dz = log(z) - log(1 - z); the leading minus sign in
            # grad restores dH/dz, and exp(theta) maps the gradient to the
            # log-scale hyperparameters
            dloss = np.log(z) - np.log(1 - z)
            grad = -np.mean(dloss * dz.T, axis=1) * np.exp(self.weight.theta)
            retval = (loss, grad)
        else:
            retval = loss

        if verbose and eval_gradient is True:
            mprint.table(
                ('Avg.Entropy', '%12.5g', loss),
                ('Gradient', '%12.5g', np.linalg.norm(grad)),
                ('Metric time', '%12.2g', t_metric),
                ('Solver time', '%12.2g', t_solve),
                ('BackProp time', '%14.2g', t_chain),
            )

        return retval
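
The loss above is the mean binary entropy of the field predictions z over the
unlabeled nodes. A self-contained sketch of just that computation (NumPy only;
the hard-coded z stands in for the model call, which is not reproduced here):

    import numpy as np

    z = np.array([0.2, 0.5, 0.9])    # stand-in for self._predict(X, y)
    # binary entropy H(z) = -(z*log(z) + (1-z)*log(1-z)), averaged over nodes
    loss = -np.mean(z * np.log(z) + (1 - z) * np.log(1 - z))
    print(loss)                      # ~0.51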
Example #2
def test_markdown_table_multicol():
    out = sys.stdout = StringIO()
    markdown.table(('Hello', '%9d', 0), ('Hello', '%12f', 0),
                   ('Hello', '%15g', 0),
                   print_header=False)
    cols = out.getvalue().strip().strip('|').split('|')
    assert (len(cols) == 3)
    assert (len(cols[0]) == 9)
    assert (len(cols[1]) == 12)
    assert (len(cols[2]) == 15)
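
The assertions pin down the calling convention: each argument to
markdown.table is a (header, printf_format, value) triple, and the printf
field width sets the column width. A sketch of the expected output, inferred
from the assertions above rather than from the library docs:

    markdown.table(('epoch', '%9d', 0), ('loss', '%12f', 0.5),
                   print_header=False)
    # expected: a single row such as
    # |        0|    0.500000|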
Example #3
def test_markdown_row_print_header():
    out = sys.stdout = StringIO()
    markdown.table_start()
    markdown.table(
        ('Hello', '%9d', 0),
        ('Hello', '%12f', 0),
        ('Hello', '%15g', 0),
    )
    assert (len(out.getvalue().strip().split('\n')) == 3)

    out = sys.stdout = StringIO()
    markdown.table(
        ('Hello', '%9d', 0),
        ('Hello', '%12f', 0),
        ('Hello', '%15g', 0),
    )
    assert (len(out.getvalue().strip().split('\n')) == 1)

    out = sys.stdout = StringIO()
    markdown.table(('Hello', '%9d', 0), ('Hello', '%12f', 0),
                   ('Hello', '%15g', 0),
                   print_header=True)
    assert (len(out.getvalue().strip().split('\n')) == 3)
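
Read together, the assertions document the header protocol:
markdown.table_start() arms header printing, so the next table() call emits
header, separator, and data row (3 lines); subsequent calls emit only the data
row (1 line) unless print_header=True forces the full 3-line form again. A
typical training-loop use would therefore look like this (values hypothetical):

    markdown.table_start()
    for epoch in range(3):
        markdown.table(('epoch', '%7d', epoch),
                       ('loss', '%10.3g', 1.0 / (epoch + 1)))
    # header and separator print once, then one row per epoch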
Example #4
    def log_marginal_likelihood(self,
                                theta=None,
                                C=None,
                                X=None,
                                y=None,
                                eval_gradient=False,
                                clone_kernel=True,
                                verbose=False):
        """Returns the log-marginal likelihood of a given set of log-scale
        hyperparameters.

        Parameters
        ----------
        theta: array-like
            Kernel hyperparameters for which the log-marginal likelihood is
            to be evaluated. If None, the current hyperparameters will be used.
        C: list of objects or feature vectors.
            The core set that defines the subspace of low-rank approximation.
            If None, `self.C` will be used.
        X: list of objects or feature vectors.
            Input values of the training data. If None, `self.X` will be used.
        y: 1D array
            Output/target values of the training data. If None, `self.y` will
            be used.
        eval_gradient: boolean
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta will be returned
            alongside.
        clone_kernel: boolean
            If True, the kernel is copied so that probing with theta does not
            alter the trained kernel. If False, the kernel hyperparameters will
            be modified in-place.
        verbose: boolean
            If True, the log-likelihood value and its components will be
            printed to the screen.

        Returns
        -------
        log_likelihood: float
            Log-marginal likelihood of theta for training data.
        log_likelihood_gradient: 1D array
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta. Only returned when eval_gradient
            is True.
        """
        theta = theta if theta is not None else self.kernel.theta
        C = C if C is not None else self._C
        X = X if X is not None else self._X
        if y is not None:
            y_mask, y = self.mask(y)
        else:
            y = self._y
            y_mask = self._y_mask

        if clone_kernel is True:
            kernel = self.kernel.clone_with_theta(theta)
        else:
            kernel = self.kernel
            kernel.theta = theta

        t_kernel = time.perf_counter()

        if eval_gradient is True:
            Kxc, d_Kxc = self._gramian(None, X, C, kernel=kernel, jac=True)
            Kcc, d_Kcc = self._gramian(self.alpha, C, kernel=kernel, jac=True)
            Kxc, d_Kxc = Kxc[y_mask, :], d_Kxc[y_mask, :, :]
        else:
            Kxc = self._gramian(None, X, C, kernel=kernel)
            Kcc = self._gramian(self.alpha, C, kernel=kernel)
            Kxc = Kxc[y_mask, :]

        t_kernel = time.perf_counter() - t_kernel

        t_linalg = time.perf_counter()

        Kcc_rsqrt = self._corespace(Kcc=Kcc)
        # Nystrom approximation: represent the full Gramian by the low-rank
        # product F @ F.T with F = Kxc @ Kcc^(-1/2)
        F = Kxc @ Kcc_rsqrt
        K = lr.dot(F, rcond=self.beta, mode='clamp')
        K_inv = K.pinv()

        logdet = K.logdet()
        Ky = K_inv @ y
        yKy = y @ Ky
        logP = yKy + logdet

        if eval_gradient is True:
            D_theta = np.zeros_like(theta)
            K_inv2 = K_inv**2
            for i, t in enumerate(theta):
                d_F = d_Kxc[:, :, i] @ Kcc_rsqrt
                d_K = lr.dot(F, d_F.T) + lr.dot(d_F, F.T) - lr.dot(
                    F @ Kcc_rsqrt.T @ d_Kcc[:, :, i], Kcc_rsqrt @ F.T)
                d_logdet = (K_inv @ d_K).trace()
                d_Kinv_part = K_inv2 @ d_K - K_inv2 @ d_K @ (K @ K_inv)
                d_Kinv = d_Kinv_part + d_Kinv_part.T - K_inv @ d_K @ K_inv
                d_yKy = d_Kinv.quadratic(y, y)
                D_theta[i] = (d_logdet + d_yKy) * np.exp(t)
            retval = (logP, D_theta)
        else:
            retval = logP

        t_linalg = time.perf_counter() - t_linalg

        if verbose and eval_gradient is True:
            mprint.table(
                ('logP', '%12.5g', yKy + logdet),
                ('dlogP', '%12.5g', np.linalg.norm(D_theta)),
                ('y^T.K.y', '%12.5g', yKy),
                ('log|K| ', '%12.5g', logdet),
                ('Cond(K)', '%12.5g', K.cond()),
                ('GPU time', '%10.2g', t_kernel),
                ('CPU time', '%10.2g', t_linalg),
            )

        return retval
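
The linear algebra above rests on the Nystrom identity: with core set C, the
full Gramian is approximated as K ~ Kxc Kcc^-1 Kcx = F F^T with
F = Kxc Kcc^(-1/2). A NumPy-only sanity sketch of that identity (random
matrices stand in for the kernel Gramians; this is an illustration, not the
library's _corespace implementation):

    import numpy as np

    rng = np.random.default_rng(0)
    A = rng.standard_normal((8, 3))
    Kxc = A @ A[:3].T                        # 8 x 3 cross Gramian
    Kcc = A[:3] @ A[:3].T                    # 3 x 3 core Gramian, SPD
    w, V = np.linalg.eigh(Kcc)
    Kcc_rsqrt = V @ np.diag(w**-0.5) @ V.T   # symmetric inverse square root
    F = Kxc @ Kcc_rsqrt
    assert np.allclose(F @ F.T, Kxc @ np.linalg.inv(Kcc) @ Kxc.T)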
Example #5
    def squared_loocv_error(self,
                            theta=None,
                            X=None,
                            y=None,
                            eval_gradient=False,
                            clone_kernel=True,
                            verbose=False):
        """Returns the squared LOOCV error of a given set of log-scale
        hyperparameters.

        Parameters
        ----------
        theta: array-like
            Kernel hyperparameters for which the squared LOOCV error is to be
            evaluated. If None, the current hyperparameters will be used.
        X: list of objects or feature vectors.
            Input values of the training data. If None, `self.X` will be used.
        y: 1D array
            Output/target values of the training data. If None, `self.y` will
            be used.
        eval_gradient: boolean
            If True, the gradient of the squared LOOCV error with respect
            to the kernel hyperparameters at position theta will be returned
            alongside.
        clone_kernel: boolean
            If True, the kernel is copied so that probing with theta does not
            alter the trained kernel. If False, the kernel hyperparameters will
            be modified in-place.
        verbose: boolean
            If True, the LOOCV error value and its components will be
            printed to the screen.

        Returns
        -------
        squared_error: float
            Squared LOOCV error of theta for training data.
        squared_error_gradient: 1D array
            Gradient of the squared LOOCV error with respect to the kernel
            hyperparameters at position theta. Only returned when eval_gradient
            is True.
        """
        theta = theta if theta is not None else self.kernel.theta
        X = X if X is not None else self._X
        if y is not None:
            y_mask, y = self.mask(y)
        else:
            y = self._y
            y_mask = self._y_mask

        if clone_kernel is True:
            kernel = self.kernel.clone_with_theta(theta)
        else:
            kernel = self.kernel
            kernel.theta = theta

        t_kernel = time.perf_counter()

        if eval_gradient is True:
            K, dK = self._gramian(self.alpha, X, kernel=kernel, jac=True)
            K = K[y_mask, :][:, y_mask]
            dK = dK[y_mask, :, :][:, y_mask, :]
        else:
            K = self._gramian(self.alpha, X, kernel=kernel)
            K = K[y_mask, :][:, y_mask]

        t_kernel = time.perf_counter() - t_kernel

        t_linalg = time.perf_counter()

        Kinv, logdet = self._invert(K, rcond=self.beta)
        if not isinstance(Kinv, np.ndarray):
            Kinv = Kinv.todense()
        Kinv_diag = Kinv.diagonal()
        Ky = Kinv @ y
        # closed-form LOOCV residuals: e_i = (K^-1 y)_i / (K^-1)_ii, so the
        # n leave-one-out folds never have to be refit
        e = Ky / Kinv_diag
        squared_error = 0.5 * np.sum(e**2)

        if eval_gradient is True:
            D_theta = np.zeros_like(theta)
            for i, t in enumerate(theta):
                dk = dK[:, :, i]
                KdK = Kinv @ dk
                D_theta[i] = (
                    -(e / Kinv_diag) @ (KdK @ Ky) +
                    (e**2 / Kinv_diag) @ (KdK @ Kinv).diagonal()) * np.exp(t)
            retval = (squared_error, D_theta)
        else:
            retval = squared_error

        t_linalg = time.perf_counter() - t_linalg

        if verbose and eval_gradient is True:
            mprint.table(
                ('Sq.Err.', '%12.5g', squared_error),
                ('d(SqErr)', '%12.5g', np.linalg.norm(D_theta)),
                ('log|K| ', '%12.5g', logdet),
                ('Cond(K)', '%12.5g', np.linalg.cond(K)),
                ('t_GPU (s)', '%10.2g', t_kernel),
                ('t_CPU (s)', '%10.2g', t_linalg),
            )

        return retval
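
The residual formula e = Ky / Kinv_diag is the closed-form LOOCV identity for
Gaussian process regression: the leave-one-out residual of sample i equals
(K^-1 y)_i / (K^-1)_ii, so the n folds never have to be refit. A brute-force
check of the identity (NumPy only; K is an arbitrary SPD stand-in):

    import numpy as np

    rng = np.random.default_rng(1)
    B = rng.standard_normal((6, 6))
    K = B @ B.T + 6 * np.eye(6)       # SPD stand-in for the kernel matrix
    y = rng.standard_normal(6)

    Kinv = np.linalg.inv(K)
    e_fast = (Kinv @ y) / Kinv.diagonal()

    e_slow = np.empty(6)
    for i in range(6):
        m = np.arange(6) != i
        # posterior mean at point i, trained without sample i
        mu = K[i, m] @ np.linalg.solve(K[m][:, m], y[m])
        e_slow[i] = y[i] - mu
    assert np.allclose(e_fast, e_slow)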
Example #6
    def log_marginal_likelihood(self,
                                theta=None,
                                X=None,
                                y=None,
                                eval_gradient=False,
                                clone_kernel=True,
                                verbose=False):
        """Returns the log-marginal likelihood of a given set of log-scale
        hyperparameters.

        Parameters
        ----------
        theta: array-like
            Kernel hyperparameters for which the log-marginal likelihood is
            to be evaluated. If None, the current hyperparameters will be used.
        X: list of objects or feature vectors.
            Input values of the training data. If None, `self.X` will be used.
        y: 1D array
            Output/target values of the training data. If None, `self.y` will
            be used.
        eval_gradient: boolean
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta will be returned
            alongside.
        clone_kernel: boolean
            If True, the kernel is copied so that probing with theta does not
            alter the trained kernel. If False, the kernel hyperparameters will
            be modified in-place.
        verbose: boolean
            If True, the log-likelihood value and its components will be
            printed to the screen.

        Returns
        -------
        log_likelihood: float
            Log-marginal likelihood of theta for training data.
        log_likelihood_gradient: 1D array
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta. Only returned when eval_gradient
            is True.
        """
        theta = theta if theta is not None else self.kernel.theta
        X = X if X is not None else self._X
        if y is not None:
            y_mask, y = self.mask(y)
        else:
            y = self._y
            y_mask = self._y_mask

        if clone_kernel is True:
            kernel = self.kernel.clone_with_theta(theta)
        else:
            kernel = self.kernel
            kernel.theta = theta

        t_kernel = time.perf_counter()

        if eval_gradient is True:
            K, dK = self._gramian(self.alpha, X, kernel=kernel, jac=True)
            K = K[y_mask, :][:, y_mask]
            dK = dK[y_mask, :, :][:, y_mask, :]
        else:
            K = self._gramian(self.alpha, X, kernel=kernel)
            K = K[y_mask, :][:, y_mask]

        t_kernel = time.perf_counter() - t_kernel

        t_linalg = time.perf_counter()

        Kinv, logdet = self._invert(K, rcond=self.beta)
        Ky = Kinv @ y
        yKy = y @ Ky

        if eval_gradient is True:
            if not isinstance(Kinv, np.ndarray):
                Kinv = Kinv.todense()
            # tr(K^-1 dK) - (K^-1 y)^T dK (K^-1 y), vectorized over the last
            # axis of dK (one slice per hyperparameter)
            d_theta = (np.einsum('ij,ijk->k', Kinv, dK) -
                       np.einsum('i,ijk,j', Ky, dK, Ky))
            retval = (yKy + logdet, d_theta * np.exp(theta))
        else:
            retval = yKy + logdet

        t_linalg = time.perf_counter() - t_linalg

        if verbose and eval_gradient is True:
            mprint.table(
                ('logP', '%12.5g', yKy + logdet),
                ('dlogP', '%12.5g', np.linalg.norm(d_theta)),
                ('y^T.K.y', '%12.5g', yKy),
                ('log|K| ', '%12.5g', logdet),
                ('Cond(K)', '%12.5g', np.linalg.cond(K)),
                ('GPU time', '%10.2g', t_kernel),
                ('CPU time', '%10.2g', t_linalg),
            )

        return retval
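
The two einsum contractions implement the textbook gradient of
L(theta) = y^T K^-1 y + log|K|: tr(K^-1 dK) from the log-determinant and
-(K^-1 y)^T dK (K^-1 y) from the quadratic term, with np.exp(theta) converting
to log-scale hyperparameters. A finite-difference sanity sketch on a toy
squared-exponential kernel with a single length scale (an assumption made for
illustration):

    import numpy as np

    rng = np.random.default_rng(2)
    x = rng.standard_normal(5)
    y = rng.standard_normal(5)
    D2 = (x[:, None] - x[None, :])**2

    def L_and_grad(ell):
        K = np.exp(-0.5 * D2 / ell**2) + 1e-2 * np.eye(5)
        dK = K * D2 / ell**3    # elementwise; the jitter sits where D2 == 0
        Kinv = np.linalg.inv(K)
        Ky = Kinv @ y
        L = y @ Ky + np.linalg.slogdet(K)[1]
        g = np.einsum('ij,ij->', Kinv, dK) - Ky @ dK @ Ky
        return L, g

    _, g0 = L_and_grad(1.0)
    h = 1e-5
    Lp, _ = L_and_grad(1.0 + h)
    Lm, _ = L_and_grad(1.0 - h)
    assert np.isclose(g0, (Lp - Lm) / (2 * h), rtol=1e-4, atol=1e-7)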
Example #7
    def loocv_error(self,
                    X,
                    y,
                    p=2,
                    theta=None,
                    eval_gradient=False,
                    verbose=False):
        '''Evaluate the leave-one-out cross validation error and gradient.

        Parameters
        ----------
        X: 2D array or list of objects
            Feature vectors or other generic representations of input data.
        y: 1D array
            Label of each data point. Values of None or NaN indicate
            missing labels that will be filled in by the model.
        p: float > 1
            The order of the p-norm for LOOCV error.
        theta: 1D array
            Hyperparameters for the weight class.
        eval_gradient: bool
            Whether or not to evaluate the gradient of the LOOCV error with
            respect to the weight hyperparameters.
        verbose: bool
            If True, print out some additional information as a markdown table.

        Returns
        -------
        err: float
            The leave-one-out cross validation error in the given p-norm.
        grad: 1D array
            Gradient with respect to the hyperparameters. Only returned when
            eval_gradient is True.
        '''
        if theta is not None:
            self.weight.theta = theta

        labeled = np.isfinite(y)
        y = y[labeled]
        n = len(y)
        t_metric = time.perf_counter()
        if eval_gradient is True:
            W, dW = self.weight(X[labeled], eval_gradient=True)
        else:
            if self.weight == 'precomputed':
                W = X[labeled, :][:, labeled]
            else:
                W = self.weight(X[labeled])
        t_metric = time.perf_counter() - t_metric

        t_chain = time.perf_counter()
        W += self.smoothing
        D = W.sum(axis=1)
        # row-normalized smoother P = D^-1 W; e is the residual of each
        # labeled node's weighted-average prediction
        P = (1 / D)[:, None] * W
        e = y - P @ y
        loocv_error_p = np.mean(np.abs(e)**p)
        loocv_error = loocv_error_p**(1 / p)
        if eval_gradient is True:
            derr_de = (loocv_error_p**(1 / p - 1) * np.abs(e)**(p - 1) *
                       np.sign(e) / n)
            derr_dtheta = (np.einsum('pq, pqi',
                                     (derr_de / D**2 * (W @ y))[:, None], dW) -
                           np.einsum('p, q, pqi', derr_de / D, y, dW))
            retval = (loocv_error, derr_dtheta)
        else:
            retval = loocv_error
        t_chain = time.perf_counter() - t_chain

        if verbose and eval_gradient is True:
            mprint.table(
                ('LOOCV Err.', '%12.5g', loocv_error),
                ('Gradient', '%12.5g', np.linalg.norm(derr_dtheta)),
                ('Metric time', '%12.2g', t_metric),
                ('BackProp time', '%14.2g', t_chain),
            )

        return retval
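
The smoother behind e is the row-normalized weight matrix P = D^-1 W: P @ y is
each labeled node's weighted average of the labels, and e the resulting
residual. A self-contained run of that forward pass (random weights stand in
for self.weight; the zeroed diagonal is an assumption made for illustration so
that each prediction excludes the node's own label):

    import numpy as np

    rng = np.random.default_rng(3)
    W = rng.random((4, 4))
    np.fill_diagonal(W, 0.0)
    W += 1e-4                             # smoothing, as in the method above
    y = rng.random(4)
    P = W / W.sum(axis=1, keepdims=True)  # P = D^-1 W
    e = y - P @ y
    p = 2
    loocv_error = np.mean(np.abs(e)**p)**(1 / p)
    print(loocv_error)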
Example #8
    def log_marginal_likelihood(self,
                                theta_ext,
                                X=None,
                                y=None,
                                eval_gradient=False,
                                clone_kernel=True,
                                verbose=False):
        """Returns the log-marginal likelihood of a given set of log-scale
        hyperparameters.

        Parameters
        ----------
        theta_ext: array-like
            Kernel hyperparameters and per-sample noise prior, concatenated
            in log scale, for which the log-marginal likelihood is to be
            evaluated.
        X: list of objects or feature vectors.
            Input values of the training data. If None, `self.X` will be used.
        y: 1D array
            Output/target values of the training data. If None, `self.y` will
            be used.
        eval_gradient: boolean
            If True, the gradient of the log-marginal likelihood with respect
            to the kernel hyperparameters at position theta will be returned
            alongside.
        clone_kernel: boolean
            If True, the kernel is copied so that probing with theta does not
            alter the trained kernel. If False, the kernel hyperparameters will
            be modified in-place.
        verbose: boolean
            If True, the log-likelihood value and its components will be
            printed to the screen.

        Returns
        -------
        log_likelihood: float
            Log-marginal likelihood of theta for training data.
        log_likelihood_gradient: 1D array
            Gradient of the log-marginal likelihood with respect to the kernel
            hyperparameters at position theta. Only returned when eval_gradient
            is True.
        """
        X = X if X is not None else self._X
        y = y if y is not None else self._y
        theta, log_sigma = fold_like(theta_ext, (self.kernel.theta, y))
        sigma = np.exp(log_sigma)

        if clone_kernel is True:
            kernel = self.kernel.clone_with_theta(theta)
        else:
            kernel = self.kernel
            kernel.theta = theta

        t_kernel = time.perf_counter()

        if eval_gradient is True:
            K, dK = self._gramian(sigma**2, X, kernel=kernel, jac=True)
        else:
            K = self._gramian(sigma**2, X, kernel=kernel)

        t_kernel = time.perf_counter() - t_kernel

        t_linalg = time.perf_counter()

        Kinv, logdet = self._invert_pseudoinverse(K, rcond=self.beta)
        Kinv_diag = Kinv.diagonal()
        Ky = Kinv @ y
        yKy = y @ Ky

        if eval_gradient is True:
            d_theta = (np.einsum('ij,ijk->k', Kinv, dK) -
                       np.einsum('i,ijk,j', Ky, dK, Ky))
            d_alpha = (Kinv_diag - Ky**2) * 2 * sigma
            retval = (yKy + logdet, np.concatenate(
                (d_theta, d_alpha)) * np.exp(theta_ext))
        else:
            retval = yKy + logdet

        t_linalg = time.perf_counter() - t_linalg

        if verbose and eval_gradient is True:
            mprint.table(
                ('logP', '%12.5g', yKy + logdet),
                ('dlogP', '%12.5g', np.linalg.norm(d_theta)),
                ('y^T.K.y', '%12.5g', yKy),
                ('log|K| ', '%12.5g', logdet),
                ('Cond(K)', '%12.5g', np.linalg.cond(K)),
                ('GPU time', '%10.2g', t_kernel),
                ('CPU time', '%10.2g', t_linalg),
            )

        return retval
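
Unlike the plain version in Example #6, the hyperparameter vector here carries
one log-noise term per training sample: fold_like splits theta_ext back into
the kernel theta and log_sigma, and sigma**2 enters the Gramian diagonal. The
per-sample noise gradient is dL/d(sigma_i^2) = (K^-1)_ii - (K^-1 y)_i^2, and
the combined factor 2*sigma times np.exp(theta_ext) applies the chain rule to
log sigma. A finite-difference check of that noise gradient (NumPy only; the
fixed SPD matrix K0 stands in for the kernel part):

    import numpy as np

    rng = np.random.default_rng(4)
    B = rng.standard_normal((5, 5))
    K0 = B @ B.T                     # fixed kernel part
    y = rng.standard_normal(5)

    def L(log_sigma):
        K = K0 + np.diag(np.exp(2 * log_sigma))
        return y @ np.linalg.solve(K, y) + np.linalg.slogdet(K)[1]

    ls = np.zeros(5)                 # sigma = 1 for every sample
    Kinv = np.linalg.inv(K0 + np.eye(5))
    Ky = Kinv @ y
    grad = (Kinv.diagonal() - Ky**2) * 2 * np.exp(2 * ls)

    h = 1e-6
    fd = np.empty(5)
    for i in range(5):
        d = np.zeros(5)
        d[i] = h
        fd[i] = (L(ls + d) - L(ls - d)) / (2 * h)
    assert np.allclose(grad, fd, rtol=1e-4, atol=1e-6)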